<$button tooltip="View the top level of the Author Index">
{{$:/core/images/up-arrow}} Author Index
<$action-navigate $to="Author Index"/>
</$button>
\rules except wikilink

@@.cpredtext
!!! Press to Save {{$:/core/ui/Buttons/save-wiki}}
@@

!!! Problem Tiddlers

Titles containing subscript/superscript markup: <$count filter="[regexp[,,]] [regexp[\^\^]]" />, missing tiddlers: <$count filter="[all[missing]sort[title]]" /> <$link to="$:/causal/ProblemTiddlers"><$button>View</$button></$link>
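
The two counts come from title regexps for `,,` (subscript) and `^^` (superscript) plus the standard missing-tiddler filter. As a rough sketch (not necessarily what [[$:/causal/ProblemTiddlers]] renders), the same filters can be listed inline with the core `list-links` macro:

```
<<list-links "[regexp[,,]] [regexp[\^\^]] [all[missing]sort[title]]">>
```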

!!! Sidebar Tabs
|!Add |!Remove |!Tab tiddler |
| <$fieldmangler tiddler="$:/core/ui/SideBar/More"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/More"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler> |[[$:/core/ui/SideBar/More]] |
| <$fieldmangler tiddler="$:/core/ui/SideBar/Tools"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/Tools"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/core/ui/SideBar/Tools]] |
| <$fieldmangler tiddler="$:/core/ui/SideBar/Recent"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/Recent"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/core/ui/SideBar/Recent]] |
| <$fieldmangler tiddler="$:/core/ui/SideBar/History"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/core/ui/SideBar/History"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/core/ui/SideBar/History]] |
| <$fieldmangler tiddler="$:/plugins/wimmoermans/history/HistoryTab"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/plugins/wimmoermans/history/HistoryTab"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/plugins/wimmoermans/history/HistoryTab]] |
| <$fieldmangler tiddler="$:/causal/Causal Productions History View"><$button><$action-sendmessage $message="tm-add-tag" $param="$:/tags/SideBar"  />Add</$button></$fieldmangler> | <$fieldmangler tiddler="$:/causal/Causal Productions History View"><$button><$action-sendmessage $message="tm-remove-tag" $param="$:/tags/SideBar"  />Remove</$button></$fieldmangler>|[[$:/causal/Causal Productions History View]] |

!!! Sidebar Buttons
|!Add |!Remove |!Visibility setting |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home]] |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-tiddler" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-tiddler" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-tiddler]] |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/control-panel" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/control-panel" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/control-panel]] |
| <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/save-wiki" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/save-wiki" text="hide"/>Remove</$button> |[[$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/save-wiki]] |

!!! Toolbar Buttons
|!Add |!Remove |!Visibility setting |
| <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions" text="hide"/>Remove</$button> |[[$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions]] |
| <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/edit" text="show"/>Add</$button> | <$button><$action-setfield $tiddler="$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/edit" text="hide"/>Remove</$button> |[[$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/edit]] |

!!! Causal PDF/MEDIA Display Configuration
| <$button><$action-setfield $tiddler="$:/causal/config/hidePDFandMEDIA" text="hide"/>Hide</$button> | <$button><$action-setfield $tiddler="$:/causal/config/hidePDFandMEDIA" text="show"/>Show</$button> |[[$:/causal/config/hidePDFandMEDIA]] |

<hr>

!!! Style Sheets
<<list-links "[tag[$:/tags/Stylesheet]]">>

!!! History
{{{[history[]]}}}

<$button>Clear History<$action-setfield $tiddler="$:/HistoryList" text=""/></$button>

<$button tooltip="View the session which holds this paper">
{{$:/core/images/up-arrow}} This Session
<$action-navigate $to={{!!current_session}}/>
</$button>
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 13.0.2, SVG Export Plug-In . SVG Version: 6.00 Build 14948)  -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="welcome" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 width="100%" height="100%" viewBox="0 0 986.598 1032.034" enable-background="new 0 0 986.598 1032.034"
	 xml:space="preserve">
<g>
	<defs>
		<rect id="SVGID_1_" x="93.266" y="200.559" width="841.89" height="595.275"/>
	</defs>
	<clipPath id="SVGID_2_">
		<use xlink:href="#SVGID_1_"  overflow="visible"/>
	</clipPath>
	<g transform="matrix(1 0 0 1 -1.675662e-007 0)" opacity="0.3" clip-path="url(#SVGID_2_)">
		
			<image overflow="visible" width="880" height="1005" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEARgBGAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAACMtAAB9xAAA/cb/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIA+wDcAMBIgACEQEDEQH/
xAC4AAADAQEBAQEAAAAAAAAAAAAAAQIDBAUGBwEBAQAAAAAAAAAAAAAAAAAAAAEQAAICAgICAQMC
BQUBAAMBAAECAAMRBBIFIRMQMSIUIBUwIzMlBkBQYEEyJLA0NUIRAAEDAgQDBgIFCgQFBAMAAwEA
EQIhMUFREgNhcSIQgZEyEwShscHRQnIjIGDw4VJigqKyQzNzFEQw8ZJTo9JjJDRAUIPikwUSAQAA
AAAAAAAAAAAAAAAAALD/2gAMAwEAAhEDEQAAAPSy1ysAAAAAAAAAAABiYAAAAMAABgADAAAYAAAA
AA0wAAAAAAAGIAAAAAAAAAAAAABDEwEFEsGgYgYgYgoljEDEDEDEDchQgYgokG5YxA9MtDDLbMko
JKQigkoJKBDBDBDZJQIYIoJbBFAhsQwRQSUElAigkoJKCW2SUElBJQSUElBJQSUhDCSgkoJKCRgh
ghhJQSME2CVAhghghghgmAJgAAAAAAAMAAAA0z0Ms9syRskbJKCSglsEMEMENktghslsEUEtslsE
UEugkoJKCXQSUElBLoJKCSgkoJKCSgkoIKCSgkYSUElBJSEUhKgkpCKQigkoEMENklBIwQwSoJGC
GCGCGCGCGCYBpnoRnrmIYIbJGCGySgkoENklAhskoEUCKCXQS21kpklCSWLJQSUyCwkoJKZBYQWE
FhBaJKCSggtElBKtElBJQSrRJQSUEFokoENklBJQSUElBJQSMSSgkYJUElBJQSUElBJSFpGhOekC
GCGCGCGCKBDBFAhskoEUKigRQIoEUCKZDoJKZDoJKZBYSUyCgksIKCSgkoJKCCwgoJVhBaJVhBQS
rCC0SUEFokoJKZJQSUElBJSJKCSgkoIKCSggoJKCSgkoJKCdJszjSBDEQ2IYIYIbVDBNgmwRQIoE
UCKBNsltktsltklBLoJKCXRElBJQSWEFMgoJKCVYQWEFFQWEFBBaJKCC0SUEFBJQQ6CHQSUElBJQ
SUElBKtElBKtElBBaJKCSgkoJKCbVGUaZgMEwAYIbEME2CbBNgmwRQA2IbEUCbcS2yW2S2ySgl0E
lBJTILCSgkoJVhBYQUElBKsIKCSggoJVolWEFBKtElFSURJQSUElBJZUFBJQQUElBJSJKCVYQUEl
BJQSUEWrOeLgABiYwABgDAGAwBgMYmwGMTZANiGxNsltktskoEUElBLoJKCSgkoJKCSggsIKCSgg
oJKCVaJKCCggtElBBYQUElMh0EFMhsJKCSggoJKCVaJKCSgkoJKRJQSUE0UccXFDTABGAowGANpg
wBjBpgxgxwMAYwYxMYDYhsQwTYIoJbBFBJQSUElBJSEqCSgkoJVBJQSrRJQQWiVYQWiSglWiSgSo
JKCSgkoJKQhhJQSUEFBJQSUEqwgoJKCaKPPi4pgADQaagMGAMBsAYwaY2EDGDAbTGDBjAGAMAYMA
BgAAwQwQwQwQwQwQwSoJKCSkSUElBBQSUEjCSgkpCKQlQSUElBJQSUEFhBaJKCVYQWiSglWEFoko
JbZ5kXFMAGCDTUYDaYNMbTBjBjgYwBgxgwGDGDBpgwBgDAAYAAAAwTAAAAAAAAAAAEwQwQwQwQwk
oJGCGElBJSJKCSgkoJKCSmQUElBJSJKCSgkpCVBJQSxnkxcUwBgDAGDBpjaYUmDGDCGwGDCkwYDY
DBgDBpg0waYAAwAAAYhgmAAAAAAAAAIYIYIYIYIYIYIYIaAAAATBDBDBFAhghglQSUhDBKgkpCVB
JSJbZ40XFMTGANpgwGDGDBjBqoGmNpjaYwYMYMAYwBgDAGAAMAAAYAAAAAAwQwQwQwQ0AwQAAAAA
AAAAAAJgAAAIYJgAANMAAAENAAAAJghhIwTA8WLimAMAbTG0waY2mNpjacNpjaY2mDGDAYMGMABg
DABgAAMAAAAAYAAAAAAAAAAAAAAAAAAAAAAAIYIAAAGgAAAGmAAAACAAAABAAAACZ4sXFMGg01Gm
NpjBgxgxjBw2mNpjBgxg0wYwYA0wYADAAGmAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMEMAA
AAAAAEAAACAAAQAAR5QpuKGAMYAwYwYDYwaqBqgqaBjBqgaY2mDAbTAAYAwAAGAAANMBMAAAAAAA
AAAAAAAAAYgYhghgmAmAhggAAAAAAAAAAABACGgAENACeI9fme8y5MHZ6sXCtpg0xgwYwaY2mNpw
2A2mNpjaY2mDTGANpg0wAGAAAwAAAGAAAAAAAAAAAAAAMEMAAAAAAAAAAAAEwQAAxDBAAACAEACA
AAQMUmHz+3HU3z5m/tx6x5sXA2mDTG0xtMbTGDG04bTG0xtMbTBgFIGANpgwAAYAAwAAAYgYAAAA
AAAAAAAAwTAAAAAAAAAAAAAENAAANAAACGgBOQMuQ9BeTxH0WfykH1XN88q9bg4+M7y+M0OriO/s
87vO6LlBpqwBtMbTG0xtMbThtMbTG0xtMYA2gYA2mDTAAGmAAwAAAAYmAAAAAAAAAAAANAwAAAAA
AEDQAAAAAAIGIAEMQAIcPyzzOXTSzkfbxrnLkz3w6zLg9XmOP0vD9g4uw8w9D1PHzPrZuAYAwG0x
tMbTG04bTG0xtMbTG0FCYxMbQNoGJg0wAGJgAMTAAABiBgAAAAAAAAANAwQxAAAAAIYgaEMQMQAk
UZhoSwCSuTk8M6+bPKtNuQPcw5PRPM5fY8Arow1EvV5Dm8n6niIzx9M8H1eej6ibgGmDAYMGA2mO
pcNpjaY2mNoG0xtMGmDTBpg0DaBgAAMTAAYgYmAAADEwAAAAAAAAAAAAEAACAEDQhmXnHq8Hk89d
3LjCbLnS/Qd3yWR9H5XDJeJBrOLNoVi7uGD635T1fMOfoy9gx6vb4CTz/oD4T6T1vmo+q+K/QeE5
ouaAYAwaY2mDAdJwMBtMbTGANpjAGAMAYAwAaYADAAAGgYAADAAAGgGmAAAAAAAAAgAQAgBACxNP
K8/jroyyg2jJGswjXJZm+DgoQSpRWiRXTkw1w2EUh+xhBp9L8f8AXnk+F9t5kV43nfUGft/n/wB4
eXFzQAMAYA2mNpjacMTG0FCY2mNyxtMYmMTBoGAMAYgYAxMAAAGJgAMQMAAAaYCYAAIAAEAIQAgF
JPzF+fV5zJUqS6eRpi8zWLzKzrMpyybsAUlKWdRmFqZPpsOX0TxvqPlvoz0GtI8f5v6/5KuvPg5Y
+4i5pMAYAwBgNpjacDTGANpjAG0DaYxMYmMTBoG0DABgAAxAwAABpgADGIbJGCaAAAAQIAQIQCA8
X1Pjyc1FUpRTkLzQUAAApuSNJC3KBQDas3ksidcT0voPmfsz5n1OWT3PI7PlTNT6p5q+++Sj3ZuK
GmAMABtMGA2nDExgDaBtMYmMTGJjaBiYxBQgtxQ2gaSKJYxAxMACiQskKJBiBiBiABACBCBACA8f
5zt8+kkipABMAsiiSzNGqzY8rwNHkzSVoHQwm5Y8qzOn675D6A38f6X5Ue3q9hy+yEHl+oHlRcUx
MGmDTBpjExtEMAbQNoG0DaBtBQgpyFOGU4Y3LKqAt5spwynDLlIYgYgoQMQMTAAAABDQAhAhDQh8
3Qj4nn9Hz6hAMQOpzNImyWwbbEVgKL9SPK0+o8eubDpRy7rE7IysE5On3vn/AKgfmenY/SycbGIb
GIcEVFMTGIKEFEsollCChEUSyiWUSyhBQgbkKchThlEhZDKcMokLIZRLKJChBRIUIKJCnDKJCiQY
kUpBmOJ1rjR2rls3OVHB859T8rUy0NQgBlAh0qD0p9k7efFHd0eSj1+bz8zzub1+A556MDnemZos
9DT7f4X7Q0rwUe+eAH0C8HM97P53M+knLM6Tmk6zjZ1nKzqOYOl8odRyh1nKHUcrOo5WdT5GdS5g
6jmDpOYOp8jOo5Q6jmDpfKzqfIzrOVx1HKHUcodZyo6nyB1nIHWuUOs4Ed556PRXnZ16mfkSenlw
SndjgzTOUKNMxVkLvlUJPF1+cqgBU2Kswsll9s9IEBqZhqZh0Pn0QZA5NTHk9DmXz56cg+u+L9Y9
Xh9zwSiZRcPXsvF2kHZneKaGIdGevOavKy6eZoueTpfHobrKxnZxlN5lF85tWQbHNZqcodJnoKuf
Qt55HTfDqbF4lvkDpOcOmVmavns0rq1Xzo7eZMpYJVsYdfP0nd5vQ14ghJmqMp1kmXSzFIR30cfB
0+WEtktoEwTAOue8hjCeiTE3ow023PMfqUeSd3SeQaIh2EY+nZ4HN9FxC18XsPQXoieeelzL49Hr
HJn2o457OY2waNrzo5t1mbb4aHXrycx27+KjbHpZle7ON72cke0HjelxcZ1ZqTauVHrZKDKeiDHT
vo5rTMtstDl9rj7zPz/a8k7EkbPh7Dy8PU5AI5zRzscsGZ1747GOnN0mnP8AQeacWPpo82cqNNuH
sPY4O3yTy3vqcUa5kvfAEkGr9Y0hamYB6evjaHRHFB2dPkdBqsEayamdcWx03nwHqa+B3Hq82nIl
+H3YLHTwWepbs36ObkShca+vzc3Qb8uOZ6+fKHVyrlNejD6w8Th96z5jfo5D0Mn3HhX63hHSYZnb
6Xydn03mcWh6Ex7J8/cI7Ofv5g08juPTw8zUL37Th3wonk6MzDTeTn6r4Su7zketxahh6XldRtnV
mhy6GkcNGpmjXTHmPU7/AJbpPT7fF2OiOLuO7z+7zzm5Eg6tfOFFMlrrO31b8oKWJkcnrHP7nBBy
79Ghwr0UcM9KMZvY4+zHyD0fMmh78/tlc9YGE9OQuT1OUx9vw6OuePoO7y/o/mjv6uHpCpoqUGWf
SE+v4foi6fL6SfW+b9M93ly5o9P4r0fNrq4/V5Dnd4HNXb5cb6codvX5ir6bl83cauyN/L9A3vCz
p5NdDzMIDu08r0Dp5L2M94yOjXztT2Y83oPUnzrKPMo7eTDvOc1RL9eTyDp889nzDlO68fbMuPq5
jyp6MDo4toIkQfS+B9OafN9vOYtUd3m+j4535V0GHYcxPniHN0Hb5odGMsqjQX0vzWwc9ILzsUX6
Jwc3uo8LrUx3l412Pj9Ax387QVdXCV6ng9Bpy+jgeTHdoecfSeSYV16HBrl9OeZ17ow+d+p8w8bW
tDhOujzY9Ojx124x0dfHvUdFc5rfHR2dG/mG8+P0mynM6NOVnXT6jz9evsPB27URxd/OPLXzzvz5
vQDfo3FPk9Ry82vKd23je4dGePEeplXMc0VmZxpBKaPRWux5WW0EsROezMTVE57MwrYMvT4kE66G
Na5jTgKGIigtUYvTA7+7wkep5eVR9L08Spbc3OacXTscmX0XOeC/b9g+O7/p/mTk0NT2+fq9OPnf
C/RPEPjPsuXSujOwwW3YeIezieJwfV/MmenHZ26cFC5e7pPHro4I9Hs8Pevq+z5X2z5/Lt5zHm3U
dE5Kte7w/WOu8fUOLtw7Tp8T0vDJrDoFRgel7vyOwp6kcdcyPe4OCjp25tT2fn+qDh6s+Y7cJszT
sjVZhRQiZLh0Q6Ym6JdARvwnX2c/tGHi+r5IKwMduUldepzTPFHo34/SejXF3U2+U9Z+dzHtR5/Y
X1ZWfUcvY4+T+q+Q7z6H5HLnrLm13ObTbqPa9L5qj2vOw0OnFcAeh43pnRnMG3z/ALvzxgudx15r
rrLt5czu8bu5zj9/5z7GOj2PG9yvhjgzjrjnZdZUR14dYuhehXlL0+c4nrBtl2ZGefRRy67dRzHT
xmLvtOPDRhsagtqPPfWHF0vrPn+nqDPi0yLcMqUxtsVCLpMSWQnpB3XxZkaTJppMmHL2aRj9F4xW
db8ZyS5iuioOjPn7Dp5Pa5Ky9ry2aXhRr3ebqePl9DmeJ7/g/QHD6EdhzLt405lzaLlTk9XzIozj
oR28Xpcxl5tScy6Ji6w7a5+Xv5zPbn2jb2fG9mu72PD9o/NObq441vJG7xDp15Wet2fO9Ne2+LY6
N+Wzs5YwOnj58zuXCz1OXCCtsqI322ObH05PG06LMsfSxOO9dDni+s8pdnIDlGszBpU2XvtAstcz
PIQwsuHzkZ3pF12cldWPZJzVriZeeEaVaM10aGG2vVXoYdWYrQm+vDgvtLw5PoOv47hPY+n+Go+5
+Z8zoLnWQQioqTm5qYJUNUjNER1VMVpWUGZLjXp5CvR38b6wj1OfQ+Nnt8qOvzernOfUZnc0G2Oh
teDPQ6vH6q338bY9vLn7Tk5vSo8bo6u85eL20fPHvI8zqrlOzn8+T0Ony8jq4ix686PZ83ECdIJG
ofZy9ddnOtTLDNEW5LzTCLQyeeOrmzC9uVHq+dkHV0cPQdcZldd8exos9T0+faTWsIQ15eJe5Yyb
48exO3FYet5O56D86z0ebHzjXfs6DzI6uE0mqDnvAjOqilnoNKQWjMjXnN/pfF6q+g38DU4vB+j8
GM9og7Xn79eHr6Ac3J15E86UYa74ivOy88pOr1fn9z6Lp8XWvSz83M9fXwdj2r4ehN887MF1cizh
vRwx6NnjL2cTmw9nrPlTSBb42d3JigaolKhhmUZkGdIhXBI2QUCbQ9uei7xo7Orls9iOXkr115PS
HF08pthWJpiRFRmzXTlZ244yexh5wept5MnTzRB6Wvk6nbjjZdZWaQZmmGdDx0ozdWXeVHR6/i6V
6Hi93MSaKK6+JV6HDmxFqMjSQYBpkytcAjLroyv0fINs8g2Uo205dj0+/wAXrru5r4zprnDo24Og
3ItOnXn2MfB+38ZfAjQJKQS5FTCMdeKNNI0EVRldIJEE62c66EY1rRGzmtMuPWO7h7+IevPRvt5+
9dmnFmdGb3Oc6PQPn+X7S4+Hfteac/TOppyMOjg32OOujkLcQdNclHbHPJ6/KQb8eWo7mi6VjvSq
4jRlcvb2nk36nki0nuOBbBgbZC9DLY8tdGcJPpqOt6Hs83DR4EfSYHil4w9c0bdnmbHq48hVvHY6
d+LU6zOjfTlwPb5vKxO/lxzNjFlyqKp5EcnockK10Gb3isXYZaaanM7CWSaxy88dXGSZduPUvocX
bwpPP0cgdvB3HZx9vAcraOz3Pmfp6fk+t82epwc+8Y36Gp5NGResbmVWjLm9DM5n0o5zXMgqyKND
Tp4fWrWnJtw8kGs5o6NMfQOHP2g8jX6LrPmdvpcT5Xm6w15NNjkj0keeVJfRw5n0OvzGh9GvE6Tq
8v0dj5rH63gPBv2yPH09LWvHr1ZOCt8TfPFDeehaELPbMRaM24iqzoqntTcyMrQhe7R83ydXNGGs
oIW5znXgLNBp1cHad/n9vCPk64OXtz6Dbn3xFk2Y+z5nadvgej5hN52e57Phe/XzePRyxntNlYuS
oUGj5w3Iomhg2iabH2efqbZY6lJOu36L5PQ+n5eTYxfRQq16ieH2dzy+veIvz+3zz5OFNXGrIW1G
E3BXRzWe9p899OeLl9X4RxrOTfPGDVRQ3lib55EbROwrmDXOUUXtUaOiacEuqJg5Y+48b57orCLz
jM0BXj0ET1anBPXzGfTj3FcXp8IW0abYVWWGkQVniX38HcHH07Hmbe5AvV8n1q+bz9DkjLp5uyuP
Dq5olOAIZsSjqgkrIsW8WE64E2tTLRA6izf2PG9WtvW8n2zpzjKN1OxzX6aPN5MsauZSb+Hp2r4W
foYHPvh7R4vq6gvf8r2C/lPp/EPAndkTSJmsiZuoz0AqYk2MWa7491Tl0Sc1Uy6z7iq6uo+cyvzS
Zr148uttzzjs5Ts6eLrrke1nG9tjyurq8+PY5++K4o9TkOaeiDn5fRR5XR0aRpYqx6Meg5ctucFo
zs5t5Mzj0OrPPc8uZIGpLSRrplqQrRenPZsElEWUiTS8uit/S87tN/b8X2yr5FHRjnzV6vT836ke
Fz9WVZ74QJVsZ9hRyVohhKbdnmSvpc+fefNZ7chTmjOVnGs50ZtgCRXZz7mhRWkT1HPl08R1e5t4
J0b/ADPqR5+eiFE9Jj1Zyer5nX0143VzYx6wvSrkw9DkDq87sOjBeCfTcni+oagAJCEGEzmdfRPO
RLcdXn9GNa4mcPXH0Dg5uznMRA5YpUo0rKk0Kg0rmDp05A7HyWdM8+pfbwa16u/k9h6fo/KdR7XF
5epeNhOuYOUgTDGo0AmTfp83c61izbLl5zq5EiU4jqzx6Kyz10jmrbAvOmZl2Oi6da4nUuvoMujL
wDq82biemNDDXHrFHTyUrior0eLWo4vU5Dy+np4Y9Xq+c7z0so6az8T3vPjn9fXCmsg1MKK5umCO
XqxhYXkd5x6hv5tnZzZo2fPqYzANCWhA6gKQjXWLS7yZcgRn02cVdkHMulmZmHXHLqdunms9/Xx9
q7qeBeso5lQSAJWzOOrtPG6Pb5DwYrCCWEt2VVc4OUehyR1nFW2JvlGpprxfQ1yevNHV8z6nzoZH
pRwrSSNuvhJ6uTc6ufWKx6eDpj1uY8WuzXj9M546YOTCt44fRyD2fM9LzqOLt8yKMbNKwDr289np
Yc9Dx6Q5H1Bgt2c1bMynZmU6hm7oye0Ei1M4bMtFoS7kekoazRqpDcyRu+RnWcXQPPps8++9nAdO
A+nkk9Jecj09/FuvW7fn+o+iy4/pDD1ulR4nwv6t+dnlzSDTPQjFgmMOrm2CHB2dnN010x4+R655
OpODmDRsqSzdtVxPTGOu8da4534o3ykH18mx7Feb01l5Hs+NFd3mdR7vl+j5ZXn9qOF65rKAGgoS
NSA2UJNdeazpOQN0MKdBrngd5xanXGN1edI55uowvVGcaWckdWJOmWg80ynFBUI1CDe8cjvvg6Dp
XPVOLuOWe5HDp0Zmfo+dJ9N1/Hh9zx/KdNYK+eNB4jQDaB7YdRkTJ08vRkDVAPMjWNyR5GrzRpLQ
1pZltz7j5+nevCff58X05WbXgzr8/u5zh6sOg9PxvV4g2jQiOlVyYejMeZPpZnnnZmc5rmqBFiRc
plIk0156TXbkZ1GEHdPJodJhoVFMdZ5HoPjs2rDSssu3OPPXSjF7yZxsHOdPOJNEbZWvbM6Jx+j5
npDz3xo2z7jnz7fONZzIvOtTCOsMef0cDlTQwRtssTLTLY2vi0NVnZEWFEhZmGuco2mKK2xY3LNM
7wNOdAVfKa8m0l9nn9Zl3+d2FcHr+Ieh2cHcNFVkaSKNIJKowjrmOPD0oPNj0pPOOzI5zbJU0Dch
TljcSVrizbTmadVcTO7HOip1kzCyNVibGdmZtock99HDttJl0ZhhrpJx9/neqTTgr0fOKQtCGIRd
GK3iLmuenjqoiwFnUi2y6BJhm9EZpUDALVEKglaM5dNUK+fU6eXblLTgShlLejkVZl78vUdfl+jg
ba5hoLesJ1xHLiJnos5e0KTaEnRktQ5F0qOPP0GeZn6uR5p3ZHMts1lpggG0h1LNyehJqgVyyt8N
jVzVY9fD5Uer6Hh7nUXpWHL15R5HscnQd8ZVUTvJgdOReVI052iU6gTRWSoeWsGLGLfDcTSKyaBl
kjYraJazNFijV4ItZs6spCVpZjHbmV087L4u7zw6ebpPQ5NOU1iqDTNGphY4cnW+a61IobzDYwg6
IyRtXOzasrCdAzVhjn1xHJl2M4c/Qk81eoHlv0JOXp06jnvpKxumSWyNVBr43q+bGHreRke7r4+p
358uZ154Sd2vlh6sefsdTxqtlOY3ngdUY5xdrY4Dts5dLZzrWRWqDKwTMzWuWzZzAhoSaBZ2V28V
HVx9WRiTYEhRMGm3Doehjy0Z9WOp08vfzmN2EtoZIUSxphLmTR89m04BrKkb6dDj26AjRKgpE2QW
nA3Fg0ipGQ6RQ8yqQMSKmpK8/pUcGPTJjOuC0ILeQW8mXWbNjFHdfLSauNTHV5m2nJ0FVz0dBzBr
nNCuJL5YCqWJvtnzHW+ajZKSpixWSVjYRcWJCLSDX1PIs74x2OXH1KPG6tA6Ofo5SnjB0rKTVRQp
1yNJtk2IpTQColaoWsunSsTTNomgcZmqyg6IxR0VzbFAjSVRlbkItFJUOHZjbyN+RTHLl6WRxLpw
WGkNDAAuGAJmxzs2MNB9fF2JhfOjrvhs7K4aO5YBtMaGFaM5+fuzMNloaLCTaYg3yz0IuNBVnRpl
pAKmTUh0aclGhlQdGWpePVzAAMihDZE7Bm7shaUZVoVnq5G5RbyRqYONFEly6IegZukKjM1nLQoq
qpzYRUlmOR1TyEdGK0OeO9nA/QDhrqDne0GGe7OTL0Q8qfYyPMfdkvMqkACkg6Y6OJBApUMdSF6Y
BoZo3eKOnTjad04M2eUGkPU431yZ57wJ59BGenMaXKLrFlCYm8zTq4ew7eXp5xFBLoJpoE0W4RTy
ZalDGyHbM3WZdRJqskaPFG0wDmmYXuyaA1fOG0xoZR1Bzb6OpqwStCcMZLGRYZbBzm+RJqjGdw5z
VQ5EZ49UnEeizi4/UyOBdOq83R0CcuXdgcq7MjnYKAhgwQBpjZZEnQYs6HiJ0xhZ0Plo2x0o5zqk
430UYK4J6+XqXv5OzjRnB0myigGyDRkjkt5I3WYWoRecSaOLHLozeqEWybzRoZ0IuiDaqxq4KqaK
qWNRRYkWIALIkBjyLFRI2QtZE5sQwmaCY2DnWyMaeUOVJpm6BToc8dmRy49eCwhDEykdphXoUnk5
euHk162JxR1ZnOdGa5W4HrjRpfN0oFyJpE9OG5383VzEcXoIypSUToQyS5TGgAdEtWQ3IyUaJBJV
mVaMk2kmh00BooZaTG87KJYmBZLBqgm0Z2SXDQVLBgORjRJThhNIqcEKaiFnTHFSTrnZn0ZIu4Cl
lB2ZYbEc/XJx9N2VpiFw4NXlZnz9XMJrrMuP2UeSu9HPfZmc03iE1mu3RxdydvJ08o5EVmUIEFKh
UAnMmqyRpNUQbMy0dEjdJqxNwU0xM0MqtCKZDaAaE1RI0UnZFSFksqWENsBhLECcgxFzGg8dcTHD
TKKIRusQ6DMHOcnQs6KQgm5IYyqiSyKNLyRorkc5yb6cCPTfkh7XP5uh0acmx6XX4RXvV5ux5BmR
n18fcvZh0c6EuQecG85I0lamL0szegJ1RNNUNhNgDQUCJFYmgdSGiSBUE3aJVMl1A0wh2gchagNH
DAqBDQgAoZNORyrHz7chjKIbSCWyXLJYirmyWmOLyFomCGEpj1z1AKOTFwrKsQ5IqWFzYUml78zK
l2c22+pXJ0YmRpRF6BkbIzuqE26kqQcsabAaG0DcUNSh0g0eVCpMFQAAyWMQMkFcMadmbQWiiARZ
NkqsylTEUCqKJpMzJ1PP5/V8yJSRThluLJN2Ym+hyX0Zjh0YG8mRrBMUiLUHVpzbnnOxaCklXJld
AmIbQNyFKEad/mdR3YdnKY3tIwmhWxTTM7aAchVhDkKkZUsE0AxFEsqKRFFCbYkgsVBFhJUDVWYV
qECZTihK0SaIysYABnbEyTYzzMZ182N4xZToCyComx3nZqLlNVgjVYo3nJrs8qTZ5hcyxb4bGcdO
YxAgQJhI0CaCaDN0yenn6T0+f0MTjrrmuWeojme+hx11s4l10cb6gxnrzrnOhnJHoqOK+mq5DpZz
LoZyLrI5XvVc8dqOTTTU5o65OfPto5nqznOmjkrcMV0s411yZHQHLXTBit7OW+rMwXTJzvrDmz7Q
4I7IjLzvf5zxn16nNj6EnnrtZxv0Oc49ttDz8O1LxPus8+u6jz770nHfdqeYu5HBXVJlOrNcfRk8
9d0nnnczgO0OA7g4Z75OFdyOM7UcfU+o/9oACAECAAEFAP8A8sT/AP/aAAgBAwABBQD/APLE/wD/
2gAIAQEAAQUA2Wf8nm85vObzm85vObzm85vObzm85vObzm85PObzm85vOTzm85vObzm85POTzm85
PObzk85POTzk85POTzm85POTzk85POTzk85POTzk85POTTm85vOTzk85POTzm85POTzk85PObzm8
5vOTzm85vObzm85vObzm85vObzm85vObzm85vObzm85vObzm85PObzk85PObzk85NObzm85vObzm
85vObzm85tObzm85vObzm05vObTm05NObTm85vObTXZvftD/AOnExMTExMTExMTExMTExMTEx8Y+
cTExMTExMTExMTExMTEx8YmJiYmJiYmJiYmJiYmPjHxj5x+jExMfGP04mJj/AEGP9Jr/ANfZH/04
+MfGJiYmJiYmJiYmJiYmJiYmJj4xMTExMTHxiYmJiYmPjExMTExMTExMTEx8YmJj9OJiY+MTExMT
ExMTExMTEx/Bx84mJj/Ua/8AX2R/9OJj5x+nExMfOPjEx8YmJiYmJiYmJiYmJiYmJj4xMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTEx8Y+MTExMTExMTExMfGJj+Bj/AEev/X2R/wDRMTExMfGJ
iYmJj9GJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiY+M
TExMTExMTHxiYmJiYmJiYmJj4xMTExMTExMTHxj5x/B1/wCvsj/6MTExMTExMTHziYmPjHxiYmJi
YmPnExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMT
ExMTExMTExMTExMTExMfrx/B1/6+x/8AsYmPjExMTExMTExMTExMTExMTExMTExMTExMTExMTExM
TExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx+jExMTExMTExMTExMTEx8
4mJiYmJiYmJiYmJiYmJiUD+fsf1/jHziYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJi
YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJi
YmPjExMTExMfooH8/YH8/HxiYmPnH6MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMT
ExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMT
ExMTExMSgfztgfz5iYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJ
iYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJ
iYmJiYlA/nX/ANfEx/AxMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEp
H86/+v8AGJj+BiYmJiYmJiYmJiYmJj4xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMSkfzb/6/wAY
/g4mJj4xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEqH82/+t/CxMTExMTEx84m
JiYmJiYmJiY+MTExMTExMTEx+jExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTEx8Vf1b/63+lx8YmJiYmJiYmJiYmJiYmJiYmJiYmJi
YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJi
YmJiYmJiYlQ/mX/1v9Hj4xMTExMfOPnExMTHxiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYm
JiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJWP5l/8AW/hj+CP1YmJi
YmPjExMTExMTExMTExMTExMTHxj4xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMfGJiYm
JiYmJiYmJiYmJiYmJiYmJiYmJiYlY/mX/wBb/SD+Hj9OJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiY/
RiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmPjExEH33/wBb+GP4A/1W
JiYmJiYmJiYmJiYmJiYmJiYmJiYmJiY+MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTEQfff/AFv9Bj9I/wBjxMTEx+jExMTExMTExMTExMTExMfpxMTExMfGJiYmJiYmJiYmJiYm
JiYmJiYmJiIPvu/rfxR+kfA+R/teJiY+MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTEx8Yij7r/638EfqH6B/uuJiYmJj5xMTExMTExMTExMTExMTExMTExMTExMfOJiYmIo+67+t/BH
wIP1j/fsfrx8YmJiYmJj4xMTExMTExMTExMRR91/9YfxBB/FEH/AsTEx+jExMTEx84mIB91/9b+G
PgfqH/FMTH6B9bv636R8D+CPkQfpH/Ex9bv636x8j5EH6R+kQf8AEx9bv6/6x+ofI+R+kfpH/Dx9
b/636x+sfpH6R+kf8PyAbv636B8j4HyP9CP+HWWJUtnZvdsXf1v1j9A+R8j5HyPgfoH/AAskAV7F
NhM7PaNtuvxG1d/W/UPkfA+B8D+MP+F7OzXRXr7vr2tzt1NZuJmsc7d39b9WPgfI/QPgfA/4pu7a
61V99l7lHld3rr11e96ehsrsu/rfpH8EfI+B8D/iLuqLvbf5Fr2kBmaMTx6L0JrJv6zNd/W/gCCC
D9Ig+B/w4uixtrXWN2Okss7vXEv7q4y7evtjWhIrG1WJi8VbRPKU7CVX3f1v1j5HwP0D4HwP+EGW
7VFUs7nVQ2d4uLu02nL3OxJM8CNcYzHDb4W0mqyvNunc4r3aVqL2Py1tga5PYXf1v1D4HwPgfA+B
8D9A/wCDWOES602Pc5ULYLFyojXgQ2YX28ilDNLdTK7ugUTrtrx6hclgv0rtLbrsO2Grfr712abv
636R8CCD+CP9/wA/wiQB2XZUmvOZXptYH06dKp7luUFYyVGL4lfgPdXix+Q26n17dHcS6m5RaLFs
osqsXar0DZR2d39aD9Y+B8D9A+B+gf73n5z85+dzsKtUbO/sXktmNaqzSssa/wDBqJ7BRVRta601
lsANK9rijF3FYmzWl9WvY2psphhs0C1K7H1bqaq7Nq7+t/CEHyPgfA/3vMz85hdRDdUILqjAQR8M
wUb/AG6oLNgM3OPaWgOJ7CJqbzUvbh172r+WxzNVqvdsU+i19MLovobNY0ussc9/1DcOnuWyexkm
/qtOhuVN+7+t+sfoH6R+sf7nmZ/Qzqo2O2qrl3YbFha5jDYJ7J1O/Xxu39Sk39/Ws2ey2NkswJfK
lbmyzeQ8L4YWZnXbZGx2tYbUcZjBoN92T8vuq01e2FwXQu2Db1lIp3tK/Rv6fRHY693Wa9mmmq+v
2139b9Y+R+oQfqH+45/TdsVULd3Pi/cuuJszGs8M/j2Q2QuwBsKsrjjyGbDmBswnjA3IDyErRCag
Zh0ba7n8jSwzRkYnrtGqmnr9mvbo7ToK7Bqb9+nZqejZXterr3tL/HuwfrN8EEbvUa+3fd/W/UIP
kfwR+gf7pZYlabXc5ll72E2Q2xrCZz8FswNgFuJ5RmyQ0LZVbCI8J5QDABi02OLEep9YVWqqpCcz
Xq9t3ZEU0dVe1O05bj2HVHbVNjc0beo7ZN1P8r6oo3+Ndr+bqD63f1v4o/UPgfA/3LM2dqrWr2+w
t2XL5JfELeS3ktFJYuVBL+WY5DEzBzkZYcVAnJsgYJJzRc2ub9zY2GPJnRQs8ZIxOlo53b93u2an
4WVP7KsqZ3HVpt1g269uhvUdvqBr+h7jXuS+u7+t/BH++Z+M/F99dFe7u2bNucxmxC5E9gngkJiN
bhFbJjeYownOeYWIByxVeMwcYCgtAfIKgAqYcQsqzo6y+nvdc1IPg9S/s0TUCVBnd9SL1rtt1ru6
v1Oz0P8AG/8AIX15d/W/gj9Y/QP90d1Rex7BtqwsYWzOXk+JnJVVrFl2S/mtPoP/ACSTA2FOYCQM
EwIFijAZj8chFIaDJXOYWwTOnut5987qxHjoyG0VfLfSb/ba2mmzsC+0jI1ayna3f1f4A/gj5H+4
5+e73sRm8ljDnJyIWOUcK1tvIwFiB4AOFEIEIEwBK8YU+WOZxJhPwpAighWYsAuA86zYOtb2tK2V
V6zWTob0c2oQnbdy4rJYkkSyq1qdOkfmXf1f4Y+R/p8TBmD/AA8TEwJgfxs/GfjZuFFF9r2OSYS0
y05ETlyjDz/0D4n/AGYPMxmY8kCKcQfQ2AQ2Pn2NAQwqX7i3MhYRHlBCkoLKO2/+dejZl3Oy7OrW
pdzY1luD1H+PXbZ/D1vx9nROl3F39X/SD+MMTxMiZM8mcZieJ4+M/GfkYmZn4zMzP6c/rP6O/wBw
V1Fox8gzPyCRCAQBj5xmDAOVnNIzJGcCe6swMhhU/C1ljjhWoxMwmWEJAxNPXbiWVd7rnjpbB19r
sLdi7b1uotuo6voNbWnj47fRN1139X/YswGCCZhYn+JmZmf4ef19xcbN4mcoZ5yScZmfIM45BesT
2jBZjCPOBAoMdVUGvMFSmesAAssVkaLxx/7OCITCeMYRBnV1Dnqlb8vRKkW00U6FlCF7/kfW3+r/
AKDPxn9GZn9HmY+OJnEzExMQYnKZmRMiePjE+kJEyJmZ/g5mZmZ/TmZ/Rmbt9lFF9hdyYcCAmcmn
IzInFAptAhLMcQKYPEAgBBZgsZ8k5iBni0PnguHoxFdlNdivCZnjBnLTV+6nqyT1XWX+vW7HXCbO
uj7VSqEX5H1t/q/Gfgf6jMzMzJmTMzMzMzMzMzM5TnOUz+jP+hP6SAR2+sNfcOIRkn4+sJVIxJPm
Y8A4mckEEBcw5AbkSKyzaf8Ajm9sij/FtStNrUfUuxmeI1YeFShS2E5OYfM03CvoatlfXalfHr6d
BcA4HIzkZkzJisOV39X9Of0ZmZmZmZmZmZmZ+MzP6czMzM/OZmZmZmZmZmZmZn5zMzMzMzMzMzMz
MzMzMzMzMzMzMzMzMzMzMz/I9flWwGSRCYFzGsAn1PmDwSSYMiAgjiZ1nV27ZGrqipum65jr6Opr
zJmZvaVW5Xt6l2o7QCMAY68YrlZ4I+g1DjY1VNWotlCgbNJJ2K571M9uZzMbZpUjsKRdcf5ufnP6
M/qzMzMzM/rzMzMzMzMzMzMzMzMzMzMzMzMzMzPzmZmZmZmZmZmZmZmZmchOQhYCWbVFZ/ctWHtN
aHtNbC9jrNDua4Db+qs/cdUDd3NPZ1X+rNiBozlv0fUYOACYqTT1aww7SpAe2AH7sxn7rYY3Z2kH
d2SDvbQN2y16PqjlZqsB9CfJZMRWKkENEPGxaufXl2U88znie3EGw4lu8RH3qlmttPbvXH+d4mZm
eJmZmZmZmZmchOYnITmJ7FE9onsE9iwODOQnsWewT2Ce0T2Ce0T2CexZ7FntWe0T2ie0T2iCwGch
OU5TlOU5TlOU5TmIHBnKcxPYs9qz2ie0T2z3T2me0xrsA7lQj9ioj9jYZ+bdHvsZi+VJBhM/75tF
doWUxsZHLN3h2IBJJ+BkgIcfaByUT2GJyaUUAAkmZM5TIxygZYPJb/zmHj8MiE+urF1JrjIYCRFY
NOm2vdpdpr+q7lMzMvu9aHnYU07WmjpIm3sXEX+4z2sYbp7zBdme2ezM5zmZzM5mc5zM5znOc5zn
A85znOc5z2TnORnKcpynKcpynKcoGnKB4HM9rT2me0meye4Ce0mewT2CewT2CewT2rPYIduoT8ym
fm1T89Mnf8tv2GNczQvPZOcD4hcGZAnLxyEKrlgRD5i02GtiFllyIj2ljkA84pJhfAJJIHwqljq0
rGZcljOU5TlOUzATK1stJqtEIxMxa0ZH9Qrtq9kep0jpmZIPSb4qv3lW3S8/D8sJrrAFWPYqLq7b
Nu7VuNr3T2w3ZFn2J7Ibzj2sItpYiyzI9zEvaCLmntsntshezHseCx887Zztgssnstge6A3GZefz
ZzaFwD7GENrYFphNgAd883yLXz/On87PJsc7McrSossge0zjsGH2CexwPfYF9rwPbhrSJ7rIbbDC
zwEmcmgLmaOu1zLq1Xrt67a9i12FXRljTDmYaEtAzCK1hJDlmNgJJnmBXau/Z4xmZjiEQKBCxP6B
mUUZCq5gdjOR4lhMz7olVlkbS2lqHKBnWEsZxcDz8fX4/wCn1uUuobLBlbU7qwawbMwZ5gzDkDYv
Nh0Dne3LqfyGvqAN9ZCbFQaw02JbbUsRqjPdWCraGBZTht1Q7bmYNoELdUA+xqE0Wa9r7OnqVV1n
Vl41vVU6rZsDUAF9KkX0V2G1WBuxPeoQvmNwV6qWsN2v6YNjE/dLxD2TuDvW5G84NW2WY/jpSuxW
5GyTBcpL2VqF22pKbw4m5GcWXhaBZcdfXraDXVK2rp/Hst1BQdi4H8vAN4zXtUmlrVZ9XfSka/a1
Vvs72nejXqQbbeXMs44EE4U2IB7rMC13YNYYOTuuo1yjTShuz2avYZg4AJmQswZ9PgKZx869HsYi
uuM7AKfKFmlhKsl68jsAFL2LNvP69S7TV7trTFetZSqWb+qNcgvGrABqAFQIf8itT+TrOCarBt6P
tLoynr+wOu1NtFg9qexLdax+0vVaadSy46GlXr37Zf8AMZipFYYgIrXXU+heJJRbDt9ftaqKXBVg
akAA2vT69bqnvrv1qlp29bUp3Njd1KIr1MRavM3Oz1FnssYIVAcOgQsQoS1gNXc11ovsRrWIJs3L
JTZrMdPdpp17d3W2R/LIUoIy1EpRYGFW6WbRtMbRrrKdXmgVjnqa9KVV6SbVm1qrXv8AXalbpsa4
1bNHLP21HOl6LUCDLvrgPRVrNatqC57kWz2j2NaAaaLml2nsVp/NS2/mzW6d6TYRqn/IoCIFZkts
RebG9bnE0Wy2r/57baRScsQkOSDknwJgmeBORhJzTSznS1KWqu1PVd/KYINit6d9ErS2rYLdj1lh
p7PWqqs7Brmo3ksTbv1Tc20jobFK8BaCVVRzwoNgDhSvFjXcgZ6XtmP5l/UvfZsUPTZq71uvDdZb
Ka8KwNr1t9tNQa7dvU7Rw1VZosG5Q9UrDmobFQCe1Ax39gWU3PBiLt6mF29apn7ay1rL7rWvtu9t
XtuZdPaJs0dxbKNBiOu07KrtnptzZ29bq+LValI2NzU2A+xZfXULHRlqtse/NcKH16rh3IpQWaYQ
XdY1ZXrayyad5rur2AlW491j3kJeQ9euK1lv4ZpqdcJs0rsdm4HY9fuU1jsblvfX3W1Xs203KRZt
aiXHY2Nhty40ejM17rNdrr3upeqxitGwHSzbqB2b11ht2e+i/YsvtZLR2lms1unT1m5NrqgKFqyy
WOkStmOivGzV+nb599VWYmuCtlmYOTFtcUhmJ+deh77GpWpNS65q2O1Pz7FtG5xfWvY27Gveltp+
8pWIQrSuprEcBIgQuUaHZrXU9rg1vkMLNbSs33sb3WCaPaeiza2Ktlrbbqxbt7Nhcc46FTr3mh0t
rsrQTkESrdNm7t8BtVW65P5lVerU97V2VpXVbsC46mxS9Wt2VOuNfsK1u27abdjCWMnX2cr+svvZ
6NiqIXYdftW6d13dB7tjsG2TbdaGXdsSHtGew30C5tj1vRvay09mNYpSiOX6/ZDHqNp1/FFFvoVb
bKVrQseR7HYtNi0u6NTR1tuzfToab1qb6zYmkaq6E2TbLtuzjdubT2LdYbLLWssX2m28XU1V3WMu
vch1+ViVX7Lqy9hbWR2K20qtBqRq2Den0128XrdqZbdUE/IqEG2imjbbhsPTcvU7tWuNrsarJq6h
sZujHos6XX416xov1DOzqD7DXV0i297TVRZc1jV6asxcmBSYAs16vxNVX6ddXQamu/8AL9EbmXYE
DUc1X7PaaWxXt8U2blArCBlWzify2jX3rKU95OtsVxer2GfRo11t7btBsqCc4xNKr3XFSG2HBBQr
AMxKDcL6HpbSZ0et0dNvbdpoKPyd3iu1faFbWV2ezYsQbDW2hK2zVX7S+u1bHiQGWy/WFa3O7pNV
yL9rWG1rbHU7y22dTu2BdTUppXqNLaXY6EcfckbWpeW0FqzZZWa7SCu1YXXY+6ru2revttCxO2Wt
Nqq22qa2vvdlXtaOzTLLLHsK69Etu5MtQtFGkiCuypat3r9uizS32pF289j6lj223UNWaW2tew7G
3TdvbF+5amnsWItG+JX13alWp2CL9Hcaa2h2lTjrLbi3TUoidO9gTTt0JVZsoeGwA35SO4axhQtd
m172emjcdzp9sj0V9nrzY7h7gt2oVrv1bTqH7u4BNxBMROT3sNcMxMAJnhYTk9bqnZ2bdRLKajpx
tSh5doa1VaWKzaX46izr9e6J1t1UFJyhsUXM61m8hP3XSDJ2GsJZv13PsIlGtrdrcmk+xcw+gUT6
zQ1BXRdslAbEIdVKesiHYp11u2xaOagi4mGwkaHne7DNl1jMx1Szs6a3squKqluuspGu9u1r9akW
rWB3GqoNTqJfvPmvcv8AdrX3vTubbHY6U8630NZ5dTTU6kBUObRarHnsoz027Na9cvq/mqPcQFsg
YSy8Oq7FbCjt9ium/Z39ilGsyxuE/NrsiWN60ucnnZkHYYWIBsbGu7tdVdVat8s2buF3Y32VLZTW
F7VvZd2+uy0799s1ex3FY/5P/JT/ACO1zX3tOG7s7LbdvZaRq7jtCrrf2D2VbdVabG4SNvfQ/uNw
NmxtWBX3iBvbQ2bO1usVrddiNLTuWjWqrbUP39sSLWXEVfuuYfihC5bA+MZPT6no1t/dqq02AB9j
CL7LZ2PW0VaFWxVVTr7dgpzaBTbalm4KaTd3VjU27FtpRFcleJ19q3Ws/e7GFu4HTkM4DFhialXt
2bSlVV+xzY+YrFSbGaOOSrSk9NJVq3oYOGPXOo3rhpO9gVTq8wg6zs2b9q7BT+09iQeo7KDp+xI/
Zd1mPQdiwuqt0tnZv1qj+bzs092mpNvaqt2ur7nT00H+U9UUs/yDQYv3xbZBqFtKdduU+nZobslO
vursssG3W9boyvyIhY5QZHksmwVSvfrrqpZWUPytCINikV+rjWsKoQhZTtrrW02WbDbO7VuVnWdQ
9V1b2uy+xLBjjqZuoKWAbONd3A1tbWtZOvrplGtQGu1tGwFgJ+Lcav51ba+mz119bU8Ohcr6fSl6
93r9Wuwb9SVvRs22qti187poap2rr9XRp19Q/wAztx9/HNChss/i11WZEwTNX1e7T3aLzsr7b3qQ
S7VexNajYpFz23U3al6v1/PXRtuia9lFz9jZ/PYAn1tF17XLauwCVKmYnGJU04iK7Vsd3ZuU+Zif
9gRh92v1YdDQ6Futv2p2PV39dd1rId7b27q9ylK9hl1lsZjbTDZaXr1t2xbtnYqs19q0Mt+yh0f8
gWyf5B/N7Hfra0nTcQ02qPVtMvGwEaIeHrmBTry5TrHNi/49epXrN8pf0119d3X3adi67ey3XsRU
ossa7S2a2OttesrZUC5MUEtqB6yaiDZU6TXZDTfdUBrPzSnJi31qiXaxh2PdaUWInNWGxU6Jbav4
7uXqLn8ZVlNDIbKrVup3dnYGt1djVB71d6DXY/ZbAu9lpA29gNRbtisV2pYe7o1a7ttWl3Y1sa9y
hWTuCoruFz2N1mrdt7fVsujaHt7Y+UbIb7gxMwMfSEzV643aQ9urcy7DzLwXFZ+ZeC+xc0yYHM5G
LsWqPJnmDMVrAdXub0WwhrBW5i1+AirCxmRPrOQUZUif9xiwau+5DqbFpbY3X1xtdjs7iaLIOz2m
pXfbaAVbLeDdbXsddTaF3l7jdIq2jbTTsLsW2PTQ+q9FyNWEXaeiVdhQRfu67L1+jsXVW3gnR2Wt
tfYoI/cbBbrH7119gAa7z8Zcd3UVdVKzja6Vlq5fsNhbTi8U7dS9WXYU+p0vSLbzJNVlVd3rqVfy
GoXbqfrdY2V7lNtOyQxNauFZqwG2UAHYqV1tsGypqLDUmhXVX2ehXOXX3VqlYt2NMGL2L0647OsB
ux13UlOVjrdbVQKkqtr2A+iQldyFlvUNt677Tt1mwCdbYWzV7DS1ad7sNfarp1qxtar26KbF/wCX
bTVe+xZq7lTNXYGIx8f99duLVOzrpFXIEGqkk01mCpIalgpENMNRnr8hBBWJxxOE5BZUgaYzOJJY
EQVkq3iLicCQFYRTmcTEImxW3FUsYhmB2b7iq7ljDRx+5rs6v51epr7T3UUBU1t707Wpspd63R8n
L1NhqLMU6l9hqfb5VVbD12dcrJoa1CWUadu1W/8AjisW6htBXuqWFs2apViuz1cF3Wc2u68m0pch
psIKWE26dbKzcYbhyRk5JYBZYld9S9UHn4+177ONJQrYKh6mLjNRA1t/Tr7Hr9lGptN9wCHmD/UG
2VYWO4F9wGo6tW1VZl3W7CXjStQ9fr3FNTUos0L9ZFt/Ef8AHbVb2pqMJQNnXqpusWjq9iqypdQ7
OwWqqQ22ha7dl9jOmB+Zrg2n2rp3Vps7HY9bSL9k+49hsLuU9lbchSoOSFLKIo+4fS+x2nHMTlPC
w2rGtIJdyTyCqWxlpyEABgwCSDDgFEJlWq6NsIBqmZMxkgSyq/m17Iw2AYt7hBsAT8ukhd1cIoc/
gsQukoXUpU799vXjZr2NNxdcpRe47DXj97uMw23savrUZd1+nrr6/X0du+rr1qss0jZZ2fd7Vdw3
hjW7XRV9PuumRde7R2ZdpU317PQoit013q1K76NgWbORZvxx2Fq6a1Var/hKFtrDG1GTslV+wVcv
l0RLQZZsFVp2GgcmX6SWz+lA7GLtWqauzvNen21tfV22VXvZRwNiCslSY1fKG72pzmpXWdda6rRu
W0rt2bVBfrL6Rae5o1aL9+tlbdPKt2tG4t2sTutldtn16tuxVHNg+tTs1n2vFq2zX+XtArt3s6b6
gLctgyIxxLueOXmvLs1LgNdWpCho1bVKpJgOYpGbDxeeDABjx8DEwIPE8zY13orDM9tKWvNapmTt
tgKDFcCIcG25a0r3rxYtOvtWtrddqjd7GzaLchFsywaoiq/iDv1traersvVRs2HtNr2U7tL6zO+Q
5CtBTrzTo13uTr9RU3up6/8AD/x5s7y2KWyCO41aBut+GIxqZ0rZLKntR+h7Xbeq7ttWgfuWnsWH
j+4enf5WjaqVtnbaawI1eFfFwgsoWvj3D+rs67mIWzlMB4VZSHAFTgqCHr3Qa7lsOV0KrBr6aV6e
vQjaAdPZfsVE7KV2wtVUgtW2G1CXC8qS7JYliqq2Gk1ss9ZMI8pWuUfWVWuw7b9jqrVshyo1zfVE
S4ts7dwNZNprvtpnsS2kFc1Nr5dqqjUiWo9KlLFs416y8K9Z/wAi3V2KzZq7QbV17c9g38sNBkwZ
wfI4zyACJgg4MVCYRj4LhTa1l1lfEHV2VqW3ur0Szatus5MQGifTctUkZM00qW7d1Na+m7SUxenv
elwtbeSSGE1KbbXt3VJ69C3ZbuzcnZX283o7K4Klj8uOWod6219qq5dzzqf4+4Xf7b/IfSOv7y6q
b+bdh9Vndepu/Gq6q9Wt6nTL6lC6wfVpJSrXZamUvx1AH3eqUp+NadZ6l1C9RR25WrXYlHbK53M8
VDCV3iU2UWTZoWgrsHkuxYZt0veSGRtTDa+/stq6Grg9ffYeZsMS4gkgxeALIpgRAdIYSxORamt5
Zrlq7NcqbKOMqQ2M1NbL+JWCdBDBpKAuopLLWQT6qrWUytNUu5656mTXBFCuy6IaL11101NSioW1
UI9tlpCGmyvWufXvt7R71stsc220ol93sAYQsTCQZmZi+CbKs81KguYgMLAQvmFjysv5xW+4GHPJ
lrJbYrUqoZWurrlGuNnZuroWyvRtsrpt2Ncvf7mu3LVOZxJI/HSprWChPYeuHHf7PWdd1qgB1XXf
mXLp9ai28advcvBs09/Z0zs95dfqqbUhyTTxFgrRq9NbXt+vwzhR+drmVdpQgt3i1te1ss6bCXG/
iLNpfRsDsr6xoXW36d22ElfZrVTtWLbc6KwooqL1dfU4fWsGtdTdZqLYSxsXlr+t2bTptGvhKVp1
tujWVF1tpONzNiIAAHWetMqPW1bMZU/BmdWQOIG8i7BC0tNXV1mZtTRmzrauduzrKFr3NU1pbrsD
v1oE39I6tm+lwD5mS2u1ZJW3WpQbFZFm6THtsc0U3vG1txw9dlZDuJ+Q7R7apdV9jKyqJjMwwnhY
bMTLMFHhFzBXiFCFPiWHgORnmAkFfC5ID3OTNTkFda51tJBvHB9fYasBrGO3atSs2ZVUCCAJxzDg
SoDhotx7Hstiy3Z+kXJJHkVs0RaSoq5D8a9oervtFn+OMVKMluGIWtlcddtgDQQC9tIQ0VZrSlA7
Wko62WX5qFm4y64/+yoaevUeqvpdOzo0FVkrjLWoVVsVqnUBirJ2lrAb1qi2ui0HV2AV1to2KbKn
oblVsbbafX9dZ7NTsft2ypMzgK2SXIgYwtATivYdTXtXI1exzVbvNbrORIJtmw9yQ4sLBAeLCBsE
vXZYtXNrahrsl+mUr1tS0L1tJK9bUGv0UYPo1mVa/BErtWu83+tKKGRtHXVX1nBRLCm+myWGISRA
STZ9AYpBA8yjVSunJjMTHdVjMWPmYzFUmF0WXu5IUmGgKK61bXswppur16H1xaLNJgNnYqpqdixV
TnDkem5omqxlFVVb3IC+vbVbvdlqFt4UMYmsuW09dHFehlLeuRfy+vUr2Wly/eaQi7dG5qrpUM40
tSpbvx9ane7ht/YXzMEFVzWDxhKmBFrv3mNx114tY4iM0DeWJmDlw0qTXakUWIbXBRdSuwWZVwJW
WU1bdiQ3JsayblNNf/xbujo7epr6vZaivtU6Ypa6nXFu2tddz4BCjifrzxB5lNjcUtrFpNjHTusc
syCs2V41LELWa6sa9ZUs2KStq5yCXGjj2t1f5Is6e9EfVsSyrY2qkXt9lDX3VJmz2HXW1U21lbt0
67U9/Y9e52b7CBnWV7V1c09461i9nRdRZWgJEziH6+ucROv1/fftWJa+Bl1JOxWVn1ioTC6LGZmm
AAyZlajlew5HsGWtth2h2b2lXZbNSP3lj12WexqNS21PwqzWlz+tdgcyAQi+1WsRV0rK339k8tlH
UopObFsdrf5VpvrCPuqsr3FZtq3dKg3LKF3SG7XeZr+x3NxKdNKGIUgEQKM//wCqiGqstrRi7WWv
UIa3EQFm9XllCgeW9gyNsrrpucY+0XX2gRmRmSypGNtIguryLErbqjRu1bFWprVJq67p/k49W4tz
A2Xmx7azYlmtYhatyi02hjXYTxeKHVVX7W4Z1rFS+2znUtgVqjltbedn9nE8q3Vq1eWaNTTR6itd
RR/LCn8m+xuLaepZH6qpjZ1Gwot1La0C212Dc2JYLSwGSazChEx8cuJIGMQkmAQKBNGzALEliumH
eyyxeWVVAGtJ+B9MzMazgGLEk4mPEJhMAiXWVxNtYLddgEqsldttTe1Gj4uNGsaOxvdvygDXZYyE
V3NU7NbZez6rR6KuWpZRr7L5v2NnVNbJeyHbpWzX1R/PI9tWr/Lo99FUTs+jA2O41WB3bDVqptbk
2NMatw+niDwcjjcThbCgclZW4dXUqoYAMcjEMx4AhHn/ABdnFW8fya9Zmr1/8wB/NDHAYyu1QfbS
x505L0TV1m2S/V201lklmuisdJ9l0SwxCWhUBvYMJYAll9rTS2DVbVu02sLGEp7dtWk3I6V7XO+3
eVaqbhYwb7Of3c/Da+raW6qoitFw+rUy2aCCHQujVWVw4J4LNOjmmxQ9LmZxMZOrn2VMtI2t19m3
LGKMmw+RMQ+JyxFZTH8/BXyYTPrMfAmDAfNVzVRrndkeB/Ggwbd2yRtDyuCs8iCzMe16bk2uS22K
tllzGxN160cEH8qxa/azzX2GrlW1Yq6N6G7a0FZkXg+mKG1de2rVs3Ng3lXDD1WYGreZfVsVhmfG
GDWKQFA45aOSqLY3JLjwVKirita1sIbT9Qr6bs69A7u6b5UNkj/JyhuIUBqiyqGBp1xa2l0unbqD
W01FbUa9l29cLLt9rULnFTsk17vQ5VciqzLalhlyWUszFgrkRbDnS2edWxYUjXcTTteuy3ZJmt2L
Usm3S9BsX2K+UViGD5VyTaHIHu+1WJcsr3nXqaHRrK6HWhxs9IltNtT1vjEyJXcaYz2WHGJmf+VH
iAZjMVhjEKAYT8OfOIVMKsAFxMQ/Tz8D6ssqVjEXA17B+47hH5asACysDLrFW3Y4C3TESwpZeysW
sJPsLL7mI9hylmDdaHsNg5U7lVVGxs0W10bVK0vt0FWtX1I5U7nY+2n8gV6abrst1fFlyI2Corr4
2fy22LKVrALqxwCQScmAOs5kLXfbWWtLtrXWXt3vrsqWo44wkyrYsrFXala0262lwcNyILeQfjOJ
nM+hW5s7BW+zjTZXv6yU2VeGrfiz25nuAQOTDiFwYLIb3xpbLFA59nP+VdaV2BcpgKlFCQgmxQQy
t/L17GCobvf2/W2Wqa8Q4EwWOMQiKIzcziASz622hSCWgPxiepmPqE9agshM9bT1kHiTODGCls+q
KiCImR9JrKf3Xe2sbz7uILbGi7K8NxEeq1bbWrD0jZQVOrYhANf1CgYLecnCsynn5LGKxAJJUPgs
Qz581qWLEEn/ANrkgAgleKZJFjLYWpZZVW7szEMUSxTU6zgxX62HwxOImwa2t3bdmvMVMjj54gDE
K+VLkspWALhqmxxIhAxPJOcT6QCKSTwUyn/H9vY02WxGJbFbMJy8N/6P1U5FVlaDV27Hs5Yqu2FG
yl9LywAoDZn3EWV3KbVIKUEBE4+yvjjtekfkanDYxGwJ5jZ4gTE8mXnAXJiLBMQALC8MJmCxWvE+
0T2Zh5QI2EqgVBLb1qWzcZ51rcuz3lB7B8k1tie5Z4yhy1YTm+rr2C7VWy23p9znX0PZXV09PsWW
Xdbti0hlMCWMEVmcafNKtj0rbUntWhLNVqnDKHScAQARCWWGzka7EwGqIPpDa2rTlx6r7aNXYa6h
0GMhGyEAytRcnVeV6VliheBUYPsFVIsOFyZYvrgAMrQCWeWVSYQQebCKVadj0ezo60PiKoKAExKL
inW00PdX/kmny7TT6zt0eiyp1SwHKzI4vma7ohKljRZ6rfyF4WKrWPSqFV2KzQ/IPkOpUvWoCVO4
qQuSN6ymn91trs2dlNu0oohU5IwLPoIqMZwVTYuV44KDwBFBhpeepgShgqOQhA4GCixoKcT0tBUQ
LLKqh+YXF11mMmdSW/ct/wD/AH8TOJyAJZZoWvzvybLd26hh2VNs91thW7Yy29eYNrSe99Trks3u
k19dOLMHssUa+8taEPYy3MGOxmxXSXZNWWDO3KcjkvkJYROVoI8R7dWuo7imkFywZ2hA4LRYyjWt
RBXcF1/a11ihG5DGPY4qrDUa1Crc9IXYQ5qBx+OKqOAaBQY9QhE6zXD37m9ft2Ms4CcDNfSusjdf
s2FutucftmYOu1hDoarHY6ilhsU2a7HOE4FWfiebGIzAG4hPyK7LTt2V2Lvc6tlAK0sV15YZGnuA
rstZxZs2FDZZGuYw8jMgQWsIbcxbKpyAXDMeAYWLhgCFqpIH0DYEwxgRsrVmFCimxpyaYcxsAPsP
hnOSztPWJ6xOrrx2fYf/ALxzGj2ZPKdd5sZQLN/gXisyGnfvyEUymuvg+/UbVvZqX09qpVaxS1wZ
URXjs6QrU8X1IrUWWn8Kwi7V9ZKEN6ooKs4y3rZz63UKTxrbAWvJ4Ayo1Z1K7DXs63qlGlqONxqP
c9dYDccCxcO9rRxYp5MSHJFib25ZX1d5B6fdU7j1BwhY3ka9BHjEC+SomCsG9sIaO3CSrs9J4zVv
BgxmPG4Jem11ToGwoYgkYUFaytPGZYOy5dECqSTKbyGZ6gH2nUfk3T3WGBmM4gwrxDxQSCSJ9Znw
HYT2sQCZ6fYUrVQwMKtBVBUDHp2OJXY42bFmPyrIu08bYtdmbwGsICqIAnEMDMTrMfuO/wCOwP1I
zLMcxOrJ9tljKdsnIRyCCDWcP4KG63XO3n8oOcUbNwY0LaV6A2V3670W5OTsGJtux4PYzpZMHBQB
gMNgc7UBnlSHIPlIFPOwABHy2t6RT7iL95V2EQuJZazMX5KvsVlUs9FQUmkEVdOLDR0hrWrS69Yd
3TQ9utm1rX9ZfXZrY1lQWWsmja0HVWWHZ6fa1NcwxyAAuYFOR7kle7s1ivt2le/qtE2aMWamvsJZ
1WgjNodYFHX9fPwtAFauuA9OlPVrsLK9bJZEUkx1yOPlQBCwwFaYUAmrKsCG4zCmGtJ6Ui0AlNYC
ZmTC3lGBiK1j6fXjXssC0tuXAtXf6lzUY3rRVRzKqgBr01XsyqpbIJdSOZE6plPY9k+eyJ85luPZ
Os/rXebb6iWOI6h4yEPQxzt22+raOb5QcW1FedFRSjsFx2FK1WVWaoFldJrhcKtljQNkM55Cwklx
jPgrkLWpmPtUERVDL55ZGKr7azqUc22fStv3TgCMYbWrW2ynr6Kq27TqNaP327cLW3L2rVVlZ2VC
1iLv0Vo+n+ZfV1FuK+u1KiHRJ3/3dUWgsViQJkxOIllkAbiVxFwBqbS1qvZ6dilab03dfbpmvbtL
NzcD3V/RrEY2Wgjkk4gz1+WNaxrlWHZOfynhutsK1OQFpSNZCWmfAiVGfQeTOQAJJmDAOIp7BE2a
dmvbo7jcNcfzCIzQVsYrMIOWEpOBWwLIYFwBTmdZWF7PfYjfZvuxmGhWP4omnT67bVzZaoWNlWrT
lHwUB/l2Ovrv/qyo4sq/qaf9DtRjtaiETJLNZ4LeAwhOIxnI83JyGYIrEwV+UazlgZDhR7A5+0Gv
JY7boTcXKv8AazjJtIC3Moqs9jVpRPU7QU1KOaCVa+3dK+nYyvS1KZyUQuSfOP8Aruba7OrMVfPp
ucDSvn49aT/4Vra6Bs/AGIBk9V16a+quhQZ2po19kXLlrXx7WMJyQjYVQTZbGYkHlAjGLXyIZEB8
iGZMroseLWiTmoiMjEjEwYF8n7Q9Zvm91m3otqb+zqPfufmMzqsKM84gRmAnLLJhgowUwDt2M8C4
VbBOu4L2HYf/ANA/c6FkBIJrIEAl1pDjLEr5Ny1q7OYrD1M2FvKvYqHkmszWrrbCynX369bsrHPY
UcvU74lehfbW6lCD9oOTZjiCSFI5Z81nLLiLZwZ2UqCTDyWVgtEZkl2GK1hldPWFtYkgg8eRRHzR
e9LV3iyudZVV6fYQeRhMLYIuQn0b102NfX10rv5JbT11xq09KoWpc9PY6+xTeFDQia9a3WP1tyxq
LqyKanTr9On8oqTKlwO/yOw54BZjFrwGsAi/dHtJBnICcjlEJByYa3gScIUUCmlQD4DLCoETJZml
Ovbew0qaEXQDTdpbRO32+5t1MqwRKwYVYR56uZ69aUfsNClUrAtXg4nBjF672R6UFmhWU7HsQw3j
ewYKGDKYM55ESxVDswRzaahYXdnqvqTWULrnBGvo8n3mpesnVasbVitq2+3Xs0KLtzZpbWco5XYe
wje179YkAraOBc8h/wBj68ouJW4ZrASxzKmUMV8AcQwyxDErnJ+4FFL5IgIcooSIxdtWspVKFFdP
LEbYTKVbt0TrqQyU1VjYYpRs2kyh/DN9xbzdvbOl2LMu2lvX2M1mpehdLKnpdnXXbnDgHUylqbeo
TsblNKbepZ2jPqFCiVVx7hms2NHc8nR+Xqsg13gSpC2wJ7myWyQcTlKay7KuZiMsIAiKYWSqVUbg
HadNVRp9QG2ev7q97d1nAiqWaqzr9mm/X/Dvq9bjb0eAQZKhVFC13zdofUFFVW2rJbWaLuJ7Cmq6
vq+SdnvVZ3TTDViYGTGUmMhAtVmZiSNWoXu6PY9elzrs1i019xteu/Zew5GfbYrVvY73sUXfeqwI
9+7XWKFbsmN9p1aqabXzMYTxAwlhE5njWGEbyDWyAsFZLKotxCpewBYOazk8ySGAgbMQrErHHV/G
RkZWFa8rFp2r2PXa4A2NakWblzxLijv2Lud9wmptb1QNW+qluwo9guRj2VXsFb81/b7FXW3dqnUf
U13lttX5VOEYcVI2bItF2HtrrNwe+bK42ODMzV4bwFL4JawxXcSwvPJPGcVh4xFDOlCCcIrWrFs5
KzkwVlpZs8Z1nW27uz2Pr0tbtu+uus6rsatXQ2LnvtCgRCiQZyosc0WEGoBl29W3WuXZDSnXvuVL
djXrWkJGb8lZU6k06Gvbs7gH5tdPsmzpGlGCQoDCJgxhkNTWxpqVX2bE1XOxdsNUdk1fkaKtsXU3
Iqhxs8+K02mjVuruT8z11C16rdi2osNhWq3NrW32swrq3hzMnAIwcSs4CFTHrZi9bGAYA8IBzQEx
3wVacVJAJb/yaELK2vZWNYEVVf1H3LmBYmEwtLt3WrKdg63b7G3UYazAjVMZNEw0aLRtejjUqoAq
wVVs1tK1wPVj7TCpWGwSu1+K3NG3ep0q93cF15udpWhctWBHsCws7HJgYsCMHMOSJrgBwwJSMwi5
Jp1GedpqWVytDy6DTtp1++2Vu3WYzp9GizXXDRgRBmUWGsF3MJOdW1iNhF2KrtYk07N2tZX235Mo
bXJ2aKbDwZ6VcTQszublmNs7tesd3tattMLPAhxMgwoDCiiL4Z3rMuW6izW23ZF3OAe38vZtobXu
oTdvqNtlRXeQ6f8AVayhkZzQ8s5EuzoxbIJJhGPgHz4yeMRipawiezz7lWC4uAwVvYBOSwuCTzUg
s0AaJYmNfHGpm46wzbkTY3dbXjdwHF21sXTHhQVi7F+CzglswgRgcPZWkX7hlhBc6wbVuQysMCAs
s9jme1EFu82Gudpmf913IlRNlp9KwqFK+WGBGXI4+SIq8jmIIbMRE5FFqQDdcvT0+1uy+nqepXd7
3Z2ISXLTDU00ny9XMfiCqMFABweaRMo1Lq426yltuqbmeu2l9bfsrNex7Hq2AguoR5oMP3DtbBTf
XcbHe0rNSwWUEzOYfowUzyIzEtc6tZsIarFOu/X7PZHY1tX1116unsbIevY1rXC2O1DAVE8gcjRf
rdeu6+n2bNaqBiHzPqT9FxmeMIRk65aGmxQVdYrlYpyQ5z7Mkuqzm4iXEAXJkWBolvrZHIP5Voer
d3mrNlbOqF5gifWcRMGHIHIT/o+RbTY7LlFIJhUz/up3Ci3E9wxbe5hZ3PrbBFaw2IIbTObGV5RL
LMlWrsFlDKhYQXBlVeRZGBRSB6mMVCZTrK0A2zs6vX86tKmrRq2P8nFc3Ny7atwWPjFHHncfcE8O
niWeUPmYgIEoRnZsaz3Mllfgy1fYtmr4rusqNHYAxLQw0kFm/wBwos22rC7VGsHvSpa9VcGEgEti
cvPLMrY2blrD0V1WW2m3Y1lrDW2p12xYcdhowbBYl1tVrAga3JWxBZs8HjALFYsWxnOJkifWCFgT
kk/9jIK3OILsgGpp6ajG1Bg0uqMlkBHFTxZmgd8+0qGtPJL2WC7L/kEOt1Zr1zdapRhGFOfxrSPR
blNOws3hvugBhh8QKXgKKbObymm6y3Z1dnVVika7EaxjMk/ABMqrJa5yScmKQDrsXRqzyKkRCQLH
RAtjM7bRrCb9zDUq29s06dWqGNetRu7l11jtCeRCBQYqkyq9q478nrbIGDLBxY4E0qBsWPq8Vv1m
sqrZ7J67FHiGpCX1WY26NqQXWoen37R2G0rHsq9ettsrSie1nqW+vLbFXMbKAi2ph4zxVY+HC1O9
ltbUWuHl2zby/drClfiC9lHsLRhkeAHvUarHJwQMiCA+PPyM4ErYgccgECeVjFgqvlUa0MLVy3pa
emliNf1uF4tYp5ockvgsXQKxgvYNRu21H9zYvVbZdV7KOYtODzyeAJRxOQh+i8njVjFNGxstV1Gt
TPyBWmxQdmvY17abMecCYgAJURmFdTOSeUzKrmqewLYlyglz4RWc8eC1JZc/W9NVStO7/PUX32d3
eaVdyxJyaeluKPkMEZz+0dj6n1titmBBobKhsS8cl9jTV3DVLe0rXXs2Nh5rWApr3Cxb6cGrU/Jj
Gys1bNirYFuOhqunZb2DuP8Ay9qh7HNuwQUd2IsHIXGC8cVuOFucFrQVse0Nc7swtYHkWigLFyVK
uZ9zQKwnrJHAgitsnWYldcT8dZ6VgoSCmoQpVONQBCguVxU32spVirZARoERVLqELAg2KAHyCWWC
0Z9ymFqMijXMfUVy+ieP4dqDhesryWNxJNthibNgNHa3qv7jZm3aUsm3UzK2tLNnM0K9F6n3HZFD
MdXp77Zr6Gtrj/J+pG5rMuCfgCVLybYsDt/38YBGm3ixjwwJroa600mvVKa9ate8qrrs7rk1/ebl
iW2W2sxxFUmZsELEgF819h2VU2N7stlCtqypiGU5gXlLOKuEENfhqw0rY1tS5rZHSxKFamzuKyqh
3MF7idXtWLv77BdvcxK3xRsMzEMVi8cMUIKjBsYkWtPdxX8iLYGBFRhWkgVVxqhOAmMwfbC3Ec8z
l9xsEVyYUsAaixXeu7glNoVaWYGm1oKHM9FYIpQBgRCnljWrOeJYBi7OsV8hTYCXUqHAX24I4vPK
lL3QJtEwXoRmloaa2jagj6RyKggTCxyVivhjbBezRbvGvv2UFOwpc/48dW2kEGcl5HE77UXV7EmY
+GPCnEx8AGf9UsAW+qGUmWdzTSmxtXbLYAiiHAV38qORXEwVOPKJkigIDt11y6+66fQqchGm1Vl7
VcQWMoWxp7FlZ5LVaUK3GbH86qzKEAmdXkdn2Df/AGbTlrkYFLMZJBmcxvE5HPgHlmZOCFB5niHg
PBfeZ+Uyyu7nOaGcVYmuxSlVgC02CerxroKi2wEtF4aZzPuwULMKFhprL4RDbyM/mFE5JLXVQ3Jo
X4TiSwISO4YLauH5lsMWxiKxZiqgVcQzWOj8mK+3EF9gg2Mz20tGrqeNrPgatqxqLYquIVJAIAps
4vq9ztaj6/8Ale3W1v8AlNR2r/8AKKrhuersNBh5JiA2PdYHsnmD6ZOc4lKnD5EACg2kUZyFBI85
85LEg/UFVAw0zBjH5DrC2fhAWZ0IlR8CEhhualmrPbYALMxFqJqNcx5rcwNN+smzicdZWp7LfY/n
srnYzwaynlG11npdQciZGfLfAYgf9AjgMieBCwLYEUmM7KV2Dn3ARbhn28jz8gMsuZ2NLAP7wYru
GUEQviNYGAsflguxQqpetQde21G0WC/g3EP7lgL4CYX1Ew3PhWzDaJWy8j90UCqKuSgwrqStNp5B
vAJgdAKrVsgvcQXgxnqeHWRg2pYZ6LVhLhvYwnM8U2mQ6/aWA3FXLEygca/kQwAsV+1bCA2S0YRq
2YhWUxkhIWBDPxmAK8QxLEEBB9TiEzXJ52ozlSQymYBlO0qLv9Ua1qH3KfILAh3MV4jBxuLiEYnW
jHZ9n67NsJbVdZpVs3rKqyrPUubaCY2uDDQ2SGWHEPiDEGAxYkkkzIIUHi+J9oZjyhY8uRJFhAGy
5NdqPLEV4laL8MHBrd3X1kQVAl6eTKGQBixr/wDP3gvZbWvrS9T1/wDMt0byQr1qlbuKqsmyi0xq
/sbgGckuUfCiwRrXZfMotwVIMSr8vZVCtorAWsFXIPFMM2rS9ssr2ap7gYTQ0OtrvPwVC/g2LG1d
iMLVDqBrYhHwIZrp5J4qfJqGXXgzOwUE5nmWO0QeeWZyInsYHNRIRGBRhEBIIwUYqxsZWt8lT4Bl
zYNW9ZSbDzcZjWMEVrbVLOp1dlkfZfNSPk9dRW2/2D8ews2TY3u4pTY16msqxUB8AtgBjWHhowHo
Ah10JOswjJYs5YBgJ4/9/wDeRjwAPM5QtkOFxWOAFuIHJgueLsNDcpIsyGbx5YNVmA3Vmqxwg2cp
7EtC2qkN5Crs8o6OXOBFZsPyADfcqIGb1vWioQWKSxpyxFyzV4zYtWB91teAjWZVvEpqW22jXs1L
LNvXfXVRwIMGcrYwgvdZ+WMe+pw9SOL9XE85n0g8lV4LdYCBKRiOGRkvRgricgIcGED4MzDMmFjB
YwC3rn/1OREJLBJ5EvYs0wIGoWtvaFFN3BqjhEIKOr18znq7D+49wxTcRwZrEF1CgnjCeA5CFVaP
kBmhCuTVyJ12Lem/D6ykNrDJ13UYIJ4xcGf94ghMzgBjAQGb6/byDAtyM9rcVsAAuUgWnC3KwLKS
1JcrTg83wbjxSxmntbPJ2LuvJbcC2whhfwhuLnha7/i35XTqAFCIW3doIEYAbBrXn/NW0GCwE8Sw
RnSyvsqy1pVrhgQqMCFVzwBJVYasgo4oZyiWcbYVxPoKwEjWM5Y5P0FQ/l4EKKQKwpCMY4KkkwCC
L5h+FEKCGsGfchV+Y5YgbDAZFxHNa2aMhyRmYhLTMY+K24vcqq3Vk/ufalfzfKvq558jjALcSSwX
OK88qQPZUCb8T2Agr4IXB/8ALV+DSrQ0PGpE/GVodexYVZZ4nnHwMQM3EEzJgyT/ANg4GfJPnLAe
xQKrmaUGoN7K1e1EIqTLOi8UUktrWYr1chtekCrVUyyoJE2LVUksTkDwTwUlaGM2aHShMcxr0Get
VgTEBcDmMKisBW2OLCfdFrZglas1q64i7FtUWxrU8EshCgchwzGOY3hQMmAYr+MExn4AZLY8wgYU
eTPGAIBmBcltcOPxrVUnyDDYUrD+TtEQ38o9wBrcOV1iZZq5Rqb1BU/HVj+59ogffK/zNdAFNzCe
ywzzMZJAM9b4FZy1law3oIl7khyx+3lnJwxISAkA1hh6g0FbkepTHppJOpZhqLFnkTOIRmeAf+/o
BB5n3CFyQGAitKvvJ1Vn41eBrIJ6kgQYShSRVUsX6S/ZopOtX11lH493ubqqnqSqkqrw/kGbzfb/
AP6p2c1IUcNWhmDlkYwVZJFiEvrqq2KrPbbcGVlJzMESpuLkBXwvHj5x9hjnJGQB5OQBkQKMu4X4
AmJ9ABFE+oInEYV68m8LDa5Nm1ay+zlBdiG5HrODGqDFtd67PXSWaz79W3mZstYaBZxJAJ6wf3Ls
qG/ca0sAY25/I4xdhCQaWFzVqA1rR2fFYJtXUoJFdSFcGLxyvERR4XzExhUUz1vBURFqrBNVQPHj
GRjGrzHqLg6ykHWYR6rFg8MCME5gzASB5AxmIwEoYl0JnkjOJ5B5CKthK1WsFREnOubVR2CfZW62
3wDtLZr12UVFEeWckW/TW8Hq2Bp12qRVwWawStK2NhqFg2/WjL7ItVpj04gHCG0gtcueYI4kzaLI
RYrQvmAggiYyWOIkI8jJNrcfgrP+wJjM8KE9XE2LDYmGdz8kwtAQW4HIJnmAYi2MsPBx6q2FuvSW
ppu5O/2sCrZnWf8A9LffG81mQ1TuF10i11qB4BBiFQLiCtTKrpahhIMXhhQ5iowCisTHAc2wXOGs
qIF1RA+4JjCqonECKQCAhY1gRkGPVxNlDYNFUOoGjatwmGUsxMB8D/1W3EqHaV67mCioFVpEAYQp
cT6nUrYgn2w+RsoBsVn7R3OsJVtpuN4jMRMOjWWWKW3MSvdInNXJqrZTQIQgiAoTe09jWr+LBSix
jqrH2iA91rEBrNX1sYA4lVljM+QQIVJKD7ox4AgmY8Y8DiCGqJNi8nC4AwJiZMBEPiLWGNWnQ1b6
AIat62IyQwBME8xLuSvayCnaYR7dcnZrqaEYnV//ANLtHzuowUI6k+3kTYSxYuCWyS5hXM9azkRA
XwSROdRHtrEOxmNc5H3kcFEwsGIvPIa0FbEJZPLgYKJggEEgqyDBqZgoOBUrwUkH07MGryjaFBg0
VLDSKFKgQKlEapTOXCexRE5WlqLMnlAKjGBWbWRevgscHr7ODWWsWW3BfYcP+Q7KHVjkLGd1le6y
GveDRdxST6Gb366g7oWNsOw4jJIEx5RBYKwAirWSFInMEkcoylSIgh+0AZAHktWkudygvewK1gAs
xCxJ8/GQIWhd8pUYqkNXeoNe3dy3L/cwYZc0tA3GZgMZ/HtGU8rQpMUc2dAB1h/uXZqx7EIwUIoG
DgKxARiTWwgVYTWCXAJscgl2nEiYWAZgzMNCUE91IB2kEG1cCX3LD+HeRXqrUACQD9mRgKCAhiJB
WiwGtICWgL4GcVrG5KX+4lCoDtFuUw5aep85cQssDeXvsCqigAIZvpxs9TmWI6tWDXHZiRYQxscl
WAljZnsOWtODaSqghAw4A4CmwgKAGZYxVh6jy44I5KKScoGaJwDMAxJRFYFgVIKf+uOYeKDadGmq
W5bLYlJArB5H2ZPLMJMMGGJF7Sus8uXCBrArVoSoCweQYDg5BH0lADWimpw2jU0fRDI+hcrMjVPz
d363I7PsVI7BhXjKQOFHNpysI4kHiMkDMw8Kw2UrDs1iHaMNtrVii95+MQTRVPWCtIAQqkK+Pv4q
MgIBA4SGwiFnIJLBQCvkRMkhXEKspxmeMiwmeA2AhZTCK3ACKC1yz2hgq0rDXW0KWIA9pfbp5ALa
kCOwbJVskrD5gPknLExcZyOPsJQlorlS15x7mMUNyywDOAC9mRfNdwZ7RPYCQWeYYTLq5JJB4m1r
AtqO7cQsZOCBLDK31/U3FIlrMB7Wia9oHruMdbM1lwuYfJ4kTE5pkAMuRnOfjMU4K2MsTbuVU33Y
fn65nu1Xm2KQOswew7Ff7gMGGAETi5jYWF6QPyahG2jPyNhoyW2t+KwI168nXQAMAFdMBhP/AGAv
kriKSD7iQHeZeYbCjJKsRgQKsyyk4UkZjOcewGK2T96FsNFQCeWGQoyqsVtMD5AKgnks/wDYX1mE
2hrGxKtlIRrvNisKzc8MpQAwHEEOMHBbBgAw3k5QAZChsBGKn2xbvD2hwCoHIBtdsaoYMEsVlFqt
BaeHtOFt+3KueKNFTiSpMYM4FDsOHBS5LhjFotgOFJEJUQFWi+RazgktK2dga1DVEcTjLMhIhBgM
8NFNYqf1Kr2rlq7TGyT1vjsuyNa77bFQjbRnvuY1i0n8ezP46ieqoR+CpUfKsqwNklyZyaYzAoIG
FmDPW+OMRAYVwQBkjwFJmPInMCNahjXDk17EexzD5niC0xXreBioD4JZFhKNCbAOREZa59zCsmGv
gcZnF2UhFhs1kNu3E2WAb02R9Opi2lcoaqxSVKkHx4mCYQQDkEHjGyRichgYMxCICTG8nYIpoXyx
gLcvd5V1KC3iNd0Y8wCNhhDaCMqB+QBAlZN2urSiviOdhd7GgqssQX2VCx8tW55PQEn3B2azXZ+L
EqOBZ1RkRkrRcHxCB8ewBar2A9uCLuQYATrTnsuwp5dl6qwStWMlRkAB8j2EHJYshEUEEZEACnBM
8zHniJwOOI4oBgLiKvEuoM+0AWKF9wguYRrC05HPnHHx4nrJArMFPgqonsxPaonJGlLkhgwIUgKK
8hnBZGacawDfUsO0Y2wTGscl6bnnod4uueS65EK2ghyIHVhZr0tPwq2h69hH17hOLKT5EyRPLQnJ
+kGYDiAEnUr9l+1b7bgIRADlsKV+nMCcwJbZmBgAzcZ7DgAOqNxCXHCtyZrTFby6ukNQeDWZmsqd
HV7FDW8g2ctZVZErKizgi+6wi1rc0WszW2IhHEoVglglWI1pJ6sK3Y9lj9wHID6sy+PBAzCATxEC
5hTE4gz1nCg4QAlwJxYhQhHKtT7ME3kw2GcswOICTPE4jIVoAYEzMJCywWgQ3NPd5zErzFrrU8eM
BDhS84qpNtaxtkiPsnHN3gqvYjXxPxUIFPjjxi4jKDOKg8VhUQouDUoIDAh2EDgTihL61DRtCqPo
WIGpsU8fP/qAEnGJgmIBrah/8mfWDEB8+SBx4nxD9Q0JLH7zF5Ev4KOwiWwXIBlhGscQvxAtE512
P61IepmrQqEVULFrAOYcOpVkXmjj1O9vtNdqhOQYiEAgjjMTqj/ct8j8/wAZIBgGSVMKDAXIRTCM
HAJIfipbAwTkCG4mGwzlmDMPg5EH1KPAhnBZlBPYJ7MH2MVU8g1gBLOBm1wzWLK1HBGxAvkD7eda
g3mPcY1sA2Gi0mV0rDWBMZnECEmKIfMUNCRknMAzAojKQSpME4gQquCBArRvYItuILEYNXU0bToY
voFTbRdnXpeyzcLWTBBJycyqiyyDRxDqDDaziEOsDeDkwfTJwMQMVhYksSCGHEYJWx5z5ElRFZsC
0BvZY0Djj6lZK0sA4oIQljHUUg6bh7KmDKoCAGDJitDPM6sf3PsVP544xQwmPuYDAxjiQAyzmoPJ
jDYTCSJkmAgQ5MUGeswV5grUQhBDaoCXiwF2PwSpPMAl8zLiCvIwYMrOBJ4EgVoJlRGfELmfe59B
M9NcC4gGJ9SM5+swBPrChMzg+TPOSwABfPEkKnjGIVJnGsMeJnEk4AnEGMvk1z0iGpgP5kLMLA4I
YBUOuhN2lzLaNwlGoqAnA5Axk5RgRB9LaPC5hxjzxJIjEwgAYJP/AEPEDEAEgDGWHEt9a25BXChb
csbSSltgjO6uLWMWwQhHlmvQ0XWQMmsoL1OgHkTqx/cuz4/nkhQtqlfYDOZzyzCQD9xg5TEAJgUz
hmBQJhVhsE9kLMYcgNYIrkwWEFnZgBkcfIBWeHChhFQghVaHAhsAhtyQWJNTGCtYoQQYM/6BxAAY
AIQMBRMLCwE++ANlhkziDCFBJOFE5icjkoSOIIA4QhjFQzgASoM4icfgqIyLFpDFsu3F4M4d2nIQ
nwTghsxlBDACA2ZsUoy/UnyQMfWec8cj7jE0tixX0dgBta8QqwIyRVW1h9IWNW+QJjiQSCXwBa4P
JvXXaQlb5D2uGex3iFmBoGWUzqj/AHHshy37E5oGs12rb2A8VgUmcWyK5wUQFBDYJ7DC7GAkxiQB
iFwp5WYYu0ZWWVfTjzCqwgQklJ4A5YnMRrYDYYFLBaxkqBFAiqMIMG0IYMCAwAzGJlcHIK5niHIg
tOefIDAnNcOygizlD9wZOE5oYGIhBMCCfQEuoFhzyEPkAGNhT5MGIQDLTxHEBWcCFyS8HIRTGBMB
wxH2lAQ1OYUsSNVYpKsJ5nkHJyCc6KIxD8i9TEfzEAd2n4NDhtH8cuOQzCitGoOVrtdftxiK+Dhn
aujAOAxIzyxC7Ely864ce03ww38eba0tXhZrWV2IVNontYliS31igEkrknEDIRk5+9oU4zgMAERl
XGCRwE4ATOIW8GzMy+AtrwVcDxWeIAIPBJBgAilYWXOSQFXP/kklhxBngRirEK0CKR5EI5RaxCCC
1ZIwAFCgP90w0NdcDAAkiAhj5hAyMfGczE45PDy2JyABtJY24jWZgbznAZcwEqc5C5LZAHPkeWIz
khbAJiqwWaVDyzrcyzTvUJRZyopupsJBPvYR3ORa0W5o1rMpGIR90GYU2lreu4RKnK+kZVUUA4hM
8RuIhbBV/HWgHs+wT/7+SoefM2jKpUgDYEXzLBkfYJnBHJ49WIFWKSIZzGF9kxmeBC+J7QRljFV2
hq4wJ5A8CcSCePH1iFQsyJ9sZsQENOBgUiMnjyCMzJgIWEgzMDAzxjBgE8QEwrAQJyacpyAnENAA
J5HwBn4KNACILAAzPATG2CWscwBYwECEz0tCCZh4RmJnjWuC7F2GFDNgHzOQyrspDswL2w3WRfY0
K+B4g8EiERDFEIIByCoyaNdMnnk15mxXU5o0Wub9m3wG6ntBLNDslja+3j15DqFiNmdX/wD0uzTO
6r1AHOQSWcV4PkJgRicgGATHE5zBynEGfYsL4Jt8ewwB2nrzBWIPACjJBEKJArTjklMAEwYB4NPo
PWMhAoDAT6zznAwMzxjwZxUwjE+0wKgjZBXlOLQgGBeMz5KgniVPBTAAIQT8f9AMxJrjO0D8yFKl
kUwf+bAVC4QcyW9hz7ILTDaTFJMOYXUQXwWAj/wEyATksRMYBLRMqchiLFEYAt94iQeI7ZgDsENa
R7szVDvY2tzcNXUg2Z7/AGCwkTLFlSwTX3HVU2AwNzQbJA7DaN2311Fd915rF/UWcuy7RQOwZlwF
cLxjKROImOJ4kzi0CQhAfYAPaTA5xmwwIcitYKyIK4Rg4gWfQkMYEJi8oQ2ORypOGPhS2A0YBpxY
AYn0hwZhTMHBGRwBgGIy8otbTAB4jBysDzlBkzBh8TxM+PtHweMBAhJnAMQnEtygsbI8gZl5L2Xt
l/MIIJBhJwTOWATgOYFYwKqwOIxyFBJJzD9M4gBaYIn1JrMCIJ7QoDM5SsLLmJAGSxUM1zhvyLCW
dmOCALGEW6wAXmLcwjPK9u2o6+4LQWGNrWeiyutxV6LJ0itV23YEL2TeYPowGcEkIBD61nNBDaRG
sBHNjAhMFQEVcTiDCuIFgHgeIZiAxuOQQIcwlQA4yx8qRAZgZVgIzLgMYQGLDwOZKgGHMHKFYJki
fWc4C2B5gCrCYAc8fJ8T6zyCSckgzyIWAnI4B8LYGgCxi2c5KutY2LiQWijyoLM5+5hCMxmn1mYS
cIAJ4EUzyI3/AJY4GSYnheQEDzkrQgghC0BKxWJPlg7cQGyfOQCSFGD9D9c5+FM8wHMRuMOy7B7L
ce0kNZidXax7Tsstv4ODxENgEa3ybMgktMMQtcCLj1mLXmFcTEAIgCmN4n3GYE8CeTMETEUxlLTD
A4HIQjlOBx4mQJlZlckjClYGMbBInIiEgxhxIzOKzHgzzmZzFYmF4jrjkphBmAR9JgQI2cCcp4jA
icmEV8xgI1bsjAq3KBgJ7IACWmMzM+hMUDGQAcYUlg//AJYlmC8QTgAZJrwSuIrYHIiA5ggB42HL
f9gZmBB4hhi5gWBQJgQfGYGlZqy+vRYnVadH7j2VjDsGsbLWeeLmJTyDUgKMCcfPHBUeQpnGYIPH
M4mHjD5gDZZVgC55EQDlOMAXJzOWYVMUQTIhC5+oXxPWIqqDnBP3QET6TOZkwzOD5hJELGHlFwZx
EIIOC04rMCZEMUnAwIQ2VDGEAENiZyDUDEUJNhiUNeJtUMjQH4zOU+vww84ghPgcjAlhnoZh6XSH
OeTGAHOfLARlnIiKxEV1JLEl/wD0ik/OIYRkquJ4mZmAzPxmE+RayHrNhLOx7Sqw9gtKEFVUg5io
wHGcBAgzhYPMHIE8MliIR4GTCAAph5Zx4GIWzM8phhA0ZnEU+eBhysPmZMIgKiE+c5mSJ9YOcIzM
KJnwORLKZkCDBUOqxQsdH5KHiq5mMAkg+SVBBBUxlzMNniTMYAIjYMUDA8AmfeR4e/xLa1dbUNb/
AAPoAYEaKrwUlgKVERKiQqAtYAeZYllEFzLPdkl6mhWog0tDyBH1ZQZkqVxio5mwpFuQABAJiYgH
wR8j6+IWhaFp9R1ij9y7IsN9UGOPFkBYhHmYVJgVhMLliRFyZgw4Wcszi2eOAvAQkwjkOOIFGCMR
SM4OWBEDEQtgZzAFEdmBBn/SsYc5AMJ4zOQMtCgILMsU5mDC5yQDONkQFJznM5DFp5jMwgYGENCr
ABzkPA/nIyc45GIxaKPPIZZ8KLPWDuGPsu0zzBXMABbjhuIELII1hM/8pUchBxj2CM8NnjnDZPYA
A+YGgsIgvaeytpwVi1biL9ppODs45V1NBWBPtE5AHkYWJ+CQZhZxhHwZxJnGYxOsH9y7FAd5qsBG
Tjk8mZwfVFPg4xlIeOT5HkFiABiKfDYYBJyMJJiqxniE4mYJ5AAAJ8TliZBgYGFDlQBPAgDQkiBh
MsCARMzOATkYacRBxHx9QQoIOAGMI8KMwBRMicvLVK0VOEIzMAwqQSFYHyMlYMmXtirbQNrnIgJm
SIPMrH8w8QWfMMrrzLGBNOZY4UO5hsheFyfj/oCEEREM4eeIE+2e8LDejypTmynzh5k5z5Pxj5Ig
nmeZ4+BDOsH9z38fnFsEqpiWYV3V4eZCuSBkz1nOABnxxJnHwFEAEZYPMIBj5WBjkKTFUKMqIJym
MQNCuSDMiYJDEgrZiBshhyAwI5YgO2SDgcVGRCHi+yEqYDgc4U8BWmQCScEkTOZlRHJEV8w5EYjH
LE+4wEwqYcy7mx+5rGRrFtpauw4EpXkzKvFPDMc/CVli7wYyjBVttjPymTMGYMwZ6ySoAgUGYnFo
UeGpzF16xPWk+1S4ypQiEQgGFTkgzExMecQj5+nx5EDCdYP7lu/t35v9qg/a5/a437XyP7ZB+05/
tk/tkP7XG/a5/bJ/a8/2uf2yN+2ZH7XP7Zhf2vH9tz/bMj9syf2vI/bZ/bIf27H9rn9ryf2qD9rn
9sjfteE/aZ/bcD9syf2vKftnI/t+W/beS/t2T+2Qft2f7fkftvI/tuD+1xv23j/bIv7bxH7XH/Ag
/bZ/bM1/t3JP2zNn7blP27Lftma/2vLft8/t0P7bB+3Z0/2vl/bZuftPBv2fKftHrT9pjftHL+0R
f2bk/wC1Yb9n5D9mn9o9b/s+R+ywfsmR+y4H7Ln+z5X9nx/ZuSfs0P7Vhv23P9sjftE/tGP7TKf2
b2t+1z+1Q/tM/tMP7TP7Tj+0T+0T+0ZP7RP7PD+z5P7PD+zT+zz+zz+zZ6z9q/O//9oACAECAgY/
AFif/9oACAEDAgY/AFif/9oACAEBAQY/AN7qPnljxK8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHx
XmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMf
FeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x
8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rz
HxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPiv
MfFeY+K8x8V5j4rzHxXmPivMfFeY+K8x8V5j4rzHxXmPirnxXmPivMfFeY+K8x8Vc+KufFeY+K26
nzxx4re+/L5n87dr78fmt378vmfzt2vvx+a3fvy+f527f34/Nbv35fP87dv78fmt378vn+du39+P
zW79+Xz/ADt2/vx+a3fvy+f527f3o/Nbn35fP87dv70fmtz78vn+du396PzW59+Xz/O3b+8Pmtz7
0vn+dsPvD5rc+9L5/nbD7w+a3PvS+f52w+8Pmtz70vn+dsPvD5rc+9L5/nbHmPmtz70vn+dseY+a
3PvS+f52x5hbn3pfP87Y8wtz70vn+dseYW596Xz/ADtHMLc+9L5/naOa3PvS+f52jmtz70vn+do5
rc+9L5/naOa3PvS+f52hbn3pfP8AO0Lc+9L5/naFufel8/ztD5hbn3pfP87DOZaIW3tQGnbO5Hme
oLc+9L5/nSSbBNCQJy7PTgXhHLErZ1GuuNBzC3PvS+f50ylMh2pHEoTkWiS54Ojt7YIMsTkiCwC2
f8yH9QW596Xz/Oh7zPlCMpEyJVlKG7DXFjpOIKhtiJfcNGW1uDcBMZRkRyLrc+8fn+c5lIsBdGUi
0RSI4LpovMiHJoh7jflHUAwkcBkoiE9WogBrLc+8fn+cXVIDmV1bkR3hV3Y91U20DM52CaJEOAqf
EppTJHErUWHcjKFWu103zQlIuAXZb3s52PXB1sbMB/ciPEhbn3pfP83mnMAjBMHlyX4e3XMlf4mk
ZBPKZJ8Vinl4JhQZJyaDFAM8HqUBeEkNyBpgV62wQJRrubeXJCIqSVs7+sTrpJBwGa2tyUm2zuQM
Bm5Botz7x+f5uymbRBKlI1cutQFE4PNVqukJ5GuSAi8iacF1EA+KLzLYheptlxiv9POpHlWmVitU
JMRY4EKU9L7tBo53IUts2PVHuXtZAvubO5CMhi2oLc+8fn+bjksApbG2dRlQyFgmBumkwJsCa+CJ
JBLOXxX4cKRvRk7BE6XJxQAwKeqY+C04ZIbkaYxKEx57SHFGMu45FaonTOJoQn/uRvzXttJIB3oD
mDILc+8fn+bbebcNo/WuqXT+yKBVTO/ALzjZFtRqaoV9WZvKRduLLRtdIJqc1tGRac4uckcexmeQ
Rm1BcpzjdaG5HitMqRlSSEsDZUHWLcVHdhhcFe09xtHUDu7ZlwJkFufePz/NOpAVZx8VScT3pwXH
a8iwGJXp+2IJxnknlJ5G+JVQRzTHwTAMmVT0mhetFt7kog+rICEcPvFCY+wwPYIb/knTVkc16YPe
obTtuS65IMBMSqDGq1b40RFWNHUvdQMNMQHjGhCOxvSOqIeIFzwTbW1pH7RrJetpIhI15ra9vu+S
U46ebrc+8fn+aDyIAGJWnaGs52CJMzEHAUTkk8ynKovQ3ZMXeBKae4HyFT8ERs7ZlxlQJtyTRwiL
IuSgRUHFMS/acR2bW3uS/D1C+C3DgzuqF1ZQhOAnubdIT4cUN6cSds/u0ZenP8LcP2gEDPfcHGqI
nOUixJ5KO8ARCR1QlmFH3GtoGhGL4qXtWpIdJOBzWxtTDShvwB/6wtz7x+f5natyQHDFNswb96X1
J5yfhgn7arNM97oxe6bFPknF0xxVbrl2EAXuSuksckCaEVUNgRImzbhzbJUDDNM78VL3W8NWgUBx
kUxAcUlHBHe9oNO5jAWPJelvAsKSBUpRlqEmeOQGCltANOIfbPLBS9pvltrcOkg/ZlmqLZ9x5N7a
nGWofaES7Fbn3j8/zMM5nTEXJRjsBv3jdGU5GRzPZmuC5dhp4ogGiCLXGKPiimuFqitQrmq+CAi7
8EDpkDKgpdaNykh9nFTjuz0EDocOCclZpRuuCjEVqtr20btqkPkoh2EqELpR3fLvtT6ijAE7c4mo
XpzaO/G4/a5Ie/2RQ0m2BzQ2tw/j7IaT4jAoLc+8fn+ZZ3Nw8hiSnkWiLQFvymQiMLlFWQw7GCKc
p37k4xuEwvitcG1YOHQO7MnTYCgHgnkSTmVdOOzWRQKcrh2A4BRlkXUJixAKuCUdyAA34ih/a4LV
EmG5A97hS9vvN6pi0458QnL6RJiP2oFQ3dsvCYBB5qf3j8/zKO5uFohGcjT7Mch25qoTjwTzojpo
LBOic0UTkmPYxL8FmUwrLNUvgqlyqDvKBPYyzTnwU5jpMgQEPTciXmJVC7XOCgJGtvBcMlpNWsUf
dbAbdjWcR9oIbu0TGcSo+5cbfu9qkoG5HBbf/wDz9yOrVuRjtyJsJFmU/vH5/mSZyLRjUlFi23E9
MfpV1x7WxKdupMKoEWRzKPDskM1TsYLM5pzimFAqLNWZAhM1U0anNObrb2RIiEjVlCAJ0tYJzQJv
2ZFGBocDmqo9Qnu4QB+anuaRDWXaNlzXtSLevt/1hT+8fn+ZP+lgeO4fo/JKEjVUoOzSbYBUuiME
5x/IKL9lbJsOxs0Kpo0Hx7KramAC5avFDekHjEOBmSp728NG1t+ZseAW7tRGmNJRGKkR5yXB+SHt
9k6dwhtyWXJEkuc1dl6m3EnbdpTaniva4n1tsv8AxBT+8fn+ZE90/ZBKluSLykXKur9yuql+Cr4r
gEfyn/IrRdI7yrqrFUpwVcFSkcAnVL5Ky2TKwkHQiKggM6HtonpPVLOR4oYAghs1KIpvGggfmjKV
SalaY1Kjv+7eGzcRtKSPthADZIbSFsbMS8fV2zE8DIKf3j8//wBRdXV//wA4e2jedZcgr/l0vimQ
7KKpZeZXCqQnBdVJVJeKe44duZNHXDsonNTgFCZu62tuQIeI0yOJyQ9xEORQqG4Bqk7CPNTG4NW4
7CMaqW7unQxYbf2iTmhvbsRPdwBqIqnZ7X3MA8tvdhr+7qCn94/P8yN03ETpHd2uO9UxVcPyNRos
2XSKK6qqBWTkVTkssVSS+ldQ710mi5WHbnLAZJzU5rkVtTHmj5eaeQcmJBH7wRFiCyjMw1jdAfeN
ZAyUjEfhYniD+QFP7x+f/wCot/8Annc24GZF2wUpG5JJPE9lfBUouSrimITktkuiLc1Uv+VxX0qi
bFVWkhPC2Sy4JrHs1HuTmpPZOKkcYE14cFuykPwxLBatrqhu9UWW3tyDwERqliCMEIxDAfkBT+8f
n+Y7FTjENCXVHv7H7aLOWSclD8hj4r6Uw8VwQEQSTgEJGI2oHGdD4I+puyluEUIoAVLamLWOY7Xx
THxTSqM0OXYylG+JW5CY87yi2RUoN1SJLZrblvVMBSODlMKDIK6v2iuIU/vH5/mRt+4H2TplyNu3
guGaaHiq/k5FVshObw2BeWJ4BDZ9GB242BDpzsgcin2dqMTnj26Z0mPLMXC0bgphLA9rZrMLMJxZ
PiVEGglQqEZyEmj5uCAExRMJheYeKuPFXCqU0pjxW3CPUTKMXwqVP7x+f/7+pTSmAclc+Cx8E7km
xCrLTzT6wq7ifWFubWrzCnMWTKieVk2GQ7HCfNPinXFVQ3PdA6BWO3nzQjDbIiLCycQ+KpABWCwD
5Kk1WZWjdOocV0ypxRlE6mwRe64JxZOPBOMMFGWRCG3El9IMSiDgrq/ZQpp7h5OqHUV7dyw9WFP4
gp/ePz/4l1dXV1fturq6v/wL/lXV/wDg3V1dXV/+A8pMF5/BdIJ+C6QI/FPqTmRKcqiY+KLnsrkh
qVBRMp81W+Sr+QxKLB1QdgnIch9Ku6qrJlUKyeJV3VQyuyunIBKI0gIVcFOE4oqUktty8tvpkvUA
6J17/wAjibKgMiVVgtiRLkbkD/MFuDKcvn2XV+yhPZdX/wDzbq6urq6urq6ur9l1cK4VwvNZXKxV
AVSNF0gBPIv+RdMbItSlUxomHeqXVVdivUkHg7PxXPFGUjaw4oklnKe5TMrBdN81U9tEDO3zRbsr
+TQoiNSF1U5qpHZqMwMwUAC8nuqGowwVR3pxdZEIbc5aROhexUibxqD29Fyn3OuXGyYBk5LL24jQ
HdgP5gt4fvy+Z7LpmW3K+sEtkxZUCt3qoYqyoCqRkXyCMTEuLhqryqkSrJ6snqqO6qCrFUcqxTsU
7FYhclUsmlMA8KqhpmqFXqnYkGxTSoulyE1TkqxNA55KoLJ0+Ds7okVILGKyRZWKqO9c0epXVi2a
xXLsq/ZZWRlQCFSDwUoMAW6ZZI7ci5YVHFOzg2KrHm+aZqlWui4oEMjZMQVQFUixaq+YQYE81Ufq
TRkwer2RjAuc08i57XNFSg/IzWojpTgGnwRo5zTkUVlZUXTF2ujumHQMeyjhXJTsfycxknjTgUxD
STEMQj7TfLxPkniOBTivbQIkggBADyhe2/zYf1BboEHkJyd/vFDTttI3eyJYDIoamIC2pHeEXB0i
QNaoxiXYtqFk5kG71QQIFMfFD1ZnXQgQFuanPUYiJHgURsymI4E3+CMiZGR+0qyIKEtwz0nEIHbM
xmDVR2yZRMixkbeC1Q3XmPsk3a6Pqkg4RGKG5sSMZC0C7lD1omQxYrVsTID9QOA4LSSZPY4IQkfU
peJZNAkFdUijpmTbS4TEtmChHUOYqGKAgDMSsQKJpSA7wjpsMQBdVnQWBAQMmfNkNMmJREz3BEep
KoZhjwQkfcSB1NKBiXHcpxMwAH0y03WgNiyYkjMhR0TkSfPwUo7QcSix1ICe0JAFzU2yRnCLxAcx
WsbZIP2sEZSjQXQjqNnJTTk2l9FaFTlOu71OAXyMfFRMf8cA0Nr49yciJHIJjEFy9rIEAAHJTB2w
ZlxGf1hOemlNNicl+KC8gRTii8TRbkjGXqUEWwpdRhHVFgwEavJSEzIAWBFViWuSGQlrMYfaLWKI
JJwBRBrL9oZoaSTIm4URCR11DZ9yk8pA1DIRBJe5Umk1Q6EpSBF2NVLb9sGiS8mzy/IpU5pzU/kX
ZB7YyyWgSBagEaoNqZDUC2C0w2XfgVqltadN3FFqjAdyEo7bkmqbb22e4R2tyPRjEhNOLA24Lc2/
SedQJstO7EanoUdswedhJk70NkFQoEh2QE9sNwRAiA2LICYo1aKU4SctSlUYyDEIQ3Rq28DkhJwQ
ahEGIjEfaK88YMjCB1avtMv2Y/tFbJpuTM49RwrgvcOCR6k/6iqhwRRARvkmkC71W3Fjqi9cGKEi
5i9kIxaJlQDAlepuiIi7AAvVGjEqUTwQwK29raIMgcESSIuWqA7r0ZgE7ZizrbjtiLRbXEZr/AB1
OJSiBRlI7usxw03OQVQXfFAEmT2CMYAkk8/FS2pDQX6hxQEWi1DS6IPVIYhARvxKyNmKG1uaS+5F
wR9nFTO3SOo6eS1AhxeKjt0EMWovxg4ra6aOyJVLmQc0Uju7IEx5TDpuhRjzWlwzuK4qxJNy66QX
v+tViXZjYlkZ7hEjO4jRmzQO5LS9hVS3tYYChshGQIJLkHJCJ2gZkagZVdf/ACdobZEaCFLFH220
4gGJBwBCnCRcRA+a0iWoTjqAyTfun5rajEtIz8xsKYqUomoYPF+vBHb3Dol+9gomO5FpUviyI3Pc
R24RuRUlAQP4epiTk69OMQATRf8AtrGjhuKEjEkSAIdCcIawSxILsgDAx3BhjwUJ7cZSlINIEHpP
DNbUp7bQl5sSeaO2xlCQBAIIYHghtR25EkHUZHwXWdANzcUxWoGJJJAGKE4tH4V4I5lSBuApLpfX
AaWwc4riU5qnIaKyGXZRVqUwp2O1FpnPRWpZ35J5B4i2DjkjER3ATbCKjTWIHCoZShKLbkmYNQIx
9yRp2+r0wBVrqT7B2yKQMfpRh6DydwfrTADaxBjRDb915HrJqnvR9AHTGzppvwaiMXAAGN0Gnp+S
0gvzRODc1EwqRdNIgHAFWEhwKENI1M9AE8TGMMHYH4L041n4owkBGenVq4qW3MNKK0P0HPBOZODZ
apdwQMvKLBMwIay2STpEZxNOa3wNmDjckNQvdlPeOzEbW2wkHxOSI9NiCXnE2CjKsoyiJA/WhLQT
EvyLIRiKHBCeggv0g0qFr3IymDXUahCUhKgaiOpxpu1yoj03kDUk4ZIbkQ8ol2OGQXqE6XIbT5W4
qUzuaL3sGXq7khMyAYixohHajKciKgIbcds+pnbuKEtyEQcBIgOpz3j1l9JcaXZGVZkCoBerre3Y
gR2yX1TLICW/CTX0l1GMN0SYMYkNqdEe32dvdizMRih7Sft4wkC5nj4q4qaAqMIN8FIaol6MLrWS
OoFgDktE7fFNGbNhS6E5yA1VuojcqZMzHBaJSAlcOTZPDTKMaEgOhNyIQjp6RcCtVqBajKbyxJuo
7uukBYVJdCW+T6Zi4BevJCe2QJ4B3PevbuamNPAJjMBokGuLo7okDtvFzyC3iZh2DDOqhLZl5YtI
mmK1mOqOlr4laNQ25u4BxKkdzbjKI0ikg93U/cbezpE3AkcCzAlehu7QlTS4aJccQgIxALMHz5ox
3NqM6MHDhS1bcBGnUA0u5A7UDJ3BBzdMAXN3W3KX+GBpYDJGesajMj02sG1OUPdM8qBuLMo7cpsX
BEhYVUdo7wO6C5IFEN+e8QdsCJhEVIBrVEQjvNIM5NmW4YEvEOHyCIJJ5ZoxnEaZ0kCLcXR0yrgX
wUhIvNqqSPFvl2HdmdEI3fPJaRYW5LSBU5J94tNnEBfvVKDLtjtxDmS9PZGobXnlnL9Slt7ZYmqi
TATlHEm6lrEY4SAFE20REHCIZGOqsnDFatwnbcUpQhCDxlS4DNzTmQB4LzBwiIyjPg7FaTtkHE3X
lkY5WR3dMdGEZFEHYBmaRkMFWoOCLUeilvGhdo9616QJNp/WmBbkh/qIDe2+IqORUTtRMINSIote
2REjHFPPckTzXUX44pj3FAkaoYhCUS4NlW6MzYBe3AoDuwDfxBb5cht2ZJv9oqYkSRKw48lLaj7c
GRBBmCx5rTKQMJixqwKBLGMKtW2S1be3GEYijCvMqUPeSYSbRM3BU9sg7khTaALRvdSlux6SXlAG
3JS3dkGG2cPpTRAcKG6YEAEUZ1/8cCQAYswbmj6jm4YjJMfKvUpIxcVW37jb2wJ7dZB6FHc9wBKP
2QME4JhE+WIX+JKE6VFHT7m7M4UyQBMzE11AgDvQOzMgY1q6gZde7Kkg/wA00YgbpDiQLsjtzh1E
0maIw24Ei4ILp5bZibBiHRjuS07kcDxXqHcFTUc0JadA3A4JD6hwdOZSa0aOo6qsWEpUQO7LTuSD
mINKr3ENvcOuRjpg981D0503hLXA3iid4tAj4oDY/EiAdRBPmwBT7+0aca8U2ztEQi7PUIxGy0rM
AyjJpREAABkyGqRrJyV0kzJPlCEYSAk9AbhA+5cRlQF7ow1O4xRhugHcBcTeoCaUnhuYkebSiNqW
kWJe+NkDIjcH7J5rTHahCV3AejKU/WabtpIun9QRpioyhvj1nrE+VuaYGG6WqDxUtR6ZhiPinqQa
HvTCIbkU4YSzqtRrJzL4rclOTEzA0sTQ3PctALwJyxQ2dvdjETkYGRB8pjf4r0obvqEONMaEgI6p
x25yZjIuwxdCI9zESepi51KZ1agzamZ1JCuCGnqkPDvR1Yl2Fl0jpFzgFH0WlOYf1PmnkXOZ7Mua
qXyUdxtO9v0gT9mOMkPbn3ESWrIXfEqYbXthwCMclJ9sTlMkxeVkZkgylWlgncGQNxkh7gDVGMnL
L05QMjkRZRlCJ0EKM9vZ1arFP6DEXdU2ZDuTelNhwTT2yQbMEQIEHI0UoaRXjZBhTEhbvqF47Qd5
WdDY2R+FC5zPZVdXljUqOSMI1zRdVtii9BgclpmORwKoXjjFOKNgjCNIj4r28zIavW22jj5gt9jq
1bkn4dRUYhwGfmgSWFx3YFEaixNmADoSkDxk90CHEcVolvxhAVqCjIGMgMXFWWuUdUQHLfSjpHSc
CVH0wX1B+C6TQbcSLXW8AGfdAPGihKRLxMiCKn4In24O5tkOSRp5o/htLMF1CPvdqYlEPqizvxUN
6BltRkAdPNTnt7xJgCYgi7ICcep6OnqDeifanq0fZN0xcFanc4BUNAn1E5kosNYZnW0TuiP7QOBZ
PtSMxudQ3DijuAAmJF61zXqe4OnbYiGrF+C1NTbD93BBx0mtboCcTKUg8a0oojZ25RIuHJdDW+qz
uy6pahlwUtoDS9hSqO5tyfal1Ri9uBU4bhAkcGsVoOkzYtIUd80Nrd6ZScObeKkYkSiKgFnUN6G0
JSiekmNHW7Pe9qN2W4H2zppCWaG5uDSGEdIoA16Iz24kxAyvyTx25ANV8VCWgGEaxBkLHgpT9ByS
wYPVa5bRgMSQwUd2DRDXvQhDa3DCD+U4PxQEt8yncmMSQyeGogLc3fRnuzlAwhR9JP2kY7oILUEw
y8wEqERPGpQO5tgRo8oVQmYyLUi8bhRIhPQTF4DEG9VL0tmcIObue50NrbidUwCBa6MJbchKJoD9
CluS2ZxYVlYNimlAQItKP0p90y1vUufgn2ZlyANJ4UdS5IEFqW7AHYYlRO2x2pBgBn+0qqipU59k
YnyRrM8Apb0x07e2REc/1JoxgSKMVQaX/ZLKW4TICNbunLseK6Tqnxqy1xOgnGNvBHc2N6liSQuq
RlIUNaIxfGgWuQsVqmRGIxUhLcFe9SkZgB3Dr0tgy1ykGIojMk6gWkcSeC3pUMtttJORU3kfxD1c
UyfBVugZ0M6lGAqXvwTGQiSgAX4psMUBHqKaQBAtwThguiVcUxXtm/7sP6gt64l6u5wfTIsgbiP1
qQDMC51FhRR3NxpxI0zgDbiF6cIaohz1GgdEHbIzYo7e4ZbG3Kvhgo+nuSmSHlVxVFt6URQ2ooS2
pCep6gMe9RMaOL4ogRj/AIQFVuyenqAjhRBt2MSCaydr8FCHqO1JGJLHktyILxBeOdUTPbEsaoR2
t07QYNEVDLSJa2jIP3ImVnN0GyoEZCJAJwWshpRxxKJ9RpRDmicggHHgtLp/BdUnzChGrQtyRgWA
Iv3I9EfThEDbfgLqBMh6Yix088UZbQ1yjQn6lpI6y/mFkIDpkwjShIiM1A6t3hQFACe6f4Qr7v8A
0hRMp7uklouA1FqAkZ4mxKEoyEJxvHFQEZ6nYgnDmm3GYsL/ABXpAxAFiLoQcRIDOomZM9wu5wDo
PuentjKL0UfQMpbg8wIYKZMhEQAuM1Oe0I7kdoOXpfFkBLYB3aX8qaQ241ZgHDZptyA1C5pEcEIb
AGzD7cyxk37oXqbXuCdkmjkOMarWN+BiGBcC5Ut3e3wZQjQ6cDgwQkf8ONRIY8VMRI1RZxSrpiZU
NQyJl1El6lSNsWsygYQJ01EgPpUZbzylE1B5o7cwTGQYtZad3YJMegNJrYobkROAOBIKjubcyRZi
EeSizeWxVKA2TWQGBLd4TCq0xtic+0TkOvcqfuqWwCDObxIyThl524oxEtQxChuCkttnIx1KUA+u
X2hRlOG05embKxY3FUCCQ5qhvbk5RNxAGpUtmEBpOMi5on3JE8MFpcRPFMcENzbZxmHTT2YSzupQ
htiEZFyxOCchUWkd6hDAmqMizRFETn2OCnRBXVUIgRqbFOQWNk1ivbDE7sP6gvcQ392UJxnuEUd6
4FUdjcGik8WlIvDVijI7ReeHNAHakNRYc107RY3LomW2RxJX+G4PEKQhES0s9RR00tsMMXFFD2+/
SbDwKjKUnmYAabBSBmBESGkBhREbm5oLmwEsV6kJmUAfMQy3DvSJ1WAquqUok4aXXRuTDR/ZACG4
DKW2HiAWxRO4TW9FLbjMx9xEfhGNKNihtb8To1DrrpON1OMSdEmlHlIOjpNDdS2yPMGCIwzQa3Y7
rMr05DpwL5qO25YAu3Fa4NWpwKEpF6EMpMzB2eoUTpj/AP7GVYAZtuVZPGOqI/8AcIWowAiCK66f
rW0Yy9Pc24kmUa6pcSogDrl0gZoH3EDEyFO5D1hqhiMUJR8lmKkwalC1Ex0kEVcXQ/DiXdgQ2CkN
kREbu1qLSACJcua3/UoDABxzXpmWokOCDVSjt3kzGQdlL1dwFjWL35qkhtyFpAr0pmUnsAbmwQkI
EAeYYlfh6o6qasgiN7cM6dABLABFtwwlFnrmgNr3DyLXUdz3e6J6urSIgeJWmE9MjEzO2ajTEPda
dpmHlD51R3iI1L383FS3ZQOq0dJwWmUCADqZqsUNv1TtxPVU5IR2pASgKVcy5ruUOIN1IksARXiu
GJ4L0R1RJxzR29ukcTn2xO55AXlyRjCmmw4Kc5A1kWTGMjyqm2IkyuXotO4DEnKtEdqe4ZQZ9JpZ
FogA1AcKR3WjqNBcpjJCIkNQqKIvux3AzdVUWbu7AIxclMYSpwTGjdtk5pl2CcDpkLELTObxFxn+
Sz3QnKeoGumNSm2dkRynuFygN7dDWAAsgNzqgfLMWK9sXr6sP6gt8M4G7Nv+orXuRaJdyz14IQiT
LS2jktO5uyEwakysBZlpE55jU+KMtuUiGpUh1Lb3I7mqJqCTdDcmJ+nEScP8EPdbZMIuxAk/itG9
C1NbZ0qo720RIRjF9NbKL0DD4qnYY6SY+YgfNQ2tzbbGzFpcVIxiWjU8AhExLkO3BdMDQPUshGW3
JpFs0+3QshtT3JHaxj9ToxnEamYSOQW5tb1JSgdD41QG2xdqFPEiRxDOyLmMeJF15YzjhIBkdwQL
CiBnExlIPVXQD3TTk4OCEonpOd0NyUWd2IQ16SXN4VTFjd2ixYJmABzDhTlPSdvWwOktRrBaAYuM
AG+CMiYiUSCM+5SG8W29J9MmrHBGsahdEtJNwbL0ZyAI8EZa2MankVttuk6vKDiyPUQ4YjkiRM/8
1HTLVrgSAcYi/ev9XsSYDpLZgOtBIhOI8zY8FuTnuiNQx4lzVS2BHVEybWRUscFtkEagRp1C5Rga
ZRkpTmIxN9LsVM7cQNNCMnqtU3jAioAqoyNYBgDjyKhtygZSAai/1B26yAOYswBX4ntdsNdgyExt
gEZkrTohKAsGxUZT2dMZFptUMRSq2zuBnhLUAHYcQox2GjIVGCIidTCrKHfQo2715gTwouKquKqp
7gLTJ6eQR0l5xi57wn0EPVxJVmY8HdeeZ72XTMjvdNLcke9XqvMy8xTRmRmnd+zFAgkHCq9P3EfV
gzCX2h3omLsTR7p0z1Wf5DpwX/IqjomYvkUNe44GdVEwDkmhwXp+4aW3kLhe2iMN/bH84W8RGRac
6E/vFCMIiIA+KJEg8qGWIGKHuQT6uyCQH8wBdijuzn0bcDNpFwTEdITHdMQbMAPgiN3fjHdiXJMX
kRkUdvf3RsnTIHcliX4Ld2xP1wW0zjSPgtFp0JJYCuCjGUAPpWuUCwHTEYI7W5ERc0mTQd6iYkep
E6TEDplHPUpe43JR2tohox+1IfUtwGR9WM9O2f3QjExeIHWy8zNi6EYgGJkA55qNWcX7l1e7e78i
g/u3YMaqvuyaNf4rYEZHcAg2u+KcxJyCm3S13+hRjId6aJvhwVXYrSQTKPlzCMY7jM2kEfArROJE
sCcQhEhiMUzsP2jZaTIUcM7OUXO5HQbRIauTqZE5yH2dVStIL7d9WSO3uyMfVGoT/eeinHejLUAx
Bu3BVxsgKlrnJVJpwXTRaJwM+JwQgaQxa6MZEjif1LdnKY3J6SYRL4KAG0ZlgZAgAO2HeoSiDGRD
6RFzHNyVp3NkmBJ642ZsltT9rHUajcDNkxRidmYiSBORs/NUiXqHLWK6onUxqzseCEydRDVZrJ5y
6cx8lHe223AQNRjwwXRj9nELSCTuHywFW+8pDeA1GQAxWmTEBmotYIAqIxZm4osQRwKEJR6jQKGx
HZnuRuZt5pfaKMIxlsCQpJh18CjCZG4IYYSbJT29jbB9QvKT4YBDa3OjTfG6PtfbkSkR0k0HxTTI
BcjhRVIIOI7Kpkdrck0CGicitv3MT1boEebBNcZK1V0h1ZWTMrKyZuzj2OqxJWqwVEAQQDiyZP2l
g6JZXqnTYqMgxfBUiVdlGr6bJjHqOIXtTY+ttv8A9YW9H3WyG9SYEojKRuidqAG1G4DRl8UYQ2yN
7UQIvWPNCAhP08hxRht7MpBgDRdcJwkP2gUdLknK911Bq40KeIdSEQQYgy5shGMp0wqpjd25GANJ
yiwZEmkjg1E+8HI8gwdSLkMGFWV4g4kyR3xKJgaEA1RloGomvJdI+0K96iDZg/gmjAlyYnuRMNuQ
lIHwCJ0zGqPIMFsx9vE6CDWWHNO8XA1XxGCrKFwDXMXQM5iIqWjVmQ4ZoFeoJMDcJsLvwUonzVAO
KGjdabsXR2BcHkGUoS6pRoS9FGLWs9kQCoxW3wAWo03oRJjPGlwhtt0wqJHF0wDA3MVLXuaYswjn
wUTGwKI0QPSQKY5qUpiMS1CAy6ZhrIxkQZyuwcoCO5okT5iFLb3PcPolmxIK/wAQnnJb21rJM9s1
JoGqVLb3o+pCMhuacyAV6sYREZwMTE0aT0KO0BH1NT63wyW3KMI6Ijqi9yt4zg+ofhNgU0YHXie+
vwW7OEfT3IR1GRuGyK0FhuaQSf2nUxth9Jk8haIdCMX1yBBkSzEGiHWdb44BDXuCMaCUhZgtTRlI
OxN0BL25Eg7GJLdSjo2iYwpDqtVDc9xsGWFC3emh7c7kmqxovV2tnRLU9K0yXrgSEgaUZTG7GJiZ
Y36rokESjkT8lomHrcZKlR2HN1GBLwhYcSsimIrmqluCpF+KoAr04I1LqpVVVUVQ6t2CIuUDIKci
BanPs4drwOoH7IRjKJiRcHsMSKHEhapG2KYEnmjtxherp5Bh8Vq8qe5zXtiI1ju7bt94LfluTkJR
3ZgiIsdRxRG16syMXZEQB25guZycnuQ29veJiTj+tESJBJckFlGIMxI9RkDq+aM9z3QizAtAE17l
ES1+4kD1TA0sOCG2IziJAkPIHDkozhsAS2ydJekgQLhTnLb2wJBokBiK8FL2cARDalpNaFuDJpTi
JXJFXdEe42juZTFCO5aRvyGqpEom6fY34z4Ah/BHb3Y64Gum1e5SlD2wlJ+kRJIWqHs/xcnp81Hb
3dswORqHRA9oAAbsPG6H/wAaIoXtRaZ+3hpIqKXwUY7sRGREhOIqF07UZEgNSjrq2YaXODUwRbai
HjQgC7qcSWiGoL2RgCSw701nRMqMmib5IFqgXZCUgNQsVExesmkxzq6MAXIxVL8FE1LWfJNVmAiw
yXVEncL9RFOpNLBAxLAlskYl9JNSWVqEu9MEJuxjgjQAgEsmZQ3aCRiXLsoykXi4q63IxlGQo0pF
wWEcUaxkCWvwW51xA9OeIU4GQMpBwBfJAPKYdy7CuaJjChpW9FtaNsxFgZHpJsiDF9qddYzWoGtm
wzW5GtmNc1AQNSBajrQJaRJ+mJ0tTHMoxl+HvCsZ/tc1uGe4NUK1vIuzBS3dtzDbYzIwezoYk2DL
QYhxemSAMXJDnmtQDcD2PG61Evn2MtTvwTiLGxinjiLYrVIckTn2VLhfulOLfk1/I1znEFnEcUCQ
WBohOOdCn3DqKj7aNCansqHRaxTkOUDBonCRqyO57j3A3NyV36XXq7kdRFREVTRAht4ACqoqhUMo
yGBr8V1SJL0HBTjqbcFIhHd3JEwvpFyva7enRGO/ths+sXXuDEBpbsixDuHLuh6ZG3N3MDY8iiCL
1DrqgD3JztxrmFGIhoiaHSSKLQxIpc5LdI2Q8YExOIooD92XyRgPNEAmlE4IIe4W4TMDUdRc4ptc
SVp2zdDHFaoUIsxZTh7o64wbTK8q4Fat3cjAYh3PgtW372O3EAgwBa+Ncltyj7o77gAE2l+8jq9z
EBz4ZXWqXuxI1sb/ABRjL3Qdji30r2+rcctLq/aXVudOmNeAN04k51Es2LIDUQDFhS4dbzBjSvcj
IFjjmi5RLsWRBPcogG2WaLuDxRgOk/BSjMVGJ+hPEvwwUJiTExBMclHymIuXrdS1sYES6RdmdlKu
mLm6iPPEXUSJaRjiaoOTICxsmgW03dNKLjEIaKZhR2hWMbFAE0wio1cikom9GP0IHciYxJbwoV0F
yQ/wdNJ0eq+ajHcgJGP2hQlDTL8OJeMMkYzJIOD0RGggkuC/BNDGkiUGg4kGEjZDeE9LPUAkHuQn
tiO5FtJIBDEYLdnOAGkPTM96Pp0BuLgtzRO5LTuxJMYgMC/JMb5hCE4Akmkic0YHbYxpQozDjTcP
ZPElwbIxMScLLr2q8aKO3twIJpqHUK8lKEoEkYxBIR07UpZ0xQkduUZDzRapW2wYuXCrZNmmKuHC
LMxshTmu5DI9lk3YDdsEdzcxWfFEEUyTbcREC2JR3NyWqRxQJDP26BhdMFCe+NcAeqKG5sMGiGax
4I6RpliEd3blGbCsBdkzB8VUsCqFwgYgGManVQIgGUM4g07l7WT09bbNfvhe4q+nemWPCRZao0I+
0KPxXp7x9SH2XuO9R1eSWOQKmIl9I1c45r1AOmLajgomNdQcHAre+5L5KD2EZfJDb9tIS3aiVKR/
WvS3JNtk170ZtKQcscwujbL2wTsBKZ6cwgZblMQMk+3KfFypCO7OInSdSnlHWLuautWzAMKGgcIR
iBGcOoHAoz3pbgkC5DkjuTbUZyNdJJN1MDVKcQ4D5r20dJbqEQbhPoOkQDB8HsmhDq1X4tda5bZj
0k/GynJ6kDjgq0OKyREKA0IKjCdADXitQk4yxqmYsjHVpJxQmJgyAEWzbijGQYh1tH90VWzLajE6
p6ZOMEGo8T8lIHPtaVQc0wDDGqeKd6qZNTJRzDGuSqO9enE2JLniaok7Zi+Mcqp4zBoHBvVMO8oA
i1Auo05VXnHgv8RB5FsQtIDBmAQjt0Mcc0xhpnic+5R9USEftaS60+mYgDpmLvijpmaVDhdMtWJo
vMI1LHlUIk70QwBJka1upDf3gzhpCh5of6fcMo4TtXghM7gLXJoTzVDHWa3XqEAiMqh0DphGQwiS
S3FGUZN+2QVE7e7I7kmJCdySDc9nBDh2PgnNBmvK/wC8qFjkrqpPYzVT5IMAGQsmXJO2k/BaT1ck
JRqCmNTkF1jpJcjgiNqLRFEd3bYgFmxTxJGYuFqoDijDamYjFsUXq+KYDvwWgdRLOTmtILRyCL2w
XtR/723/AFBe4nOJjGW7NiQz9RXS5UoznHZERq1TsVEb/uPUkAw0UC/+Puyn7Zw73IxU4+zkdvYn
ERMSAZFs08NzUSz6g9lubZkY7kokDTYlYxKrdDVUKEt8tIxLiJxwUpEmIhY8Vx7HkWGZR1FiKNcF
elJztCoNj8Vq2AQD5hd+NEBIdIsSGICm8BSDQc454IGW0NGJAarqUfbtttiA1DWqbcIm3lApU8lu
7oiYz2yGF3B4KR1tJ82IUh6kTqjUSL1RnEvEi4r3Ko70RuylCAArEOXUpD3EY16QRWiG5F4yJadM
rFR3gTNnEhkyoSDY8liExfuTEsZXxLrbjYiIChtb41gFwHN0YQsHA8FME2kad6qtXgnIdO9DUKpc
ZhEyYDBAiTAkErUCDknRR1CgxWpg9HPcpGUALYtdG9HtLJAbZk71D4A3RA25GeAcIjd2RrFiMVIw
2wdIeQJNkNOyC/Eo7kg2+HbbahyqjH0oxADmQv2QaTGM5N3gJ5Ue5FU0JGRPmOadznZNANhVPIut
UJWLXZdQ1AcQmmDE4Isb3Q1AUuRSim0DU0rgoz23IOGSLgi1w3ZRCid+5ExDcU5LqqAFSuKc249l
fMbclWqoWK6rolEq7DsLvowOCcA6yVLdNAKBEfFSg/TL5qo5LSPPK/AK61S8E1h2VK1L2ri+9tt/
1BbsdwgxhuTEQ1qkINjZMASeCAeoUmIBjcEse5OSdWAOPgujbMjkX8EPR2tBxB/Wm3DEDDFlqhNi
ASzU8StBqxalQgAKYoRjF/UOF34rVKIgM5EBPue52otdpAo7cZHdBBc0EV0gEHinMQZPXGmCGjTC
OJAqhtxnq3DYYrVKuBC2xtbhINfTkx09yE9+Y1PpDNEsM1rjEbk41AkXBXuDuT9I7hFJZxyR3ode
6aPEAd7JjKT5aB9aPVJxho/Wo9QjKTgxQLODlwQkKEFwm3Yxmc7H4IxYaSXZrclGcPwtyuuriRzG
SBjtmYNXDEKm2QCaFqIeqGOSieCjv7cRKYmI1yKjM3lU963o/vy+aAFSmOGBWbKqGUqoHwHFFNEk
A3GCJj1RfD9a1As4QBq9vFdJuypQsFSQdAx6i1wtc3MiS78A6EQ+ogFr3RcNmgbtmtW7AMS8tHSW
4L09qmqVHyFlHW5EsAa0xQgTIdWqvJkTDdIL0DiyYyJycMhKJLiycwB4ihXTIxORWnUQRVxYspRB
Bo4NjdV2tRd3IBDIznqgQHI4oSG8WNqBE7fXEXIFu5RG3AyAo4qHWrcB0iwOCdlRVqgBcVTHFUr2
etulpS8sewZBZlEyqSqV7K24pn1cMEAKDIJmQMjU4IAWxAomiht7gLy6iRg61Rq9QQqf80IQI9Rm
pVinJcnFBBoFsKFl5W5oiUgDgM1+Nt6+dwhOEX243YMF7WFAIbu3pAiAfMFvEEReRPBbezKIYyb1
I3HNROoxmCxkLlfju+Qp3kIGQeVqgkp3LC7RCA0zJNrAItsHpcVIqgYbABJzW6dsikCJRcUUS2IT
mItQyP1o70tMNsB9SMNp47EAWGMjmVanZqxBTkOMl0hgoe4AGuBcfrRi7Edbc1KUiK2QYMOH6u2i
uU+onvUfVuXr+tRlsTBGEZWqm3NnTJqTjUIHanVrGtUdvEFieS+lPEkNZlIECbhupQ2uiW45oCYy
j9bqMNMhKNJRaoQ2t3eEQZamBALjmownuxoQAx1H4LdmJNr3JEcibqDtKRIMDzoUZb0RqJB4WUht
+Q1BQYXAKiZZWQAOKZ1REmwsOa1MQwI1YOc1CQlXSHibOSyI3A7VvzZkdUtIDXwTeqc3UombgxFe
L/SqgH4G/wBScBy5AEhcCyBEKGpF+aEZU8xPd5UTpEWrdGRwDDmUN4TIlWOmVQpRgBMu7g15MmIM
ItchqhaozkKs1/mgNyIlR8ivxImPxCbbhp3TTW5RzFA9HQEQJlmPALRuxgYRFpRCls7cIw2ZHUWA
ckKhLc0QJOJBiDkhIAkYh6L0t6RjeshqCMtsgxOWC5JhTsrRXZRjI9IrI8AhppCIYJhU5IRLCWSc
XFwOxx4pgNUs8F1GmXZUoAKpsvThED943V2VZkrQC8bshExESAxld0ZHG6O4B0C5QP8AqCMQGHSV
6W5KgtO4ktJbmFxzWkkCQ+K9OAJmC5EqAL2z+f1dug+8FvPVtyQrzKoapjYj4pw8iSMyaJ9wGBMa
AhkRUuXQlpcCl01nsGxK9PRKMXOo4n9SJg4FiQtW1I0Go1ZmT7m6ZkU6qrb9vu7h9OFIxwRM9wk6
agDNCcC8MCqiqK4KgxJB5LTIh8lOQsfkhpss+CZmGaoU5NlTynFNgh7aUYmESTFxWqcCmV2QDXwC
cgjB+KJc5rqeQxC6XHC6Zz4KJq7uWyCO7GPScCA6lu7sQIRqaIThEaZgEFsCo7caQEBIABqlCWqo
qHWsngyAjcZqLs+kMoilAgWCcgBMEQRlTvQNNRqQgSTGQavELXq6SxfiFplJ4sGIVjIBS3XOPSKA
EqG0SxqDLB3dSMmA/ayAso9YkaAyGVV1AFy9RYKgMSSQMe9DfnMMSSb4FaYG0nfvda26C9eNFEmg
NCCA12x5rTPbAkzuKKM9vcII8uqo5KZiBPVUEFmQEoSEn6nFGQjtyLlmIKMZgS031ALUYgA2YMGV
irqo7X8QtUbHsyHY5/WVu4GQAHJMA7rXuB9wh4hHcJeR+CJkb0qiZFMKRyVO2y6fMU57Po/I4pok
gZL8SAlTkrmJywWnag02u7BaN2LA2KpJjggJtqsCvaEGMwd3bsbdQXuCA4G7IHxK1s0DISmBlion
ZiBE4lCWtsXjcFS3d8+qSaai7Bem+qES+kWfJHRSJsCFIbm36kgNW2RRihHVohIiM5Gul0D7eUt3
bIJlNmarIQkWD1kobuyBPQGmYhm4yUXFAWKlJ2LN0n7IspbU6kSGmQH2Vr3do721c6JaZRRJ9vMn
B5GqlH2/toRJoJSD/MqG2OkwJqMXUhtbcZGI6iweqMASQIjW4ZpHD8kglgcWsmFiEMjZOQxCAFyX
7k4FQmB4jiVVWTp7oSwW7EF4xIYZOp+1kNIlSUrcVt7cmeIERW7clsysZbduRTDsqg8gWovMFWcQ
cERtEbjXailuyAEYhybladYrwZAbW4Nw3YA0bmjpl1lyYmluIWqESY2cVCAfBAiz43WgGn0qLk9O
rUQb1oiJSJFr96AmT6RoQbLTCREjJ2OQouZamAXpS2xOAzv1FTEZVd8rKJdgxcYWC1SAJcgjkUNJ
qYkjko4gSup1bLwUAWOqhUtW2AY4inyRlszMTMWNXdQgWOkaSOIUiYBwcKIEEgmyGku/FNKDnMhV
iX4K/ipxLF2WmVsDw7LVTm6IiMCv9RO0S0QcSjMiuHBVPYwsPyKsibMj2OfyGTdlew6CxKeZfs4r
2wlf1dtv+oLfAo+5O3NAvzUq9JGObp8EXLMW5LUJCUqvxyTkNViMgvVjVgYyUpQLxlQhDZIeEqlG
tbhAQlpBoWQcs1AFLauCC3etEvs0L3R25z0xNQSM8FvS2KmAdhaT5ImYYiw4rWQDuCTWDsynuwAG
odT2U5zk8CCSxpRCUCAZFmshKgBuXxQIYk2quqJEfg666uGGSjFqEMhA1NwURbGt0wLkBXBJNSgR
QCxwRiajMpvi6bUKlMzjBTO+QBINEYut/wBSMp620CPBS24jXuSDRmZNSVbL2x3ZNKLRlEUIGZK2
mOqIgYgu9XQs66KnFdQbmmiKYnAI7+5pEYkgmQJJZSMdmEgLOFKe2BtGQalvBU3fUAq2HgtJhFuX
xRahQILHMKM4gULtgUSCQ5JrxTk6uSeEhLgaFNMaZRLGKrXJAZVCIxAdxmogy69Rd8lQ1ZwFOYcw
nhkUZDqFmPFSdqnBQo4iNLcCoMWJnbvdbp1DS1Oa2pYyNVuAGou6hkLFM3SSbhTvFrtV/FByC9Q9
CtuVREUkhGhiQ/eFISiCRlROHiPFEE0zZGMZjWKwLY5KW3OkoljzCrTsM4+Yhguo07XxNlXsYKpq
ex/yaIBVr2V7K9moWxVqZoMHK9pFq+tt/wBYW9X+5P8AqKaxzQD1CCiLRmWmMzmiw1AjwbJbgkxD
XQGttVC+S1bZYG4FqJxgqlyFpNnp2CVQUJ7dyKjihIPqFCRRHTJpGgcVFaokgndJfUclGMiQQakY
grQKgBqqURc5oNUDA2W3sRpoxFH8FAwm+89ZG4HBenvEyiQQa1L4rplqHj3KlTkhJ3L3CqalyDmt
swFWrmUSJPMsWGCM3oM8UHBYjAprDMJrn4qlZcENdXqyEoGgswsolmIOonF1E74MduQJJBwAwXtf
TgduLTABvQ3TzwwVLLMZJogNkvTmDpd2FnKMdRjqxRc6hndOqIt21p2y3Nyk5XI+pRjIA6I6QbFh
xCiNgmW2YiRN2JuERwKiYlg+rSMEzrTXNOKjFkSg6FWaqbUWUYzctKhwC3MSRVbYsxTCxJUwe91E
XZO5jLMFAEgxOBFfFSYGPEF38UQCCONEwcNkhMSOlrPiv9VtQ6gPxB9KrdVTntc2F1Xu7QUwunNU
3ZREkMOKqQFgVcK9Oyysqpn8FpNSbqzBUXtZagfxtqn8YXuARbdn/UVZlWTDMXQ2iJSoxJpXmoaS
TvRPU5DacPij+HpNgpOxBDEXQ01BD+OCkZVjkVqu5RCJNwaDs/S6c4IluTKiYWQaxzQvS4VBXIos
nIdkJO7WCBMqH5ogISJ82C0jNxwdA/siq0Z2nwyTxrF2REOoXIxTNzBTikrniokV+hAgWKAFQtIH
MJhcAO6BuAGANq5KMJlxtA6c65rNkOOSZUv2UTAsnkBIcKFUJHNUYpiGQzxXDsZZkqptZU70NUQT
mpe62qH+3CV5jFkYzDSDuDgi6ZqHHsBwXAKMWpwTyLh6R4hSMxcMhEOCDhkjFqA0kOKLEObhR9Oj
FFmIGBUBIXsQpVrkVKlCgAo0vdVDo7/tBqiay2xcck0gxGB7GPgFSgR7aL7oVb/kOb/kUXUe5Wfm
mACoua54qlSjKXgEwpHJezJx3tv+sL3Revq7n9RVS/YwBCeJHIqoc/NDVWI8wiWLI7pERtjyxnIm
VEJbW36cWZohxzLomG0Ywu8ulDdhtgQNQTIAFNLcht1aRJcKUIbUpAHzRiWI70YyDHIrknhFwMQh
GXfgjubU3jHzAio5svQ3oRMYgDSQ/epGJbbJ6T9C9Mx07gL7c8TzUonzRuAmauITBv1piwIQIFDY
oA0UhKjYhAGtb2DIBgGBq5dyaOeC2dwQJ2pgjdc9OpjiSKhEwI0xJEWoSETsGWsQM9xwNMWHxCGo
gSDAgWzTSNY1BCIs9HyQHijqIAdnddNQ3mf6FNidG0H3CGpxqsxwTDF10D8Q01GwHBVTtU5ICVJN
UYjn2O1URgEcs1RVsqxrwWz7iYeG6AZDGBOB7lRcVKZkOn7JuezXGDxu7hep7okbG3WX7x/ZWg7c
obUaRIwA4I73tN2EPdM7Hp18C6ls7oMJxoQUTI07HIThRkfMLpxUO6ckgEUF7oyNTG+GKMgHfFUN
8cF0kt4hR1SAJuFAtZEhlPSWPApyXOaB1nUUZym4GBZ0JibgCoGKMjFpEXCZw6p2Ad/ZQKsq4Mj2
FZlUDyVSArgqirRUFE7ElOQyoHOaqW4BM7LqqcApADSDQZgrRIgjPs9p/n7dP4wvckf97c/qKqFw
WrwVTdSiTqAFHUjFwaVUY0lE3BQ9R9uQDA3C1Q3fW0gAB3+a07kCwOBsoiRjDbDO0Q7ZqIMd3ekW
jGUpCMaqTbft9sSBj1S1nUcVq2d0+6mSXjAUHgvwzDbkLxrE+K07rSGRqfgjER0vjfu5IyDSBrQr
QR05EIaiW4YI7kRqlKhKcga7PwT/AAT4qqrfNGjiVCgCaYFEydqPmoCHWJeZyaFRjGDEEkk1uqSN
aPwRMi70IKcgmPC69WMTDbLaQX5OCVr8wmWBYobmkmAOIURAASemkVBWvdmalpQEWJ4nNagXBNIs
uq7UwQExr/dJYp/SgZ5mvzTEiGGmN0Zhy9S90DLuXrb9DL/D28TxPZ9Kd+z1tym1sdcjysFLc3JS
Y0EScFUOqOFSqBEXhe4r8VEyaDP9rBRgN2MNuOAcnmvxN8nkF5pHvZdUdX3iSidmRhK4GC0bkdJw
OBTE1wCOsnULBUusVWYpgV1AsTVkGkY2ZESGqDsEZRjWPFQ3cw9M1CUpMaHmnLaSPipRjTjnyRAG
riFEl4shtnqAtmmMm4BaYWGKeUmHBUVKp5RVQx4pwX5LVIsMAiXD5BEIYkp5JohVqqRZVLck9TxK
1ELJY9hJNeCIhTndapS1HJN5Qq3XBe0J/wC9tt/1he6/zdz+oqqdEYdknsycgkFnQMQwq2fY8SQc
whCctQJF7+KAl5Td1I7raj5RWgGSlt7sCAJUINVLZ9vvGMZfZJa6luyaQGRdamIIxZVjHV+0zFPr
0y4inwWmRE8vtJ5AwPD6kAJGRawCBEdxsKN80zgH94ingo1BcVIsmNERiMVIYIDIIuat8kXFLoxD
scgmFx4LVjiEIG1+ChGMJRkSdFaM1VPcjuRMjLQYk1qOCMdwHZ2CBpEi5lLExzQMJw32LkRJG4Yn
7LKOmIMI0Jq5+8DktWogl2ihUklnH1lAmNrmvch6ZOhqiycuRmr0QDVi3lrZazAxiKR1UACYTBl+
yAT8VEbkRCMi2om3cvQ2Kw27zxlLEoCIeRLd6j7OHm82+R+1+z3dle14khlp1mQyNU27tiXHJBzo
PEL1NuUZNdjh2HTcYLTOIlE5r1tnrgPs4xVu7FUo6q5dYk5YJhI6f2SgNAIzCkA4LuVpq5NXsvTl
NojDBekY6m+1EurES4Jo0CbUycyfgqhh8V0lMDXHssmIbsuukppB+SeJqMEJnpzzXSO9N2VWfBE7
W0SBcrXOQhEInUSEwvzQ1SYcrJhIyHCiaRc5KpYHxQYOiSWAwTdntP8AO2/6wvcuK+ruf1FH4hZI
gZ9khwUhgRY3UXTsmIUeY+aBg0nFMVCG5ESEmiKaSCVufeKZ0ICbxJYg1Xpg6RLEKO5t7jaouAQT
ZT2peeHmZArSWnzCG2G2w1wMVqO4ZRwRYkBMa5IZ2RKdkTbihIlnpxQq4qw5oECguUZVrVaomliu
tzRgVHc2jH1ICXqRmWMgzNHxovbw9ps69oMaCshlKSGqUIy/txjInRiYyTxn1wLRYkMxu4RIGkXz
c96iC7xPdVdDs78E0jpzTxia0e7ppFnyuunalMHE0H0Iylo2Yi7X8VI6xuygQ9XHwUtvaGohtMYD
UR/0qAhOOxKXm1liBV6BGOz+PHCURU9yPuNwNuAmO1E/tYyPJPEGcjUldZEB4lNtEy7l6+6YiLgN
idXZdVAJXNN4KhI5LzU8QuuAPEUVeglPEsHYlqIxlCM4yrb6V1wbmSvIB3ldMATzJVIh+ZQpF+9O
4BPFYFlSL96OiOkHJAJ06b4qnenYjjZPKQGeKqSV0xJGZKsFVuy5HJdMpJyXC6QqnsqEIQFZFgEY
7gBnKLxPEXW8TR4ggYIwB6RfiVKIgCJYkOraTmtLAn9rFUoHupaauOoy+YZHbmWLOJkgUCIEnajJ
mdAAVGKAsvaOa+tt1/iC9y1vVn8JFFOjz7JN+ymFmsVEmnC6bBVKoKOoFwLL8QggbzxkKhjgtw3c
mvZDmorbGp+nNx3L3I/dPyURKNQAD8URE0oyc1VBVOLdlU2OKuswgjCgkKg/QtEvBVtZNF796axF
HGKBN7F7o6S+oV1Ib8ohrmLkAOiNgFhauSAd4m4y70TBxxKBJNOCjHbIEiWBmMeaB95vgEB7gAhR
GzD1JfaYfSVp9rtCMXcFn+Joifc+4Z6mIOr5URERKb3clvCKIjP0om8Y0+S1TJkeJZDTBpYiNvFH
ejsOTn5R40X4khAAPpiurrLO5qgIRAoxRkbgx+aqrsUSVROtIQcUknCqpw3IvGZHwUIQbaYddGqq
kSGaM9htyGIbqCjvRnCL4MX5ICLSJD9MR81qmABkQ5TRgHzZM/gmdyVkOaeNeKqXPBdMRzNVWIKo
ABwTXOQXWdKq8jmUwKt2vLwVBRVoO3JPI0UZCWkRLgqG/tkSJHUBhgUdoFwOmJxKfs6alOVpJ7kQ
JEPdkasXsmAJ5KoIGLpgzGyqV7T/ADtvj9sL3TV/G3P6j2UT4o3T1BZk8paaOCzqB1A6rLSS3NEk
PpqChVgUHrQIRi7O7FS59kTxUDg4qtsXLHuqvcDOJ+S2yA5aqc4rS/Yyog+VUCgRYqqj3om6e4wX
UdONMkZY3dANUXCMpx1Ouk2NQMkYQdrtgtUmeSECznHJaSGIxCpLuuqyrgQow3JyIxL1+KHowiTg
ZHUfiuuVMlUd5TAPyoujbMQcTT5oS3tzzFmj+tEiLyBua0U4xi0SESTghxQegC3NuEtU41YVtIY2
VCnXRCUhmAWXVHRnqK/E3B/CEemUt13EiekjkmjEAKvazIeuPxN4Of3RgmlKWocmR24bZeH2nu/B
PGNTinKY+CIQenNXpitMbYdtVemJTQHendX7XNAnuVVceyq+hOQhDVpei070CAbSHlPehPZmYthg
hOVJC44lcck8qDJUsuCo649mqNCMUY6aZhSJNRYI6qyOWC9qLk720x/iC91/nbn9RRAqEGKeVCcV
9SoykAdLhjyV6hCFycUduTEgNEprD5oVaiBuxClpi5e6AkCIvUgLo8gI6jgCcU220mLQNnPB1t6x
GUoSLgG4OLYrcluhpGNfBB/KMValkJx0kG0SWl4FSjK8SxUjYgpxcYKJxK05KtQjE1CfDBNZGIDy
aqFKG7/NaQ3egYmpuExNZIjytdaiO8IdLVqumrWKESxHxXSHjxQenHBUlwQMnpZrISjj8+wznEGZ
diyiwalVegNkeKPJaYvOWUA/6kNERtAYyqWT78zvbmEXp4JpAEHBqIjd2ACMQG+S/Bht6s5Bz8Ud
ujkU02R1xMRPqi+IVbhcl6ROknynini0uSaUSO5AkCwqtqZFBJ9JqE866iz5JlN7MGVOzVuU4Ypo
BvmjOZ6R8VpAYZdmfY8i0U0BTgsBzKqR4q4V3WpqnNMFVWQHYIwDnIKMvdT0+oSItgRmt2A/w6dc
rPgxCEdysjghtbsh6cQAIgZdgFmxTipzVaIIGMmzC6yRI0fBf6j2kg4rODu6cBiF0l1VSlIlrgRz
QhFzIUa1eLr2gN/W27V+3Fe5LOPV3P6iq7JbPFOImHNCxCJZsgEDkvKTKRcYBaWZGYrImhQlK5CG
uBAlUFAyFSE2ZTxBMQRIhj1B8ENrdJOjyxiNDHjmtva9TQdvIXJxUDGREtsdJFQw4IHe3I69wH09
dAGuVu+tudAi0JQI831L0SQdAFQXBdCTHSbFqFbUI7g3WiAGuCcFo3WjMgSABei1YG6jppRPl2ui
bAW5oZIsKG5URcZIiQYnA4KwbMJ8RgtTF2rzC6gQbhX7kxtRAgVVnQdanvUhAAXsgCGOPZtxx0v4
1Qe2a0xJmcoVXTAbUTjKp8EP9RuHckbRJo/JNCIjyU5RLERoi5c9h59kJQnIbc42Fq0KHqnUwDk1
wWvZInHgUZS25MBVgoTsZNKOfBAEEbgpKOIk1kYy6qA14hMLYBQ3JA6AXNCqSiDxp80JlyCfsMVL
fEDtxwnPpFMXKMZTiGLXdYylnZMIDmaq7RFyy0k9PFEAFeUunk0eZXVIy4YJhEBrFXVe0PYKluyn
Y6Go9RsFCUJelt79KGrHFRO2TLchWUjituEgBHbkRI5rcjKWqMDpjyCYXyQ1G5sh7f3O3onGkd6N
xzXp6xuQNYzGRWjcDg5r1dsvD5JvFMChtGQjM4mii0gQ9wvwzp3R9nNESqRRDBepAfiZhe1hJ/8A
G22/6wvcH/3Z/wBR7LurJmTOycVI/S61MQVpmWB4VW3DSTWmZbgmEZTb7Ok9PNbk5kw0jpLdJOSk
dvqjBjKQZgjEEmcqB7AcF1mgwKEgeRRlU6nBfFRjEm7COC29+W1XbIcNccVHf2iHn54CLCGQfFAT
ltxjGPQSdIAjkBiUNIMZgAVIIfFRN9wgBxV8gpj3oO3IxJ2yKnUPsyjcIWIZCXinOOCDiiAiKBaU
+Bsqd61A0zNE9Cbmr3RvzyVXBNiclLW7EfoUCS2SL4YoiLHhZVIJyTkOEZPS2kBHcd8BECqjKRlr
IoDFPGoUY5kBPCI24MwJqWHwQPut0zP7JLDwC07O2KWNkz6RkFHcNWIJqtMBpGdytwksGA8URE6z
+79a64mPxUuoxrQkFkBGYkcsVCQ80D8/1ppTMHwr9C1Qk1CQxYqftt6EZbcnAm4MqhroS9QhgGjK
GXJeptyMpTAM9MaarLUauCLVT45qhWr3Mo7UThIapH+FD/TQi4vuTDnuiKJ96cp8HYeAU2DQiWAT
iyz+SYmpwCoB805JTOr0PZU9lyhEE1LOq1OaYFiqFVFRdUT2Ga0bIcjFAGTE1Mv2QvbbROvSQxsW
BR29k6NqNOJ5rd9xubwluB9Gy7dRxZTnYyLmXNcc0Zk9VohOPFG8uQK9OdxZGBrGVCj6ZLXHJadw
aZZrVtR1jMEKW17nYM9mVxIMxzBQ9x7SeoCpYdUeYR3ItrHnj9KFLJpL227Bo7sN7bk+YEgt/wDz
J/1FHARqcT3KG6JCe3ug6SzFxcEKyorKgVQhSvCi6ZHWaQNq8UNnYfXGu/K9chwQ2NqkQCREBn8A
vS3I6Pbbcx6pIYOf2jdR3NvYMZCsBH7RsyjGMNG6DIze98ZFDqrghEly7abd6juSg2lzHR5qftL0
tyJO5IGMSJaanEqfs/SgIyGmVTcfaoqEAxLOKhCeyDGg1Al+rEp5P68W0SHBbQ3JS26Nu70zqPgp
RjLVGJpLMKtWzTggvdqIodmrEUZc0NJDDNWetSD9alHSdJqC1U4BAer8eK6fMLBAm6Z5NwqE4qLu
yGgO13VMFXuRO0dJpUioUJAPhKPm8ECSa4EMyjTEJgdIyFE5L9umUxqOAuozjtPtg1JLuOQUp7jz
iAJ6TbwCMvSBHAgFeTT3/oUxiR+mVVQyi/GqaHuZDg+ofzIfiajjYB1WRjyLqu4SPtdIceBVeuJ4
mKasW710yB4WPxVQW+CuGQ6j4uq1UZ19zvkA6X6YniVPe0xEpl2FhyVSuSeRTQ8Vfsb8nWbD5p05
7GzT4DFQEZNE+C0bYecizm5UtzeGmZcAZBS0S1RiBF+V1QOc17n3nugNyOzDp2zjIrKtFVXRYCud
Vcp3qFQ1FwstyNgiY0lktUJGJF0BumosMENzb/D3Mf2Zc2XrbEhs72IFijuR88S04/UnK9uAf7sP
6gt8f+5O/wB4oTkHqHiKk9y2tvZHp7cB5SQ7nkuKuq3V1UApwKJ2thZT9Yk7h8pgWFc3RG75yBXU
9G4KGxCMSQSRqNJk/UpbstiO1vbbaJAUlVjVVgIajaNQAVKEDrEbysK+KB26xJ09RDOp7flkaSGN
MEdmOzEFx+L9qijGRYSLOA6cVMPMDwW3LajpJHVGvim24u9CxCYvGQzQbBMFx7eCLH6E1+5MgR3v
ZUOoosAENRVBTig/gEznkjoAPNWL45J8HqGWh9ALOP0CBjNxjTHvTTkJSzZqKPj2fizET+zc+CPp
bZ4GZYc2TSmQMYx6QmsE8SRyoFpG7KMbEA0qqkHj2cclU0Tzkzp4ur6VSZqhqkSMinHZ0kjkV1NL
mB87pnsmh8FU9tB1YphQIglyMOzgrKtGsU3ZwxQjGwTlM6eS1SIbFAbUNQsOPghue5kYg2hiy1Si
J7rPESrJ+SMYfhxwAyTyKp3JpEgzFkQqFAliSHBd+xyKJhtjvQnAUF0CLFEi0qhPDz/NNMGMggJG
maG5AscYvQqZMBHckzDMI7mzQm8F7cGh9WDj+IL3O446dydOOop5F5G6IeyjM+YhieS4IsaKqxVK
jJ0NuLapUAJQ6dJFABU5XRhPUTQx1XYr1jER3QHjIA3jQCijsyH4j3oAB+6jvbkpQILAftDJHcgB
Dbd46nEZ8AjGcjtmNQIHpddbkk+Y1d0WnTJk2Q8EYysbtQrch7p5bcgNJAeQrVslPa2YCMDLVHdI
eZGAUZR3NcpVkGIb61x4qnYFW3Ds4prE3KeMlgqghc8E5YlPYcEwNBjX5IPU8U8rYp6gfNVdwqip
sXTAsQHNbppb0WZzhLuehWnb3JwBoJmzdyIG8JBrE6ZdyPqahuYmQ1fELoIm2TH4JpUOR7aIuq0+
AWQ8P1pggxAbvKERU5rNfUmCAlTLkrok2F10inFYlOaLqLnIJwK9jBGcqk0i6rhigG6szmjJg70C
1ZppDpTCuSZNmqXVU+uIlg6O1AxkMDGyB3GMpPUWU9z3DQAk0Xv3Iw9ttOf25fUEd3elqmfgmTLX
MsAnGHlQT+IT3YWTqtFQOg9IYoGP+HL4LqoRYqnctM2lk/1oy26kXib9yx5ICfghomxwzXt9fnG7
AiQxaQW9s317sn4NIqcIUiCzICYeOPELaMYAAmUQzXCqGZEjuTJ8Fk/FT1h4bcegg0fDJToBIl7d
XioQmQ+4AYnhxsp+2kdUbGN/iobcKbkjQWYqezuGMZO5lMue5lpEidn28tRkBqAEsWUpkCUpODqG
eIyUAYCJAY6XrxKzIzVLHJRlIPEEahSoU5bMJem9OHA4IvcCjJizlMK8ex7dvDtDfBXfnVVAOaqG
RY3yXTI960gjmnNWRBunkQKUTDxQizHL/mgCgBV/gmiRTvdXBJOCrTMgYZKDjUxewEuI1Cq6TIRH
/cbch3A9S69kS/e2pGP8s6fFVmds5bsTH4h4p4aZA2MCJJ9Jpc3+KbSS19LE+Joiv0C/Rld/knKf
4ldL7k+GHennLSP2Y/SUNr223KZNoxBKbfjokSKODxwXUSTwTRDD8kDDE8FSwpEKvZKDvkjA0KZu
a1WNghqrMgUVKBAbbxlcyF00ht72RnFpeIZ042NuO2LzOoD5o7kdsSmWcjjgpTcQABMeMuCMtyWo
mwwHLsYJscVQJ3YJgInmHRk3hRDBMURbJcUdXlAqgAzBSMR5avhTmtD1FgqgsqqhaWf/ADTkiQOK
1QrwxTVpgvbQJpLd2xXjIL3czYbkhHUP3ipanutAhtvgYxEZeIR2gA0JkiuYWiRAPPFAOAMxZEHH
FM7EXCGL2RAdrtgmNvghsyJq4GmoAQjMHVkWdaw7P0mzHgoT1SBjarkHvW7AQhp3YaJ0NT+1zWor
TlY8FdXTA1UdmPnd5m3KuKxTqvY35D9l74Zp25lOL5siTFuKLhxm9EMDwRBi44laTt1TENwTBwtQ
NTgUZbkQZE9zItEmODKTg8AmjUvggQACQxddZdjU/UmFAaOmjIgULYFuaqdQbHAqR2gJMNQEjkK0
xTzhHaINZwkR/T9KaO8ahxDdD1fMIicJVv6Unp92TFMJscpgwPxonIpnfsyGdl+HHV+8aR8V+LLU
f2RSK9P222ZNhEUHM2Wr3u76kx/Z2jR/3p/UvS9tCOxt5QDE85XKltnzS8p44KW3uxMJRvEhim7b
9n70/knVVeqEh3ob0O8ISGKAHNFgSyMiGQjAapE4Ibu/Ebm62Noo+znDSa6TGj4sxRG28QD1kA1b
AfStvYn0gVbEol0wqUN71tsOH0El+SMSGIoVpiHkbBao+3Lc6+C07sDA5Sp80ybJNdPiFZEYGpCJ
hXctEHA8UROZYl9L0REqbkKxkMRkq3xC1xsbqUcg7IwJMdJxKAmI7gwzXVtd8br2ko1Hr7fPzhe4
iD/cm/8A1FHF6hbsjJ9BLOjHi8m+lEvTBV6pCxRlgMx8k7cEAHAJzqr9yaIqM1riWkMBYpzQ8yfm
jqDtVHUGdOQ/eyJHcE7UxTMeCNFa2Kz5rBsaXTAhOa8E5+BTk1wTyd+KqH4JtJZNbNOAK4hA5pga
k0CMY1AqXDGq16+kYF7JogsKElmWp68bpoHWeacuP2mTRBIs6EZdMjimyxVwRxqgKP4oiQAfFAxA
JwLouSmjKj0dagQ4xK1EUFV1+W4kmgeDYlEEs3xQBnS1a+C9OMnANNQexwKcmO4HcuH40QnHomS8
5RLB+6iidRv+IJZcFqIMzcareFkwwXre4E93cq23aHeRUr0tsDa2hbbgNITAElCW5+HDjddEXljK
VSv9TtR/H2BXOUP1IjL8ivlFSU8bdle2cDk4QGZquSJ+1JREvJjiSUTAegIgvuWrg5RjuSO9P9qA
0g83Q3AIxkKxLGUhwcstO1KUKVlYnuC1bszORxkXPZqtkvM44LiLrVE6TmmhvE86rRuzExkwX4kS
BgWomwPYxRgbhVoniuKaVjihIGn0LMFExrEo7+3R6TGacJgSvaxdwd/bDEZyGK9wZCvqzAP8RyUC
POLoiAYjqJOaJNKoualanJKcVPFOCTi10Ii/BFx3J35BU8U9CU5DY0orOCqH6lW3wTCiZ7pi5Gad
k+BomBqqmoVOrFwMENUb+XitJBcB3AoeCOmJe55IEwMpE2Zfs4gO6sx+BVWBFmxCaU74NVPGJL2I
/wCSeAiKXN2UYzLuXOl2HFkGIMRmWPgtRNMAjp0mWQpRMJDkEwABsHREgSiJuJZfrUiQz2YJwLZ0
QkS5PmRiHFbm68xINnVQD8FUEIVCcAImNMgypbJETDSwxQLnJ03GhFm7kWDxxz+KdmTRdjdMZdQq
MkNTGLeU5KrCOYqpzg0tyMqnEAilCvgtL9WSqt2O2PwpHVA8Dh3drpsZ1PLsp2ZBUsg3JNkurvQJ
sAhH28TKf7Ug0RyCMt6ZIwj9kcgsymVOzgmNPkuCYJr5AJ96Q24/s3l4BN7fbD/9ydT4L8SZmeP1
J0COw7oNSECJOPiERE6zibeCOoKvxQaybBUKMDitDMQnXs3/AO/tf1hb4dz6s2/6jwRjcAN9alEl
gQqF6qqD1yGCEcboRenY6t02ZBiRxVJF0I3zKJF80H7k3m41TkgAVYINhlWqcSY4xR0mlgSaJixk
bBVYlEuAUdNzdaHYOyEX8Fq09JLckGtRwVQgRqgRJjci4LZOizgm9l5RqLi1XRkCRpFGoE7GJxJq
D3Fan1asC9kDp6eCs1KEGjLpq4aqIPmWmVlrB6bM7GiJID86oCz2GAQM6kW/eCJj0krrGoind3KJ
YxFahFiTS9+5NI8ggY1ZnV24q7811RBXUPFUbkmiaO6JoTxVYkrSb4u6NvvFxUpjRrE2VC5Asv8A
48pbcpNqL+dswVOe8NZkQYwPli1OfxWzvRgYxjH8WBZzLgckI+1hIgvrEg0m4M6luGAO7EExD9QI
7YxFia8kWsKAdtfgvo7AcLolaj3BEDzSXEJnFMOwfHsZMqdjn4JoHRyv4qvYAqpsVRaZWxK1yrCQ
6JisSqWzVTVdZNfBaYFzx7GVUJRxFUxK9oXYjf26fxhe4aoG7Nx/EcypTIBhlT6EQCwWqUBI5jp+
a6ZEcCnZ/wB4Gyq6cIp7lfJ0+KOZVSyf54pz3YD4IFmOeCo2RbFZMFXknNBgcfBceNFg+IugqyJy
KjTS1sfghLh4LS4dCJlqJDuLK7xLhj8U4sLDlwUjHpmLGx5KzvWv6k4i5FCDQVWrSZcBIHwQcEBq
4MnjpMJVfFODYMwuUHNA+DlkYyABiHGbLSQ7XzZagwBzw8UZOHCEZgSHG6EdsEkY5ckAASGYvmna
rVwomBaL0zWgkucrWRnOQPzRMbM5HFCeDVBWiRvYlcOCYImRLvQAI6JaSLYOql2VR4Ko8V0zpkrg
gWujLRXgy6nYD4qlj80HLFrrpL8QUJVLDSdVaIy2ze4xdMtzcOAYcyq/kgC5WkYCqbJAIDJdIcqo
bscqhVroEMRIOrMn7GNHxVOziiY1a4Ve0+33wJ7MqEFHf9qfV2DUi8opxYLyiXwVAIniq2TjsGR7
PZ4j19qv8YXutuRIHqzqAL6jigCCIGTajRwiRvd1T8k5e37J+lOx+AVA55lMAOTfWiG0yHH6Ez9x
oqxtj2fJZvibJzUJ7fJXf4LuxKBFsuwMKIHU5yZAi+JXSXa5dk18YnMoE0Fl9kkYkfSm1EYsCnF8
6IAFjgF0yGkVILuiWAJ8oojqID96AMhHTiPpR0yYSrSwVBagk7E9yMQxBuBfv1IxFAMhREmTB6HA
ha3oaCLqoEqfauFKQLBqDiqdYrYp9JBJao+SMnZs80DrGTKsgRgEBI1By+kJtslrMhiRbg2Cebh8
M0ZtwK0nBOtJ7k5oRVD20Tom5aUuTo7cpaQDplLChQ6gxsLlEADUXbUHqgcTdaZPwNgpgbjGDVel
UZS6oxvIVTSi4TSA7x9Kp8CUWkQOLFUkDG9qrpTSg/NREbSJJ7vyjLAIko8UMhVOT3Johh25LUe7
tqARkV5PAo6SQcimTdgKBhjVahQ49tV0VGINkZsI6i7AMFknOGaPpB6OeCI1eBQiTQpxUhVi44L2
sw8W3tst/GF7kkhvW3BfHUUHjEiNiRVag7WIBoqAgCjCtEG+IAQI+axbNnRpbKnzRtlUprHCiGqF
TjZM5HxTwIkjrif05Jrp0aBs0AbdjMAcSukvzXyXBBwA2CeIvgCnLjIMqm9gmqDzTRJJ4pjQ8UDJ
i1iEP0oqVHBNI0zyXTIg4F0wDu72qeCrQg2zWmTPVNYgOnFCaUyUi9CWCGmwFX4qzweoIduS/DIH
ctWm14iieAcSuMgtOpgbhmdaqdNGF0TEASFjg6fURIYq4mCG/wCa0mI7lUAjBDRE0uyactOZufAI
GIkSCOqkSOWmqYEnUaviuoVGHDgmBrg+CwUYGmo3xdS0gbw3RWI6ZBuC3Nt9G4YkMblA1fsNLZKk
iExAKcx8FgC7VuoRpprbNattznH8gAXKEfEpo2TIk2WqHlxGSaRqmNlXsy/KyTX5ppBinDEK6um7
argjExMp4F2ZERl03YrWYs+P/JXcp0QTUDs9qHNd7b/rC9zkd/c/qPYxAIOBRJp+6XIHFEycchRE
t4H6E1ycXqmFTyQBFDyQHzLrliKpqsODIln4lVIAyNV1Ri+JCpIgcQ6JDSGDLqDdypZXbsqi6YXX
EoB2Cp4lUctcofIur0FnXG6AchMC5xc0TE1zVDYPwWo1a4QkHPBFpsKluK1CXDvWtqP5uC73Y/Qi
RSrkIPJq1ODZqlWYjMrU+iWIfJESIIlRBgzDFFrm+S4sx4hMxlLhUqsTE5GhQO7vOcYwiSfEsE4G
praq04gIw29O3AhjGEIxDeCbO9V1hwzDNCYpUFXHF0xonFQoTH2S4cZL8WGnjcLcMTQksmLfKiY2
dXfMhO/cnF25JpEhr2TxLk4YugJxMSDYhjVPDc1T/ZYt8U9BO7psU+K1msjYZJsMlwXyR4lMqgEJ
4sqlMb5djqv5DHt6T3LLMfkFk9hms19HYz07LIF2GK6S4NQV7Qf+/tf1xXuQYj/G3KH7xRAs+FkC
hdvggQG51CqAGsQqkMLLUZ8mcqpJ5o6YKgATOe5lX5rAnkgIyI5gN3pu+6oBxMQmiSeEgmnAE8Lq
jx5h1qi0uITSDFZ5v2VTYJlmcAjn2UVa5InuVW76smBpmnc5MqUPFEGjCqnr6YkXA1VRjDcjpP2t
JY9yB9WBajBwfiFTcgKMDKSac9o1pokHHcjHXENYksqSiXyJ+pfiTEcqGXyQAJ3TmImP0rq2JSIs
TLSE42RDkdXihGO4RHIU+Sd6nMusyqmqpRMASM1KUgwDIPZw+KoZTzAXTtHvXSdObJiHj4JpRrmq
HvRo/NUp+lk5j3hahEyiBWhYd6/F3BtxGYMvkm2xOecpsAe4VRjty0xxa/jdSEi8md3c0TksVqiX
70x7mQBwv2cTZfNMoqvZSqaNZYnJOT2t23/IJALjEW70ZRGoC5CqGKFET4LUz80xYHLsZnTS6Xsq
kMvwy08jYptyDD9pcOz2n+ft/wBYXvJS1ER3dwkUakinHlNQjJrC2eCYQb4rL4KpPx7aRLZsmcA4
1+p0xm/IKgJ5lAQiwfiUxJlmDRWA5l3TxYjOKcu3NiidQ5GrKluAonIrxLKpAI8UWJlzDhdW2PFl
0y0HiniRJMYnmmIZU5AIKz9j+IWXFZoNRZ4VQzzRkzlE2BBBZeYkclctkU7IdIORZ01uSrMnuVvF
dLAcArpphyQ9sLKUoTE99gZABtBNaCVyEw3gz1EoBvAKEtuYG8QfwywjMi+hzQ8CtQjKXOlk23ti
LftFl54jkFGEpEPcu4VDZQnKNJC48E4qE5HejGhkFWnA0TAOchdEVpcFDVKUj+6GHjL6kDG0a8u5
AHcJBtF2HgFTwTCjp25IYA0PeiGdMDXJfUnbgqqmCr2AcGZNVlRaYVzP1KyJz/4BD1HCipF5ZugQ
WzCYSbAsuoVwKqHIQpXFVTivBaZQLkO7hvGqAeUZfa1B/ki9Qe5SgZNIeUHLJNIVWm8QXZUAPNOy
9p/n7X9YXuzGRGre3Hy8xzTdMhchv+adtI8B8U0pR8XV/AfWU5BPeI/0hdMI97k/FONTYNRV73cr
SSGZi3FWfmXXlAbIOnZxZ7fJGwOUqonHHSKImIJBrkjQD71SiNX/AEhGkjxsiNIIzkiDIRGDfrRe
cpE5U+SfQ74yKcQiBmKpzKmQTxkXT6BLN6FV6WwoV0yBe2BVYllZVuvoVKJsFZOzC6NgibNh2Vo/
emsuk3umryVIEjwTEaQM11SA4BUiZdyDR0sCCDR0akSiSHBTiZJelU2ndlfAgIbe8NMxVr0PJVAO
SJZ2zqnJY4FUmG5IRqQLJwGPCirbiKrXPd0v9lnNEYAkR/eavzC0QmIRFOlge8iqeOok11XCaQAG
BCcjvC6JMckNUebVdMag2ogAHPBOzHB1GbEiV2Tk1wTUK04EYo9ozPZRaYnqxK59jDtqtWumTIKx
fir07a9jO2ThXsmx7WuDcLTSL8HWmYjEx+0DVkJVDhgb1zVZExwlcNxUoAg4JpBjl2e0/wA/a/rC
90SwHrblWH7RR0SqcQnlZeZVd/FOQQBiKUTxZjZz9K/xCHwZFwTIYoEhxiM1cRyCLuD+6iRHUc8e
9dMYxGRqmnPSMg30o6jqJ/TBOIFuKcAVyTmbcLK8ieDlMYgHNYHmmnKowRABI72RZgP3rptQGQFV
1OTnZOIhuOCuDE4AJ4agThgtUoxkBfNVEovlUJ4TBC8rjgqxNOfYyHFUbV9C6YnwVWjxK6p14VTR
gZ931po7YjzXmAHBO/qfeouoafkni3cq1W4MAfmvD4hAn1XYPGjO3NTlGJgIsACXPNXrxREi0TSl
Vo2awZ4yPl5IOzlrAj5pyDwyQjq0vUyYGSJluSkcyXoqEscyqxEmo6ZgOBCeBYD7NwgdD8j9a6Rp
FiQapwL3crrkG+KdtR8V+HAR5qvOiL1MTQrqQ0k6QhR80QiUyAwHZTzn4Ds5LiVV+KoT4OmjUi7r
rDk4YJhQKv5F11FgulpHM2XQQZDBqIxkKhVoVpPiO0/SjDSK/FAV8UNRJOb/AFK5riQHB+kI6Jah
hmmOC9p/n7X9YXuw/wDe3A38RTnwC1APHiVQCDJpF4moMUDFy1CCmEQDgmJDHIJjJXc8AgCL5qlB
wqgdXyCvXvKtXNgmA/T4JnIHCibVTiSyuO5ZlV/TxQ0Anl+p0dTAYuQK+KAjJ5YxdO3fdXifn3LV
XkgRGo4qrAqrnisx80xIB+KNCeSOlwcHP/NWEhmV+LCA5FExJfIVZMZjkaH4oERBiO9eY8rKz80C
OkjELqYg4qpfgKroIj9418E5kS2DU+CrBxmPqK6Ok8CypuNwIBR1FyWLpsn+aKk5yLJ3etAizRa1
aoTq4x5oxm7A0KL0iA1bDmmj1D5ITienEYpib2CfVpOIQEr5FAgsPtCy6XLUYo6dvTxCclh4LUSQ
6qU9wnNBgyltXF1ZmxBRm7fu4phc3zVcqFlW/Y+KfE2Cr2dRrkKrVAMBd7potGQxGKEwIiZoTXxW
o1m3cESTXs49lEacwLjxQ1nB3v8AJameJvwRAm0ci9EwIMR8QqBmXWHAX4bgG4NWWk1yPaWpIfFE
ihyuFqEdL45qciQGqgRJpZxRJNV7P/P2v6wvdAgN624x/iKIHiV86q3wVFdjiFV+f/NdUgR4/AKj
8gE4i3P9AsG5K/x+pVKqK/pmrP8Ap3qlsh+pVcP3fUuqURzqV5jLkG+botAnmfqTQhEPiA5WkmVb
YBPMgZuXWsbjk4NROIycZlWiVcj6Fa+Loix4iqaTjjgjqYjM0P1I6TTK/wAVqEHfF2WESuok8QqM
3G6qREYNVASGocmTxBHJXEuY+lWPdVdEwD+zIMnLniCmYHgV1Rb9MwuiZDcaJxLUMzgnIcnE1CrE
dyiY1BHyTgU6schVMRcAjkUZFw4airhmtQbK2CBlKhqU9SXoPpQowxBQY2DBqIAVIvIIQLhsQiWi
wLVFUKMHpIZ8Uf2sA9xwXlGk4FZHELEhMaDNMJUGCpUoP0/NMa0K6TUfJPc5YokgEZKQAY/BMSMm
KYpk5XWRF800ZPECzIj7IxOBQkRU3N0Hjym+KJgRMcKMmAteqsrqpRjBnGBKcRINmQluh2wREKRy
NfiokyjpOAqUTVygcbHijn21CoVGMiwxdWiY5ALyiMeF1oiTGPFNthxmAyZ+pBzq+hezfHf2m/6w
vcl/7244/iKoa+Kz+CYDxqqADkP+aZ+5VNcz+gVTXmqEnl+hVm/TuQDAA4/oy6pBuJ/5rzP90E/M
qkSRxLfJHRCI+JQYnViA4Tks+ZTTkByqixkedECALjwQwHC6YEngbKkG4g2VwSryHDJF2bNM7jCi
cQNb4JwwRcyplRV2wf04osdPAogEvwsmNeJTgsMQE4clOA0s7FNM1TF5JxFhiTVaot3JiSTkgADH
i5TvGYyN1WFM4l0wFb9SsBxBXSaZFCMgIg4qBkaB68wgdoggWBrcMUDMVAEW4BFg8o1OSc3xRJsO
DrgFwRJqnFFa1aq1QXWk3zV75JxUcUACSgCek2Dq7D4+KfU+HZQ8k0xzIUpAvERNUKkd6Ok1N0xl
wJt3FMSAB9q7IcbtUI6wxJoy4p4UK6i5QjPbdsQbonbcarDNEF2FWRJo9COSB25u+DVCOMhkriIx
N1qlK4oxVZCI+K6SGTTLnsy7WeqcEJscQq9r4p3ZOZE86pzDU18F1AgZBHQxIuJX+KBhDTuPU4Mv
aA39faIP8YXuiafjbn9RQLqzr9P1J7JjJl535OVSJK6YAc6q7DgGTws1yWXXMD4oAEy+CcQtnVMB
EdyqUWDpxQpiaqgdBqHNNdM6Y2TiVMrJxTiVgmeqYhNhmtT0ysnjGvHFYROaIJfMhMB40VaRzunM
tTYD9S1bYIlxRBYZhCMi+RTwq94rVGDEYvVESkxFwy0nqyKcAtiF5gyYAvmERGVMBJPOGoZhNGWo
ZFdUW5LoBIzTBgCalabuiBbFNaJRZMDb4rIZJ8AtR8FZkCBXFfIrTZZ8ExNcAES7ZpiW4IOX5XRk
DYeKnJm1FgApB+QQFYzjjmuk27kCMfFPSi1CoOCcNxCuxsCCnNQcVLU7AOHt3JgXjcBGQL5g0RND
kLgLVIDkjCA0yOWSrOl2Wl7KpVe5ECpF0agAcUwI4MgXJzCIa2JyRjSL2KMSxIKchlSnZTsYoRkK
4ZoS2yQcQUAQPpTgA0oV1XXtH/7+3/WF7okj/G3KfxFUc/BUi3OqYU5Bk8gSMHXUQOaBlJ8wEAIk
niiBAB8Vp70XDrVEAJpUVCVavFVFUzvwTsVRlVWdWZB6pw7q4VS3BZsqkBUJPJCnifoTYcB9bIVJ
7/qVSB4fS6q5+X0Jm+n5LTIStclMKjB6FVOniE5lq4FOIkHAsnkGjib/ACTzkdBsRZaouT4qwWmc
9Jyb6US2oHHFPBweC6gAuqa6R1GzLQGBuusUdgUxi74hdMiDxTAiUcU0geCrTs49gNwydUqSg9hR
V7uw54dlO5Mak/BOBw4rb2rEBzzKJFk4LMnBD8UzMMwiDd3TimYRLsZUZARoXaQP1phYXHBah1BA
wDi6cdJKEtupd5B7ozgGbBGZiTIYNZaTFgA/NERB8Fq1VOC0Si+mxWtmPBMWbF1rFQgwNLg8U4bq
FRdwtYNThkjKJOsGoahUSesyDr1GMQ9V0lVT9hEqjPEJjUcU5AJ5IRNhZk5717Qmg9fbb/qC92CW
/G3D/MUPtDFkYxg3FAUbEhUldVCYBhiuo0wTivAp2onKd+atRWZB1R3VQvMEzVRDUwJVacU7q4Cd
yeQVh3n6AqU7vrWJzr9SYU8P1qrnx+lUp+nBYfD9ZTm3H9aYMOVf6U9T4D61cD4pi7cKLpjXM1XV
fgtLBhjZZQOIqyeM35rSY6SP0oqdQTgaV+IV0hyqMEQZOycLUHcBgnlWWJN0QfLwzTiTNgqkFVBH
EJj4FatNeCYUTiToUdskTKPJVu64IfAIk9yCqnwxKxtROLoGRcR6pDkifsqtAqV7A3ei46eCOkME
JAMaELWBQ3GRRMqSwCA8uLK5YYogyfFXAOa1SoBiKhExLN4LUbAsVIRrFAwYxseITSmWNhkjGRYD
yyFloZzgjFyCLhDR9kV1ZqM9wu9aXWodJHxWuUSAcDZDTIODTkjEEaXdDVEAYMtErHFEB3GdkJEh
Ph2P4rUSzYKgXtJWPr7bxNvOLL3VCfxtz+oqlsF1XwZOL9lVWizVk5Wa8qyVQ/FAgMc1cJz8VRk4
B8PrTUHe/wAlU2yDfNOQTxJLfBlS/D9Cup24/rTfS/8ASse5h9a8v/V+tXHIfqTkH4D61Vv6vmqE
lsLD4Lyj5qlF5lieKqmkK54J42xCBH60xwRJkwOAWaYAAJzLuXSCTxXUdKaVR8Qhpo1iqs6Yq3in
bw7Ldn1p1SvNVCY2dFxROzOg0m4J4l0zFMmsyYIg0ZAIz+1ufJOcT2ZU7Kol6IA4lU+KoSU5qcE5
umOCpQqgc5lM98DZEAC1VQODQ5hOCwOK4FAg3uCuliReKEzQtdSnE9Rxz7k4bVjH6l6e5EEPQjBS
ZxHAmwUBKjDE0pxR9QCRwGXerNkFEa8ag4J4SBIxzQ1ARbFGEw4d3XRQYAppeKa/Hsde0f8A7+3/
AFhe6f8A725/UVS2A7a0Ga81VZfQq1BTC+Cs6qaK7/FfXRUryDr/AJD61X6T82VzX9PshV+P61X6
f1Lp/TwdW7z/AP5Ksu4P9DL62H1q4+J+ao/y+SoAEXKcVCINCnAdXZEHEM6AKY1XS4BuCmJomqU0
R4qsm5LpBJzT24J5SJKfT3qg08QqmioEyu6pdVIbLscBVDK6qG5KpTYKteCdlbvCoeQKqAVVxwKF
kBKIVKEp4ScitU+nmyjCQIiKnkmgHjGgAyCrddVOygpicE5lXJMZ15LpIkOCMJBuaYdjYpm70wDv
ZfJPKqDjBMR3pnpmq2FEbEDAp4BnsgDfkg4YjHBMzEWIWiTS7kfSNTdaZkSGRWqI4ECiEZB8K3CY
SaOONUImQ0G0gjCYtaS6Gl+0CtThnVFpPcUx7k2S9p/n7X9YXumP97c/qKrdWobFF6FZ8FZVI71f
UeCp8U+HAfWyb6f/AEq7d3/qTEmX6cGWAPcPrKq/6c0w/T/pVac2HzV34Bz9QViOZA+Sq3g/zWJA
7vktUQ3NX7GTXTMxVW5rplfKy0kIAhwfgtUA2aqR2WqrsnDlfsoaiSECL8VZfMJmdP4q6wTBOQ/J
Up8091WisH4qzunsckXKYUTEP81UlslQPxurMfgqlu1rqtChpNrphX4LqGnsp5p0HJAAsRUGy1A1
N046lq3WkcIqgAAsOxwmKrXmjLb74pxcXWZxTZ9g5IHNO6bNVTs6cYpimxCra90QSwwUiRbkmFAi
KjSnrIYOhK0roSFc2TSGh6h7IaogtVwniCCas7p4y7sQjqLk2Kc2zTeCZe0/z9r+sL3LGvrbjj+I
pzQYoaXI4CnirV4n6k7gch9aqSf04Jvq/Wnb5/SyuPH/ANKcn9P4lQE+P0MsBzIH1q78h9a+st8l
cDuf5q5KoB4Kh7k8rJh4o5DFBg6pENimdVqnjbJfQq1VC3BObpldlQEqzc05L8EzKngrK7KzpxTg
uoA8VSid27L0VAq0VFXxV6ryrpNPBEk+CpdZJ7NmmkXyP615WKpU5LrZvinC+hZHsYBNh2WYYlah
SIpHkncFPIUyTWGXbRcFUdlPBGYFDcdjg0ToDJUw7NN8k4i3OiEdIPIhVgaZVTEMy4haQb5riFZE
WK6lSiHUSRdUl3pwXPipOBW6pWWFWXHJdJaWATFwbF0wk8l1DqC9pn6+1/WF7kxDEb25X+IowsZA
gLTKscsO5age4OfkuPcPrTgP3E/NXbvA/pWJ7v8A1KuGZ+pMC3IKxlzK6QArpgXOQT6TSidMbGxV
KAWdEChKdqYoqvcUxrxCcBs1Wq+Y7Lrp6iiwZdR8FbvQNk3ZmE7EHMLMohqoA05rPNXVFU9lnCII
TFPdVVLFNitJCB1UNlW+dnTFmVKp3bgmFuKcMV1DvCzWXY5L8FRdSoEIR80/kgBXmrK6epQLMiqU
CZPdVo+CaJWmQJBRYFlZVTp0SMKqUiOoMYrSVQ0VLYpjEHmKqtJG+mi1iZMRYELVjj2BwnjVM1Bm
mItcrU1E0RTJNHFVLlPiqhOFqOCFGK9oR/39px/GF7qtPW3KfxFdRdaWrgUNVYmrYFCQGl8AGVA/
OqYUHAKhoFfsb4q6ej4unh3piQE7lsQrd6aXcuk1VvyMk11QKsmHBZg4p/iqBZJwrJ8Mk+CeNM8l
wXHJdNc1aiqXVfFdN/grgK7rMcE/xQc+CtTNOKZhMajLBdICusGWovyCYADJObJhdVCoVUAKg7ll
2U8Fq1eCp4ok0ARmLm3LtZcVcumw7BTvTrMpohsymRdmVgQqdJXTLxTAOOCYxkBiUJODDGuCd0wT
25JnZWotBxQiLYojLsotUdsyBC6oEPwTSaIyKrIEC7LpHf21srsgXvgrBe0OPr7f9YXuXNPW3P6i
mIfigY3GNk0mIN15gwRES6qgcMgnojpqCqsB8VVyEzLSe5OLqgfgq0GV1VU7KmqcR8VUpxUKlFxW
RT3VRRUNCrUNlWo4K6oKYFNjmqpnqqXTJlxT359jOuOfYydZLMYFF+5OE+GfZknsmvzsqMOSzVL5
JiFSoRMqgK3enFswnlHvCYdyEH4y7LOrMq0VME7FW7DIlaQtIumWommSLp4lk4kmBTAlV7Myq3yV
exsVmUTjkq3NU2JwQMz1YAVXQZczQJ92WrgEDCLEIja0g3aRZUECeEgqbT8iCurZl3BNPamGxYoR
kNOb3RF3sU2K9p/n7f8AWF7rqIPq7jf9RTtVaolk0zQ5Jo3Ca3ZQuExTY4p8FiQmQ1dlBZO6oCXx
WQKaRcqzKoVKKtldW700r8FQpiEQKcFd+Ct3JnbgqVPHs6Q4TEdy48VZUPcVVfSnuFUdmfHsd1VU
XFWY5q4HKqoFzVKcVUrpVqJndOG053VaHPBVonavBaRQcESS8Udw8o80+abtzWQ4LzMvMSmiH5pi
w5JxcomV8EAmATtVWTpwExonjMjgV5gVyT9jtTiiZSd8AmjSKk58U70C6WHEhBjEfBVamK+Ukwun
EmTbpcDEISiXCd05NBdSkKxFAeCMt8/hbY1EFTOzSD9PJe0B83rbf9QXuZX/ABdx3+8U3yTeCrdA
gcwqWXBEDscl07pwiHAVKphRVr2NgnFuyiqqeC+hOAUyuOSrfsdnyWCqnF8wnuslmOytVS2SqKKp
KYqlCq0WYOKoWdVTBcVSid1fsr2XonNeSYUfG6vyVKHNVNcsF0inBV6llzVUNsWFyhGPljYKvaw7
KoOnHZWpTGyonwVECnT4djE0V+9VLrpDKlU8qnJBrZIJnoF0HS2K6pFuauSrqhLJ3fgVkr0ThMC7
rTKkkQbGhTkaoEvRTI6TPDgsl7WRiJA7sAe+QXunqDu7lcjqKbBNK4ThUVXVWVAsAqOVZgqm/Zaq
c9lOzguHYxQfx7a14hUFFULLis81SgKYhsiuODKpWXBUoV5W4rjiqeCoEHomfxTWT4piK5JxTgVW
qp2cMlmqKnZkuKyT5+CbBUqmIdk7lxZPbiFW+aJmNRwUjFqp7kmqc9y4YpgFdcEO0snkVSr4rSVp
7AOwAp05VAwyVKhVsumPemKIVLlcexlx7adt1W6cXCa/BVNMAmOHZ7Ov9/af/rC90Aw/G3P6ir2s
qmqYVRFmVS6oFlyWZ4qypb8qllRXqqV59llW2SoycHuVbYpw7KzLiFyV02GBVldPcfJUVR3oHHNc
uyq5ppW7Ga3YyYqngnAp2ZHsoaJirXxVKKtOK49jJ38U6pREuzVOSMxXWXEeCMZUI7KdrCyrfs59
jHw7HTYoBME5TJuzimK4dnFE50R4J1kO3n2N2Z/k1XVFPAVXti5EhvbZ0nhIL3QDf4243/UV1Fk0
Q5xT2TmpTjsf8h3YqvimTEgLSKkYqgqnZXY4jsyK44FXTtVOKjHsoaK79rx71TvC4JwW4FVqUWDJ
/h2OLZKleyqeNeyts+ygTGqyWaqG4q6omv2Fw/BUWYWkhwiLrJWVDp4I1fghAUO4W7k0cMChM0Ev
ms/ybpvyWAdeUhAuDwdOYlVVAnVVQ9jYK9EAtOdlIG7p8E35DX/4TxNl7QSpP1tuo+8F7ovT1tyg
+8UCzrVHkQgALq6qqpnTMmAsshmqLSuKa3NVqeCy4qruq0yVSsHCbHAqqGJTCxsyY1dV8ex3TFPY
rEp8MEx7imxVaFMyqrqlVknFeCyThVsU4+Ce4KoKcV1MmXFZH4JyuOKcUKZmOaBdjw7HVQnkPiqW
CqqB+KL1G2G7yrd6MZ2OKMX7+xuyg7ME4kHFw6czD5BMQSRiSqRAVKJ8M0wuVWVVVj3Kzck0ZMeK
cFxwVR2UVbJ7ocFT7SYJ/wAh2/4GXb7T/P2v6wvc5etuf1HsIwNQukUFHKqyonFSsk+OKpQYdn0p
jXiskzOmn3K1FXuTZWVSvpT4dlDRAEuFmuC4dlTTsfBZqluKrVOPBUBV2a4CpfNMRVE2OapTkmKp
Q4JtLqpbgqU4qteKt3q6rbAqoVKg4rUcMlS2Se6YdnTfJVuvmqluSrhmjLAB2QlIkeo8lRXTG+Cq
hFCIVexo0Tm5TIyzomBsmCbt59tCq15pzFjwXTLuKqHGa4I5KJCqq9llQJuxm/4PtP8AP2v6wvcg
/wDd3P6inBJbBPQLVAODdMQ3NVJTEVCqKK5K4JkxTEOqBcVW6qUxoyvVVVCnHZRVFD2XoqFPlcLM
KhYcUXq/Y4HimTHxTOiTTim7GVDRdVclYBO7jNVtxVC4Vk2GScVTG2ScBdNOCqnj0n4J7lZdlk5q
U1kxVbZogGsyIrjC3YHVMLJ8+x8eyq1GwQD0HYwwTuuPY35Absv20JVY1zC6TQoSNgqW4f8AEtX8
j2f+ft/1he5cufW3P6iqI0umlcKlCKgq6bEXTGoKqU101AVZO7FV7HHh2vG2Ko5WQRrqzVFwTXBu
qWVE8bZLSez60xVU4VfFV6nQMbDAYKlU7sDmnd+CZdNVUePYw7802CeHgUCSAVavZULqpxVLp2pm
FxTmyrUYEdju/JWVGTTriyB2y0T8DkobfmO25lzRgaA0daZYWPYSbBFrhHl2uaRWkUCrVGWSc/lc
O2gVlZMU8iHyVCByQES5Qe6p28P+Ixovaf5+3/WF7jV6+r1Zu2hn1Gyr6/8AIv7/APIv7/8AIh/j
v/BZU/1H8i/v6v4FX13/AIF/f/kVfX/kVPX/AJF/uP5F/f8A5FT1/wCRf3/5EX/1DfwL+/8AyL+/
/Iv7/foVPX7tC/v/AMi/vv8AwKnr/wAi/v8A8i/v/wAir/qG/gX9/wDkX9/+RV9f+RU9f+Rf3/5E
X9dv4FX1346FT124aF/f4+RFvX/kQb13/gVPWv8AuI//AGP5FT1/5FT1/wCRdPrv/Aq/6h/4FT1/
5F/f/kQ/xn/gX9/u0L+//Iur12w8jqvr9+hf39PDQj/jtj5F/f8A5F/fbF9DLo9X+RV9e/7i6fXb
HyMv77fwLp9bV/Cv7/doX9/+Rf3v5FPT/qND18jOtxv9RreurQv738ijr9fU9G0OyH/2H/gVP9R/
Iur/AFDcdCLf6i2Ghf7l/wCBV/1HfoX+4bhoX+5f/wDnZf7n/wAap/qOPkXV/qn/AP5r/c/+NH/7
P/jX+5b/APmqf6n/AMa/3P8A41/uf5F/uP5EP/sd+hdPr92hf7n+RV/1P/jR/wDs/wAi/wBz/wCN
f7r/AMaj/wDZfjoX+4/kX+4/kX+4/kX+4/kX+4/kX+4/kX+4/kX+4/kX+5/kX+4/kX+5/kX+5/8A
Gv8Ac/8AjVf9T/41/uf/ABr/AHP/AI1/uf8Axr2uj/UavW29OrQz6gzsv//Z" transform="matrix(1.0269 0 0 1.0269 82.9258 0)">
		</image>
	</g>
</g>
<g>
	<polygon fill="#FFFFFF" points="847.811,672.482 801.299,703.475 847.973,732.689 894.504,702.262 	"/>
	<path d="M891.449,704.658c0.476,0.582,1.298,0.996,2.054,1.418c0.246,0.137,0.649,0.254,0.673,0.486
		c0.034,0.316-1.021,0.9-1.307,1.084c-11.106,7.225-22.025,14.174-33.153,21.381c-3.245,2.104-6.71,4.393-10.055,6.541
		c-0.422,0.27-1.164,0.824-1.533,0.824c-0.395-0.002-1.11-0.563-1.531-0.824c-14.561-9.014-29.028-17.896-43.656-26.873
		c-0.315-0.193-1.329-0.656-1.348-0.973c-0.023-0.418,0.909-0.697,1.234-0.861c0.521-0.258,1.008-0.41,1.346-0.746
		c-0.734-0.707-1.678-1.158-2.616-1.756c-0.254-0.162-0.728-0.396-0.748-0.674c-0.029-0.389,0.831-0.832,1.31-1.16
		c1.51-1.025,2.871-1.945,4.371-2.951c11.726-7.854,23.346-15.717,35.098-23.586c1.952-1.307,3.827-2.771,5.831-3.926
		c0.472-0.27,0.896,0.105,1.31,0.375c3.625,2.387,7.352,4.793,10.951,7.102c7.869,5.055,15.55,10.057,23.472,15.1
		c3.343,2.129,6.872,4.406,10.28,6.617c0.321,0.209,1.366,0.809,1.383,1.01c0.024,0.307-0.542,0.514-0.785,0.672
		C893.202,703.471,892.171,704.102,891.449,704.658 M848.127,673.184c-0.102-0.195-0.312-0.283-0.485-0.41
		c-0.325,0.063-0.355,0.418-0.747,0.41c-0.297,0.215-0.514,0.512-0.936,0.6c-0.161,0.299-0.559,0.361-0.746,0.635h-0.188
		c-0.309,0.328-0.719,0.551-1.047,0.859c-0.404-0.004-0.404,0.395-0.822,0.375c-0.232,0.289-0.511,0.535-0.936,0.635
		c-0.281,0.24-0.485,0.561-0.934,0.635c-0.264,0.246-0.527,0.496-0.933,0.598c-0.133,0.332-0.573,0.35-0.749,0.637h-0.187
		c-0.38,0.316-0.681,0.715-1.234,0.861c-0.202,0.256-0.53,0.389-0.746,0.633h-0.224c-0.249,0.252-0.476,0.521-0.898,0.6
		c-0.178,0.281-0.52,0.402-0.748,0.637c-0.449,0.07-0.618,0.426-0.934,0.633h-0.225c-0.248,0.252-0.475,0.523-0.897,0.598
		c-0.104,0.094-0.119,0.281-0.336,0.264c0.02,0.193-0.104,0.244-0.298,0.223c-0.141,0.197-0.328,0.346-0.637,0.375
		c-0.057,0.068-0.064,0.186-0.111,0.264c-0.377,0.02-0.418,0.379-0.822,0.371c0.019,0.195-0.105,0.244-0.3,0.223
		c-0.199,0.15-0.304,0.396-0.636,0.414c-0.09,0.23-0.313,0.336-0.598,0.375c-0.047,0.137-0.107,0.264-0.337,0.223
		c-0.056,0.066-0.063,0.184-0.111,0.258c-0.268-0.041-0.216,0.234-0.524,0.152c0.021,0.195-0.103,0.246-0.299,0.223
		c-0.047,0.277-0.34,0.311-0.598,0.375c-0.042,0.158-0.086,0.313-0.336,0.262c-0.15,0.488-0.839,0.434-1.047,0.859
		c-0.146-0.035-0.094,0.133-0.261,0.074c-0.007,0.059,0.013,0.141-0.038,0.152H826c-0.279,0.217-0.457,0.539-0.897,0.598
		c-0.192,0.205-0.385,0.41-0.708,0.486c-0.01,0.053,0.012,0.135-0.039,0.146c-0.453,0.008-0.484,0.439-0.898,0.486
		c-0.005,0.059,0.016,0.141-0.036,0.152c-0.294-0.07-0.228,0.219-0.524,0.148c-0.09,0.258-0.292,0.406-0.598,0.449
		c-0.383,0.324-0.715,0.703-1.27,0.859c-0.157,0.191-0.28,0.418-0.635,0.412c-0.245,0.252-0.496,0.498-0.897,0.598
		c-0.44,0.545-1.221,0.748-1.683,1.271h-0.223c-0.215,0.232-0.49,0.438-0.711,0.598c-0.153,0.109-0.354,0.152-0.523,0.26
		c-0.287,0.186-0.529,0.613-0.935,0.598v0.188c-0.413,0.051-0.491,0.432-0.935,0.449c-0.269,0.256-0.494,0.553-0.934,0.635
		c-0.253,0.262-0.495,0.527-0.936,0.598c-0.211,0.313-0.512,0.535-0.934,0.637c-0.264,0.26-0.526,0.521-0.936,0.635
		c-0.16,0.313-0.594,0.354-0.746,0.672c-0.045-0.041-0.068-0.105-0.188-0.072c0.028,0.215-0.086,0.285-0.299,0.26
		c-0.048,0.289-0.383,0.293-0.637,0.375c0.021,0.193-0.104,0.244-0.299,0.223c-0.032,0.316-0.366,0.332-0.637,0.412
		c0.021,0.195-0.103,0.244-0.297,0.223c-0.053,0.285-0.366,0.309-0.637,0.375c0.029,0.215-0.087,0.289-0.298,0.262
		c-0.089,0.246-0.354,0.316-0.635,0.375c0.02,0.193-0.105,0.242-0.3,0.223c-0.152,0.199-0.309,0.391-0.635,0.41
		c0.02,0.195-0.104,0.246-0.3,0.227c-0.036,0.074-0.073,0.148-0.112,0.225c-0.691,0.229-0.986,0.857-1.756,1.008
		c0.05,0.234-0.155,0.205-0.262,0.262c-0.074,0.037-0.139,0.201-0.188,0.225c-0.051,0.023-0.139-0.027-0.187,0
		c-0.034,0.018-0.058,0.133-0.112,0.148c-0.249,0.078-0.455,0.207-0.449,0.375c0.015,0.379,0.736,0.344,0.861,0.707
		c0.19-0.012,0.344,0.008,0.336,0.188c0.195-0.018,0.318,0.031,0.299,0.227c0.294-0.059,0.256,0.219,0.561,0.148v0.152
		c0.175,0.023,0.32,0.076,0.374,0.223c0.422,0.1,0.673,0.375,0.935,0.633c0.169-0.018,0.278,0.023,0.299,0.152
		c0.348,0,0.509,0.188,0.636,0.41c0.335-0.047,0.325,0.246,0.635,0.225v0.15c0.257,0.064,0.55,0.098,0.6,0.375h0.26
		c-0.054,0.141,0.104,0.07,0.075,0.188c0.315,0.006,0.427,0.219,0.599,0.369h0.262c0.283,0.316,0.675,0.521,1.009,0.787
		c0.306,0.016,0.463,0.186,0.599,0.373h0.262c0.278,0.469,0.991,0.504,1.308,0.936c0.336,0.014,0.438,0.26,0.635,0.41
		c0.307,0.018,0.463,0.184,0.597,0.373c0.318-0.08,0.263,0.213,0.562,0.15c-0.056,0.168,0.109,0.113,0.076,0.264
		c0.408,0.074,0.67,0.299,0.934,0.521c0.195-0.02,0.319,0.029,0.299,0.223c0.191-0.014,0.345,0.006,0.337,0.188
		c0.621,0.117,0.877,0.594,1.456,0.75c0.005,0.27,0.465,0.082,0.413,0.41c0.293-0.07,0.228,0.219,0.522,0.148
		c-0.051,0.152,0.082,0.117,0.076,0.227c0.297,0,0.469,0.125,0.56,0.334c0.662,0.16,0.983,0.662,1.645,0.822
		c0.083,0.303,0.499,0.273,0.599,0.561h0.262c-0.047,0.123,0.119,0.029,0.073,0.152c0.438-0.018,0.41,0.434,0.86,0.408
		c0.201,0.387,0.771,0.4,1.01,0.748h0.225c-0.054,0.143,0.102,0.072,0.072,0.188c0.328,0.01,0.457,0.215,0.637,0.373
		c0.416,0.105,0.686,0.363,0.935,0.637c0.17-0.021,0.278,0.02,0.298,0.148c0.718,0.193,1.061,0.76,1.797,0.934
		c-0.053,0.152,0.079,0.121,0.075,0.227c0.168-0.021,0.276,0.02,0.297,0.148c0.374-0.025,0.437,0.262,0.636,0.41
		c0.316,0.021,0.462,0.213,0.635,0.375c0.196-0.02,0.32,0.029,0.299,0.227h0.225c-0.005,0.104,0.112,0.084,0.112,0.184
		c0.307,0.018,0.463,0.186,0.598,0.375c0.308,0.027,0.484,0.188,0.636,0.373c0.181-0.018,0.303,0.021,0.299,0.188
		c0.328,0.006,0.457,0.217,0.636,0.373c0.314,0.008,0.429,0.223,0.599,0.375h0.261c0.39,0.559,1.234,0.658,1.645,1.195
		c0.306,0.02,0.462,0.186,0.599,0.375c0.17-0.021,0.279,0.02,0.299,0.148c0.355-0.006,0.479,0.219,0.637,0.41
		c0.576,0.17,0.934,0.563,1.494,0.748c0.368,0.58,1.266,0.629,1.644,1.197c0.386,0.086,0.699,0.246,0.898,0.521h0.261
		c0.08,0.182,0.319,0.205,0.374,0.412c0.294-0.061,0.255,0.217,0.562,0.15c0.406,0.514,1.163,0.68,1.607,1.158
		c0.317-0.082,0.26,0.213,0.561,0.148c-0.056,0.168,0.107,0.117,0.072,0.264c0.532,0.076,0.725,0.498,1.234,0.598v0.148
		c0.189-0.014,0.346,0.004,0.336,0.186c0.419-0.006,0.411,0.414,0.859,0.375c0.056,0.193,0.34,0.16,0.375,0.375h0.225
		c0.043,0.133,0.129,0.219,0.335,0.188c-0.05,0.148,0.083,0.117,0.075,0.223c0.296-0.07,0.229,0.219,0.524,0.15
		c-0.052,0.15,0.08,0.117,0.073,0.225c0.577,0.059,0.72,0.551,1.271,0.633v0.152h0.524c0.007-0.059-0.015-0.141,0.036-0.152
		c0.418-0.043,0.457-0.463,0.896-0.484c0.008-0.055-0.012-0.137,0.037-0.148c0.598-0.201,0.913-0.686,1.534-0.861
		c0.247-0.264,0.523-0.498,0.934-0.598c0.114-0.236,0.31-0.387,0.636-0.41c0.237-0.398,0.822-0.451,1.046-0.861
		c0.395-0.006,0.396-0.402,0.823-0.375c0.02-0.129,0.129-0.17,0.299-0.146c0.231-0.293,0.507-0.541,0.935-0.639
		c0.264-0.258,0.514-0.531,0.935-0.633c0.134-0.205,0.32-0.352,0.636-0.375c0.303-0.219,0.479-0.566,0.935-0.637
		c0.271-0.336,0.724-0.494,1.045-0.783h0.188c0.047-0.287,0.368-0.303,0.635-0.373c-0.021-0.195,0.104-0.246,0.299-0.223
		c-0.021-0.197,0.105-0.246,0.3-0.227c0.159-0.189,0.318-0.379,0.635-0.412c0.129-0.193,0.305-0.342,0.598-0.373
		c0.27-0.266,0.504-0.566,0.971-0.635c0.138-0.189,0.293-0.357,0.6-0.375c0.405-0.428,0.949-0.721,1.495-1.008
		c0.091-0.283,0.405-0.342,0.709-0.412c0.091-0.232,0.313-0.336,0.598-0.373c0.004-0.193,0.116-0.281,0.337-0.264
		c0.094-0.23,0.313-0.334,0.6-0.371c-0.023-0.209,0.139-0.234,0.336-0.227c0.084-0.188,0.332-0.215,0.412-0.41
		c0.405-0.105,0.652-0.367,0.934-0.598c0.398-0.012,0.388-0.434,0.822-0.41c0.278-0.234,0.508-0.516,0.935-0.598
		c0.146-0.205,0.304-0.395,0.635-0.414c0.038-0.072,0.073-0.148,0.111-0.223c0.468-0.094,0.625-0.496,1.122-0.561
		c0.007-0.057-0.015-0.139,0.039-0.15c0.379-0.07,0.465-0.43,0.896-0.445c0.006-0.059-0.014-0.141,0.037-0.152
		c0.227-0.059,0.281-0.289,0.598-0.262c0.239-0.26,0.504-0.492,0.896-0.598c0.26-0.277,0.529-0.543,0.973-0.635
		c0.17-0.152,0.283-0.365,0.599-0.375c0.095-0.252,0.33-0.365,0.634-0.41c-0.027-0.326,0.434-0.166,0.449-0.447
		c0.459-0.104,0.611-0.512,1.12-0.561c0.007-0.059-0.013-0.141,0.039-0.152c0.395-0.064,0.446-0.475,0.896-0.486
		c0.006-0.055-0.015-0.135,0.037-0.148c0.411-0.162,0.649-0.496,1.12-0.596c0.254-0.408,0.844-0.48,1.123-0.861h0.188
		c0.152-0.199,0.305-0.393,0.635-0.41c0.139-0.199,0.31-0.363,0.635-0.375c-0.033-0.207,0.104-0.246,0.301-0.223
		c0.125-0.223,0.315-0.383,0.634-0.414c-0.019-0.193,0.104-0.242,0.299-0.223c-0.007-0.094,0.084-0.09,0.112-0.148
		c0.399-0.123,0.639-0.41,0.936-0.639c0.395-0.125,0.684-0.359,0.933-0.633c0.405,0.006,0.422-0.377,0.823-0.375
		c0.295-0.229,0.508-0.539,0.936-0.635c0.173-0.162,0.299-0.373,0.634-0.373c0.039-0.076,0.073-0.152,0.112-0.225
		c0.205,0.043,0.228-0.098,0.299-0.188c0.215,0.025,0.279-0.094,0.337-0.225c0.266,0.053,0.199-0.225,0.486-0.148
		c-0.028-0.328,0.433-0.166,0.447-0.451c0.475-0.135,0.697-0.521,1.196-0.633c0.007-0.059-0.012-0.141,0.039-0.15
		c0.423-0.037,0.463-0.457,0.896-0.486c0.007-0.055-0.014-0.137,0.036-0.148c0.524-0.088,0.709-0.514,1.198-0.633
		c-0.053-0.154,0.079-0.123,0.074-0.227c-0.426-0.371-1.006-0.588-1.384-1.01c-0.326-0.023-0.522-0.176-0.635-0.41
		c-0.433,0.021-0.438-0.387-0.86-0.375c0.007-0.105-0.127-0.072-0.074-0.223c-0.448,0.023-0.42-0.428-0.861-0.412
		c0.048-0.121-0.119-0.027-0.073-0.15h-0.224c-0.34-0.445-0.997-0.574-1.346-1.008h-0.225c0.046-0.123-0.121-0.029-0.074-0.152
		c-0.195,0.023-0.321-0.029-0.301-0.223c-0.209,0.01-0.366-0.033-0.335-0.26c-0.14-0.043-0.106,0.09-0.226,0.072
		c0.021-0.193-0.103-0.244-0.299-0.223c0.052-0.152-0.081-0.117-0.073-0.225c-0.999-0.246-1.44-1.053-2.431-1.309
		c0.035-0.148-0.129-0.096-0.075-0.262c-0.415,0.006-0.423-0.402-0.86-0.373c-0.146-0.328-0.605-0.34-0.784-0.635h-0.224v-0.15
		c-0.434-0.041-0.563-0.385-0.935-0.486v-0.15c-0.269-0.066-0.571-0.102-0.636-0.373c-0.788-0.168-1.08-0.84-1.869-1.008v-0.15
		c-0.475,0-0.48-0.467-0.934-0.486V692.1c-0.258-0.066-0.551-0.098-0.6-0.373c-0.182,0.008-0.324-0.025-0.336-0.188
		c-0.313-0.01-0.426-0.223-0.598-0.375c-0.449-0.012-0.514-0.408-0.934-0.447c0.012-0.086-0.006-0.145-0.038-0.188
		c-0.294-0.031-0.47-0.18-0.598-0.373c-0.195,0.018-0.319-0.029-0.299-0.225c-0.122-0.021-0.189,0.012-0.224,0.074
		c-0.088-0.213-0.423-0.176-0.411-0.486c-0.171,0.021-0.279-0.02-0.3-0.148c-0.336,0-0.462-0.213-0.635-0.375
		c-0.432,0.006-0.404-0.445-0.86-0.41c0.006-0.105-0.127-0.074-0.075-0.227c-0.426,0.016-0.429-0.395-0.859-0.371
		c-0.014-0.1-0.119-0.105-0.075-0.264c-0.114-0.029-0.045,0.129-0.188,0.076c-0.407-0.387-1.047-0.547-1.382-1.012h-0.225
		c-0.055-0.195-0.337-0.16-0.374-0.371c-0.192,0.006-0.35-0.023-0.337-0.227c-0.293,0.07-0.227-0.219-0.521-0.148
		c-0.192-0.406-0.785-0.41-1.01-0.785c-0.298,0.094-0.342-0.211-0.411-0.262c-0.163-0.121-0.454-0.137-0.524-0.371
		c-0.267-0.07-0.587-0.088-0.633-0.375c-0.435,0.008-0.422-0.43-0.861-0.414c-0.035-0.213-0.318-0.178-0.374-0.371
		c-0.332-0.018-0.49-0.211-0.636-0.41c-0.43-0.082-0.684-0.34-0.935-0.602c-0.169,0.021-0.276-0.02-0.3-0.148
		c-0.218,0.023-0.332-0.064-0.335-0.262c-0.307-0.02-0.463-0.184-0.599-0.375c-0.313-0.035-0.539-0.156-0.635-0.41
		c-0.43,0.02-0.422-0.398-0.858-0.371c-0.149-0.305-0.572-0.328-0.712-0.637h-0.223c0.045-0.121-0.123-0.027-0.075-0.148
		c-0.413,0-0.419-0.404-0.859-0.375c0.051-0.152-0.083-0.117-0.075-0.227h-0.226c-0.218-0.379-0.769-0.424-1.008-0.781
		c-0.489,0.016-0.496-0.453-0.936-0.488v-0.15c-0.267-0.068-0.588-0.084-0.635-0.373h-0.224c-0.27-0.33-0.778-0.418-1.01-0.785
		c-0.311,0.012-0.398-0.199-0.636-0.26v-0.152c-0.305,0.082-0.229-0.217-0.523-0.146c-0.08-0.258-0.363-0.311-0.635-0.375
		c0.051-0.152-0.081-0.117-0.075-0.223c-0.221,0.018-0.334-0.068-0.336-0.264c-0.516-0.158-0.941-0.404-1.233-0.785
		c-0.336,0.049-0.326-0.246-0.636-0.225v-0.148c-0.307-0.02-0.461-0.188-0.599-0.373c-0.184,0.008-0.325-0.023-0.337-0.188
		C848.761,673.723,848.688,673.211,848.127,673.184"/>
	<path fill-rule="evenodd" clip-rule="evenodd" fill="#231F20" d="M854.894,680.512c0.198,0.773,0.747,1.195,0.86,2.055
		c0.178,0.07,0.116,0.383,0.298,0.449c0.044,0.438,0.177,0.795,0.337,1.121c0.028,0.746,0.461,1.086,0.374,1.941
		c0.109,0.094,0.112,0.287,0.224,0.375v0.41c-0.034,0.123,0.097,0.078,0.112,0.152c-0.009,0.445,0.239,0.85,0.299,1.307
		c0.021,0.17-0.044,0.387,0,0.563c0.021,0.076,0.133,0.146,0.15,0.223c0.058,0.27,0.034,0.65,0.073,0.973
		c0.013,0.09,0.016,0.527,0.074,0.748c0.023,0.084,0.133,0.137,0.15,0.225c0.073,0.379,0.031,0.801,0.073,1.195
		c-0.032,0.121,0.099,0.078,0.114,0.148c0.032,0.43-0.102,1.023,0.148,1.234c0.039,0.252-0.096,0.328-0.148,0.486
		c-0.346,0.07-0.315-0.234-0.485-0.334c-0.044-0.121,0.058-0.094,0.073-0.152c-0.113-0.672-0.133-1.436-0.15-2.203
		c-0.362-0.188-0.114-0.984-0.297-1.348c-0.031-0.117,0.033-0.141,0.073-0.188c-0.23-0.697-0.257-1.225-0.373-1.943
		c-0.373-0.34-0.119-1.301-0.485-1.645c-0.043-0.117,0.058-0.092,0.074-0.15c-0.059-0.029-0.057-0.117-0.149-0.111
		c-0.004-0.381,0.016-0.789-0.224-0.934c-0.08-0.32,0.014-0.813-0.264-0.936c0.019-0.178-0.003-0.318-0.149-0.338
		c-0.046-0.119,0.122-0.027,0.077-0.148c-0.221-0.688-0.563-1.258-0.749-1.98c-0.068-0.031-0.104-0.098-0.187-0.111
		c0.02-0.518-0.36-0.639-0.448-1.049c-0.161-0.076-0.325-0.148-0.299-0.41c-0.138-0.012-0.174-0.125-0.337-0.111
		c-0.051-0.428-0.929-0.326-1.271-0.225c-0.05,0.012-0.03,0.094-0.037,0.148c-0.472,0.029-0.666,0.332-0.71,0.785
		c-0.151-0.051-0.118,0.082-0.225,0.074c0.047,0.223-0.088,0.262-0.186,0.336c0.016,0.504-0.298,0.676-0.3,1.16
		c-0.12-0.047-0.029,0.121-0.149,0.076c-0.101,0.572-0.197,1.146-0.412,1.607c-0.014,0.584-0.123,1.07-0.298,1.492
		c0.265,0.316-0.285,0.758,0,1.086c-0.282,0.613-0.109,1.684-0.375,2.318c-0.045,0.156,0.061,0.164,0.076,0.262
		c-0.27,0.434-0.097,1.008-0.15,1.568c-0.087,0.9-0.09,1.928-0.075,2.881c-0.179-0.006-0.103,0.242-0.187,0.334
		c0.206,0.215-0.154,0.568,0.074,0.785c-0.064,0.264-0.104,0.498,0,0.748c-0.158,0.225-0.053,0.688,0.112,0.822v2.057
		c0.058,0.133-0.126,0.021-0.112,0.111c0.432,0.467-0.025,1.822,0.338,2.355c-0.156,0.322-0.082,0.979,0.074,1.232
		c0.044,0.441-0.092,1.064,0.074,1.383c-0.015,0.441,0.004,0.844,0.15,1.121c-0.282,0.436,0.292,0.945,0,1.383
		c0.202,0.52,0.215,1.232,0.298,1.871c0.009,0.053,0.112,0.012,0.112,0.074c0.037,0.871,0.161,1.654,0.375,2.352
		c-0.022,0.572,0.055,1.043,0.224,1.424c0.031,0.117-0.033,0.141-0.075,0.186c0.109,0.352,0.293,0.941,0.226,1.232
		c0.087,0.049,0.036,0.238,0.186,0.225c0.224,1.223,0.587,2.307,0.823,3.514c0.135,0.076,0.12,0.303,0.261,0.373
		c0.049,0.861,0.491,1.33,0.599,2.131c0.079,0.033,0.063,0.16,0.187,0.15c-0.284,0.059,0.218,0.461,0.299,0.598
		c0.026,0.045-0.013,0.137,0,0.188c0.033,0.123,0.225,0.152,0.149,0.41c0.012,0.053,0.094,0.031,0.15,0.039
		c0.049,0.236,0.078,0.494,0.336,0.523c0.029,0.48,0.486,0.533,0.674,0.857h0.26c0.212,0.467,0.813,0.156,1.197,0.074
		c0.884-0.66,1.356-1.732,1.83-2.801c0.048-0.123-0.118-0.029-0.074-0.152c0.221-0.236,0.381-0.539,0.374-1.008
		c0.115,0.029,0.046-0.129,0.188-0.074c-0.01-0.322,0.128-0.494,0.075-0.857c0.435-0.252,0.116-1.256,0.523-1.535
		c0.016-0.158-0.055-0.4,0.073-0.449c0.056-0.141-0.103-0.07-0.073-0.184c0.199-0.363-0.081-1.201,0.336-1.348
		c0.085-0.439-0.024-0.844,0.224-1.234c0.006-0.105-0.127-0.072-0.074-0.225c0.29-0.475,0.116-0.967,0.223-1.643
		c0.032-0.197,0.192-0.373,0.225-0.561c0.067-0.387-0.063-0.736,0.149-0.973c0.049-0.146-0.121-0.076-0.074-0.223
		c0.22-0.643-0.063-1.365-0.149-1.945c0.021-0.396-0.005-0.744-0.15-0.973c-0.049-0.152,0.081-0.117,0.076-0.223
		c-0.23-0.439-0.151-1.318-0.151-2.244c0.003-0.676-0.069-1.391,0-1.832c0.039-0.23,0.287-0.445,0.075-0.635
		c0.054-0.229,0.112-0.287,0-0.521c0.21-0.258,0.115-0.621,0.15-0.938c0.026-0.221,0.179-0.449,0.224-0.709
		c0.05-0.291,0.006-0.574,0.076-0.82c0.069-0.248,0.235-0.453,0.188-0.713c0.072,0,0.047-0.102,0.147-0.072
		c0.031-0.543,0.298-0.854,0.3-1.422c0.261-0.313,0.345-0.803,0.561-1.158c0.082-0.691,0.457-1.088,0.673-1.645
		c0.142,0.055,0.072-0.102,0.188-0.076c0.076-0.43,0.497-0.521,0.597-0.932c0.327-0.086,0.48-0.344,0.71-0.525
		c0.185-0.002,0.352-0.023,0.375-0.186h0.709v0.113c0.565,0.07,0.738,0.533,1.084,0.82c0.066,0.281,0.146,0.551,0.41,0.637
		c0.257,0.729,0.688,1.279,0.824,2.129c0.005,0.043,0.068,0.031,0.111,0.039c0.291,1.539,0.701,2.961,0.822,4.672
		c0.125,0.086,0.074,0.348,0.188,0.449c0.043,0.117-0.059,0.09-0.075,0.148c0.077,0.125,0.251,0.453,0.075,0.598
		c0.114,0.07,0.02,0.352,0.148,0.41c-0.079,1.539,0.173,3.561-0.148,4.785c0.096,0.402,0.098,0.898,0.299,1.197h0.223
		c0.055,0.059,0.08,0.146,0.113,0.223c0.169-0.021,0.278,0.021,0.299,0.15c0.41-0.012,0.571,0.227,1.047,0.148
		c0.207-0.141,0.469-0.229,0.747-0.299c0.12-0.553,0.565-0.779,0.522-1.494c0.115,0.027,0.046-0.129,0.188-0.076
		c0.045-0.389,0.084-0.785,0.149-1.158c0.121,0.045,0.028-0.121,0.15-0.074c0.199-1.23,0.656-2.207,0.934-3.363
		c0.245-0.215,0.375-0.547,0.785-0.6c0.062-0.049,0.128-0.096,0.146-0.186c0.31-0.08,0.687-0.09,1.086-0.074
		c-0.002,0.141,0.18,0.094,0.15,0.26c0.513-0.049,0.411,0.51,0.785,0.6c-0.003,0.436,0.162,0.707,0.374,0.934
		c-0.06,0.406,0.204,0.492,0.223,0.82c0.068,0.035,0.104,0.1,0.188,0.113c-0.034,0.309,0.2,0.348,0.223,0.6
		c0.208,0.213,0.881,0.594,1.009,0.037c0.421-0.105,0.456-0.592,0.86-0.709c-0.055-0.305,0.354-0.146,0.299-0.451
		c0.161-0.088,0.438-0.059,0.411-0.334c0.239-0.074,0.622-0.002,0.636-0.299c0.322-0.018,0.598-0.078,0.785-0.227
		c0.459,0.057,0.926-0.078,1.309,0c0.237,0.049,0.364,0.285,0.598,0.227c-0.004,0.129,0.153,0.094,0.15,0.223
		c0.188-0.094,0.159,0.105,0.261,0.15c0.236,0.107,0.46,0.33,0.598,0.598c0.07,0.135,0.36,0.24,0.485,0.412
		c0.061,0.078,0.036,0.176,0.075,0.223c0.096,0.113,0.196,0.104,0.3,0.188c0.06,0.047,0.076,0.176,0.15,0.223
		c0.043,0.029,0.138-0.023,0.185,0c0.074,0.035,0.08,0.24,0.301,0.152c0.186,0.234,0.543,0.301,0.934,0.334
		c-0.013,0.064,0.029,0.072,0.075,0.076c0.036,0.162-0.041,0.205-0.075,0.299h-0.859c-0.082-0.344-0.663-0.186-0.71-0.561h-0.225
		c-0.098-0.301-0.41-0.387-0.598-0.6c0.025-0.225-0.06-0.338-0.262-0.334c0.057-0.17-0.111-0.117-0.076-0.264
		c-0.131-0.08-0.307-0.111-0.298-0.336c-0.487-0.271-1.184-0.725-2.206-0.561c-0.197,0.031-0.439,0.17-0.634,0.223h-0.338
		c-0.05,0.012-0.03,0.094-0.037,0.152c-0.519,0.176-0.8,0.596-1.233,0.859v0.26c-0.305-0.006-0.211,0.387-0.523,0.375
		c-0.036,0.248-0.352,0.221-0.411,0.447c-1.129,0.197-1.171-0.695-1.644-1.156c-0.044-0.281-0.091-0.559-0.375-0.6
		c-0.032-0.252,0.021-0.596-0.223-0.637c0.019-0.17-0.02-0.275-0.149-0.299v-0.262c-0.289-0.398-0.56-0.811-1.384-0.672
		c-0.203,0.123-0.363,0.283-0.598,0.375c-0.041,0.283-0.15,0.496-0.299,0.672c-0.055,0.143,0.102,0.072,0.074,0.186
		c-0.181,0.281-0.313,0.609-0.375,1.01h-0.112v0.561c-0.119-0.045-0.028,0.123-0.148,0.076c-0.041,0.545-0.264,0.908-0.299,1.457
		c-0.122-0.045-0.028,0.121-0.149,0.076v0.408c-0.374,0.674-0.395,1.699-1.048,2.096c-0.038,0.072-0.073,0.146-0.111,0.223
		c-0.457,0.018-0.776,0.17-1.196,0.225c-0.137-0.023-0.416-0.266-0.523-0.074c-0.408-0.094-0.701-0.605-1.195-0.373
		c0.015,0.365-0.244,0.615-0.299,0.973c-0.036,0.24,0.053,0.486,0,0.744c-0.027,0.137-0.188,0.234-0.225,0.375
		c-0.104,0.402-0.031,0.879-0.149,1.271c-0.358,0.303-0.288,1.033-0.635,1.346c-0.055,0.143,0.103,0.072,0.074,0.188
		c-0.106-0.008-0.073,0.125-0.225,0.074c0.046,0.256-0.13,0.293-0.075,0.561c-0.15-0.053-0.117,0.082-0.225,0.074v0.225
		c-0.173,0.125-0.25,0.346-0.335,0.563c-0.111,0.086-0.333,0.066-0.298,0.297c-0.53,0.008-1.1,0.555-1.57,0.148
		c-0.141-0.039-0.108,0.094-0.226,0.076c0.046-0.119-0.12-0.029-0.075-0.15c-0.43,0.045-0.561-0.213-0.859-0.297
		c-0.157-0.416-0.508-0.641-0.598-1.123c-0.483-0.451-0.687-1.182-0.935-1.869c-0.06-0.053-0.146-0.076-0.186-0.148
		c-0.044-0.117,0.057-0.092,0.073-0.15c-0.287-0.299-0.308-0.861-0.523-1.232c-0.727,0.283-0.215,1.801-0.636,2.393
		c-0.044,0.158,0.062,0.164,0.075,0.26c-0.385,0.338-0.072,1.375-0.448,1.721v0.447c-0.137,0.305-0.223,0.959-0.187,1.199
		c-0.206,0.328-0.237,0.834-0.3,1.305c-0.261,0.125-0.223,0.551-0.223,0.936c-0.21,0.104-0.231,0.395-0.226,0.709h-0.112
		c-0.072,0.441-0.37,0.652-0.373,1.16c-0.218,0.244-0.344,0.578-0.448,0.936c-0.142-0.057-0.072,0.1-0.188,0.072
		c0.001,0.461-0.406,0.52-0.448,0.936c-0.421,0.166-0.464,0.709-0.897,0.861c-0.734,0.088-1.301,0.324-1.831,0
		c-0.04-0.023-0.094-0.199-0.149-0.227c-0.035-0.016-0.12,0.025-0.149,0c-0.13-0.107-0.111-0.418-0.411-0.412
		c0.031-0.229-0.187-0.211-0.3-0.299c0.063-0.234-0.113-0.234-0.224-0.297c-0.084-0.391-0.222-0.729-0.486-0.936
		c0.001-0.338-0.182-0.49-0.374-0.635c-0.001-0.471-0.271-0.676-0.3-1.121c-0.063-0.035-0.06-0.141-0.186-0.113
		c0.036-0.262-0.131-0.316-0.074-0.596c-0.321-0.316-0.261-1.014-0.598-1.311c-0.041-0.139,0.092-0.107,0.073-0.225
		c-0.371-0.908-0.61-1.955-0.935-2.914c0.126-0.701-0.511-1.643-0.41-2.318c-0.209-0.375-0.145-1.027-0.375-1.383
		c0.066-0.279-0.098-0.906-0.224-1.234c0.235-0.254-0.24-0.6,0-0.857c-0.261-0.631-0.22-1.354-0.411-1.908
		c0.091-0.811-0.351-2.105-0.225-2.951c-0.184-0.414-0.072-1.123-0.225-1.568c-0.053-0.67,0.133-1.582-0.15-2.021
		c0.051-0.258,0.094-0.354,0-0.598c-0.02-0.082,0.074-0.053,0.076-0.111c0.007-0.59,0.023-1.285-0.076-1.719
		c-0.084-0.379,0.129-0.645-0.035-0.973h-0.149c-0.069,0.469-0.216,1.127-0.15,1.496c-0.342,0.652-0.066,1.926-0.374,2.617
		c-0.053,0.141,0.103,0.07,0.074,0.184c-0.121-0.045-0.027,0.123-0.147,0.076v1.232c0.031,0.121-0.125,0.051-0.113,0.15
		c0.123,0.412-0.136,0.779-0.149,1.123c-0.008,0.203,0.1,0.297,0.073,0.447c-0.015,0.1-0.124,0.174-0.148,0.299
		c-0.122,0.664,0.046,1.445-0.224,2.057c0.135,0.152,0.053,0.518,0.075,0.785c-0.392,0.504-0.052,1.742-0.374,2.314
		c-0.047,0.123,0.119,0.031,0.073,0.152c-0.214,0.223-0.138,0.732-0.148,1.156c0.033,0.121-0.124,0.051-0.112,0.152
		c0.058,0.504-0.105,0.789-0.075,1.27c-0.236,0.486-0.2,1.244-0.298,1.869c-0.314,0.557-0.158,1.588-0.562,2.057
		c-0.108,1.023-0.341,1.928-0.673,2.729c-0.055,0.141,0.104,0.07,0.074,0.186c-0.23,0.156-0.165,0.607-0.41,0.748
		c-0.013,0.076,0.008,0.117,0.074,0.111c-0.141,0.121-0.103,0.422-0.297,0.486c0.062,0.422-0.246,0.479-0.227,0.859
		c-0.128,0.021-0.171,0.131-0.148,0.301c-0.129,0.018-0.17,0.127-0.15,0.299c-0.153,0.018-0.209,0.137-0.186,0.336
		c-0.234,0.289-0.578,0.469-0.748,0.822c-0.336,0.148-0.614,0.357-0.973,0.486c-0.117,0.043-0.09-0.061-0.149-0.074
		c-0.396,0.346-0.857-0.041-1.31-0.076c0.007-0.105-0.125-0.072-0.072-0.223c-0.631-0.17-0.896-0.699-1.159-1.236
		c-0.013-0.049-0.095-0.029-0.15-0.035c0.075-0.287-0.202-0.221-0.148-0.486c-0.077-0.049-0.194-0.059-0.263-0.113
		c-0.029-0.607-0.352-0.918-0.449-1.457c-0.057-0.029-0.055-0.117-0.148-0.111c0.059-0.484-0.127-0.721-0.337-0.936
		c0.036-0.471-0.12-0.752-0.298-1.01c0.087-0.609-0.405-1.254-0.375-2.057c-0.004-0.041-0.068-0.029-0.111-0.035
		c-0.244-1.154-0.381-2.408-0.749-3.439c0.143-0.158,0.011-0.475-0.073-0.635c-0.28,0.189-0.496,0.447-0.485,0.934
		c-0.151-0.053-0.12,0.082-0.226,0.074v0.262c-0.165-0.053-0.1,0.123-0.111,0.227c-0.129-0.006-0.095,0.152-0.225,0.146
		c0.037,0.424-0.283,0.492-0.299,0.861c-0.369,0.24-0.458,0.764-0.896,0.936c-0.051,0.01-0.031,0.09-0.038,0.148
		c-0.243,0.082-0.502,0.146-0.674,0.299c-0.798-0.012-1.406-0.211-1.718-0.709c-0.51-0.301-0.577-1.043-1.047-1.383
		c-0.04-0.285-0.232-0.416-0.225-0.75c-0.119-0.152-0.193-0.355-0.225-0.598c-0.059-0.027-0.055-0.117-0.15-0.111
		c-0.075-0.346-0.066-0.779-0.335-0.936c-0.059-0.648-0.423-0.996-0.374-1.754c-0.058-0.031-0.055-0.121-0.148-0.113
		c0-0.584-0.219-0.953-0.224-1.533c-0.114-0.012-0.012-0.236-0.188-0.188c-0.021-0.914-0.118-1.748-0.074-2.729
		c-0.355,0.08-0.42,0.453-0.524,0.785c-0.19-0.078-0.032,0.191-0.186,0.15c-0.021,0.514-0.309,0.762-0.374,1.232
		c-0.374,0.412-0.504,1.066-0.933,1.422c-0.137,0.439-0.579,0.568-0.86,0.859c-0.124-0.012-0.232-0.008-0.263,0.074
		c-0.142-0.008-0.339,0.039-0.374-0.074h-0.375c-0.988-0.443-1.348-1.52-1.531-2.766c-0.006-0.045-0.068-0.033-0.112-0.039
		c-0.016-0.445-0.221-0.701-0.226-1.16c-0.275-0.281-0.283-0.834-0.372-1.307c-0.272-0.252-0.144-0.457-0.226-0.861
		c-0.066-0.029-0.102-0.094-0.188-0.111c0.015-0.498-0.354-0.615-0.521-0.932c-0.281,0.064-0.397-0.164-0.562,0
		c-0.151,0.051-0.117-0.082-0.224-0.076c-0.498,0.15-0.832,0.463-0.859,1.084c-0.689,0.656-0.777,1.914-1.533,2.504
		c-0.138-0.057-0.29-0.121-0.411,0c-0.418-0.143-0.849-0.271-1.008-0.672c-0.013-0.051-0.094-0.031-0.15-0.039
		c-0.115-0.508-0.649-0.594-0.86-1.008c-0.331-0.018-0.489-0.207-0.635-0.41c-0.162,0.012-0.303,0-0.299-0.152
		c-0.205,0.152-0.502-0.006-0.71-0.072c-0.425,0.086-0.595,0.08-1.01,0c-0.355-0.033-0.353,0.295-0.672,0.299
		c-0.04,0.209-0.325,0.172-0.337,0.412c-0.191-0.057-0.16,0.111-0.336,0.072c-0.25,0.525-0.617,0.93-1.01,1.309h-0.186
		c-0.036,0.066-0.14,0.061-0.113,0.188c-0.457,0.102-0.771,0.352-1.385,0.299c-0.063-0.086-0.096-0.203-0.072-0.373
		c0.272-0.039,0.517-0.105,0.785-0.152c0.137-0.297,0.688-0.182,0.746-0.557c0.175,0.047,0.126-0.127,0.299-0.076
		c0.098-0.428,0.521-0.527,0.637-0.934c0.266-0.133,0.544-0.254,0.635-0.561c0.285-0.029,0.602-0.021,0.635-0.299
		c0.616-0.117,1.475-0.102,2.094,0c0.086,0.188,0.33,0.217,0.599,0.223c0.127,0.195,0.326,0.322,0.561,0.41
		c0.015,0.211,0.131,0.316,0.372,0.299c-0.018,0.232,0.189,0.238,0.338,0.299c0.008,0.152,0.047,0.281,0.225,0.264
		c-0.087,0.41,0.385,0.264,0.373,0.598c0.582,0.217,1.041-0.094,1.121-0.598c0.165,0.053,0.1-0.127,0.225-0.111
		c-0.034-0.234,0.132-0.27,0.074-0.523c0.296-0.129,0.191-0.66,0.562-0.713c-0.029-0.639,0.425-0.795,0.599-1.232h0.186
		c0.052-0.01,0.03-0.092,0.037-0.148c0.485-0.094,1.088-0.105,1.57,0c-0.028,0.115,0.129,0.045,0.075,0.186
		c0.249,0.039,0.222,0.352,0.447,0.41c0.087,0.539,0.449,0.799,0.413,1.459c0.11,0.09,0.113,0.285,0.223,0.375
		c-0.031,0.604,0.297,0.85,0.299,1.418c0.177,0.188,0.1,0.627,0.338,0.75c-0.068,0.789,0.307,1.137,0.374,1.793
		c0.236,0.25,0.483,0.488,0.708,0.748c0.952,0.154,1.299-0.299,1.722-0.672c-0.058-0.193,0.112-0.162,0.075-0.338
		c0.512-0.547,0.673-1.445,1.156-2.018c-0.049-0.225,0.125-0.225,0.076-0.451c0.139,0.041,0.059-0.137,0.186-0.109
		c0.04-0.705,0.567-1.223,0.674-1.982c0.17-1.201-0.067-2.619,0.074-3.85c-0.02-0.078,0.055-0.07,0.112-0.074
		c0.098-0.6-0.189-1.586,0.522-1.57c-0.007,0.121,0.064,0.162,0.15,0.188c0.39,2.787,0.083,6.27,0.372,9.156
		c-0.03,0.121,0.099,0.078,0.112,0.152c0.027,0.176-0.061,0.457,0.076,0.521c0.051,0.908,0.082,1.834,0.374,2.504
		c-0.022,1.146,0.505,1.738,0.635,2.729c0.06,0.029,0.058,0.117,0.148,0.111c0.101,0.861,0.563,1.359,1.011,1.869
		c0.479,0.123,0.92,0.188,1.458,0.076c0.148-0.088,0.172-0.303,0.411-0.299c0.064-0.619,0.738-0.633,0.709-1.348
		c0.182-0.117,0.28-0.316,0.299-0.598c0.288-0.035,0.324-0.322,0.338-0.633c0.15,0.051,0.117-0.082,0.225-0.076
		c0.08-0.609,0.658-1.104,0.373-1.832c0.185-0.383-0.146-0.764-0.15-1.197c-0.059-0.027-0.055-0.117-0.148-0.111
		c-0.023-1.172-0.388-2.004-0.374-3.213c-0.009-0.057-0.112-0.016-0.112-0.074c0.047-0.34-0.203-0.639,0-0.896
		c0.188-0.053,0.197,0.076,0.336,0.072c0.127,0.889,0.22,1.773,0.449,2.652c0.031,0.121-0.033,0.145-0.074,0.189
		c0.289,0.459,0.088,1.406,0.561,1.682c0.193-0.092,0.223-0.352,0.225-0.635c0.188-0.088,0.217-0.334,0.223-0.598
		c0.381-0.232,0.301-0.922,0.637-1.197v-0.672c0.124,0.012,0.041-0.184,0.225-0.113c-0.023-0.385,0.184-0.537,0.147-0.934
		c0.442-0.156,0.2-0.998,0.563-1.232v-0.525c0.267-0.254,0.268-0.781,0.298-1.27c0.121,0.045,0.028-0.121,0.151-0.074
		c0.181-0.742,0.366-1.477,0.559-2.207c-0.044-0.195,0.033-0.354,0.15-0.746c0.056-0.191,0.127-0.381,0.225-0.486
		c-0.115-0.111-0.066-0.383-0.076-0.598c0.45-0.326-0.141-0.662-0.073-1.16c-0.15-0.123-0.223-0.326-0.226-0.598
		c-0.378-0.592-0.541-1.4-0.71-2.205c-0.059-0.027-0.055-0.119-0.148-0.113c-0.155-0.781-0.314-1.508-0.449-2.129
		c0.075-0.275,0.009-0.523-0.187-0.711c-0.023-0.176,0.063-0.461-0.076-0.523c0.25-0.281-0.223-0.621,0-0.859
		c-0.312-0.912-0.057-2.385-0.297-3.365c0.225-0.359,0.044-1.127,0.297-1.457c0.028-0.115-0.126-0.045-0.074-0.188
		c0.036-0.203,0.163-0.309,0.15-0.561c0.151-0.346,0.392-0.604,0.486-1.008c0.36-0.428,0.81-0.762,1.72-0.639
		c0.053,0.148,0.199,0.203,0.371,0.227c-0.036,0.313,0.246,0.305,0.227,0.598c0.062,0.051,0.078,0.145,0.224,0.113
		c-0.181,0.609,0.353,0.879,0.338,1.27c-0.006,0.129-0.089,0.43,0.073,0.676c-0.102,0.656-0.182,1.916,0.074,2.465
		c-0.026,0.254-0.068,0.18,0,0.41c-0.32,0.453-0.005,1.539-0.224,2.094c0.177,0.404-0.064,1.178-0.149,1.758
		c0.193,0.232-0.078,0.475,0,0.82c0.033,0.121-0.123,0.053-0.112,0.152c0.019,1.264-0.354,2.135-0.45,3.287
		c0.045,0.381,0.314,0.535,0.3,0.973c0.179,0.023,0.068,0.33,0.263,0.336v0.299c0.393,0.643,0.715,1.352,0.936,2.17
		c0.058,0.027,0.055,0.117,0.147,0.111c-0.001,0.436,0.164,0.709,0.374,0.934c0.046,0.123-0.119,0.029-0.074,0.15
		c0.291,0.242,0.12,0.949,0.486,1.121c-0.022,0.17,0.02,0.277,0.148,0.299c-0.043,0.367,0.195,0.455,0.15,0.822
		c0.338,0.285,0.17,1.076,0.522,1.346c0.006,0.092,0.009,0.182-0.073,0.188c0.079,0.033,0.064,0.158,0.186,0.146
		c-0.175,0.1,0.086,0.336,0.15,0.639c0.104,0.494,0.227,1.131,0.375,1.494c0.035,0.475,0.141,0.879,0.297,1.234v0.26
		c0.128,0.072,0.06,0.34,0.188,0.412c-0.135,0.145,0.02,0.418,0.074,0.598c0.033,0.117-0.031,0.141-0.074,0.188
		c0.233,0.668,0.172,1.48,0.373,2.02c-0.254,0.432,0.251,1.068,0,1.494c0.11,0.348-0.185,1.26,0.188,1.533
		c0.312-0.145,0.179-0.502,0.224-0.787c0.086-0.561,0.348-1.217,0.224-1.941c0.263-0.088,0.131-0.568,0.3-0.75
		c-0.053-0.361-0.078-0.783,0-1.344c0.021-0.145,0.127-0.268,0.149-0.412c0.035-0.227-0.029-0.471,0-0.672
		c0.009-0.064,0.092-0.092,0.111-0.15c0.115-0.369,0.007-1.049,0.076-1.57c0.056-0.434,0.299-0.668,0.073-0.936
		c0.303-0.682,0.103-1.52,0.224-2.277c0.022-0.127,0.131-0.232,0.15-0.338c0.159-0.826,0.017-1.697,0.15-2.543
		c0.052-0.084,0.073-0.199,0.148-0.26c0.046-0.156-0.06-0.164-0.075-0.262c0.398-0.598,0.099-1.895,0.413-2.578
		c0.05-0.152-0.081-0.117-0.075-0.227c0.217-0.795,0.262-1.777,0.447-2.539c0.066-0.266-0.048-0.598,0-0.896
		c0.03-0.193,0.1-0.342,0.15-0.637c0.072-0.424,0.101-0.807,0.075-1.232c-0.019-0.082,0.054-0.072,0.111-0.076
		c0.007-0.342-0.052-0.75,0.149-0.896c0.022-0.121-0.012-0.188-0.076-0.225c0.233-0.59,0.189-1.457,0.45-2.02
		c0.021-0.082-0.071-0.053-0.075-0.111c0.11-0.129,0.052-0.424,0.225-0.486c0.045-0.121-0.121-0.029-0.074-0.152
		c0.296-0.867,0.416-1.641,0.56-2.727c0.06-0.441,0.148-0.906,0.374-1.234c-0.061-0.469,0.159-0.662,0.15-1.082
		c0.135,0.035,0.017-0.184,0.188-0.113c0.018-0.543,0.229-0.891,0.447-1.23c0.047-0.123-0.122-0.031-0.075-0.152
		c0.144-0.219,0.267-0.453,0.3-0.785c0.145-0.141,0.324-0.246,0.337-0.523c0.325-0.121,0.352-0.543,0.709-0.637
		c0.042-0.082,0.18-0.066,0.15-0.223c0.406-0.066,0.858-0.135,1.233-0.074c0.223,0.035,0.573,0.215,0.748,0.373
		c0.025,0.025-0.021,0.123,0,0.15c0.025,0.031,0.368,0.23,0.41,0.301c0.05,0.082,0.018,0.246,0.075,0.297
		C854.743,680.438,854.819,680.473,854.894,680.512 M841.139,688.172c0.049-0.148-0.124-0.074-0.077-0.223
		c0.255-0.73-0.19-1.725,0.077-2.469c-0.115-0.48-0.127-1.066-0.15-1.645c-0.092-0.059-0.105-0.189-0.188-0.26
		c0.035-0.658-0.303-0.943-0.597-1.271c-0.2,0.051-0.245-0.053-0.338-0.111c-0.237,0.084-0.489,0.158-0.56,0.41
		c-0.297-0.047-0.187,0.311-0.449,0.299v0.336c-0.119-0.047-0.028,0.121-0.148,0.074c-0.279,1.082-0.101,2.619-0.149,3.926
		c0.127,0.217,0.226,0.672,0.075,0.934c0.23,0.232,0.066,0.857,0.297,1.088c-0.199,0.506,0.248,0.992,0.151,1.643
		c0.27,0.578,0.447,1.246,0.56,1.982c0.094-0.008,0.092,0.082,0.15,0.113c0.102,0.543,0.167,1.125,0.486,1.457
		c0.335-0.131,0.148-0.518,0.224-0.898c0.022-0.125,0.13-0.215,0.147-0.336c0.086-0.523,0-1.125,0.15-1.607
		c-0.075-0.225,0.009-0.156,0-0.449c0.117,0.029,0.047-0.129,0.188-0.076c-0.06-0.375,0.129-0.998-0.111-1.195
		C841.225,689.582,840.938,688.633,841.139,688.172 M867.042,697.592c-0.103-0.73-0.334-1.336-0.375-2.129
		c-0.094,0.008-0.09-0.082-0.149-0.113c0.044-0.365-0.192-0.453-0.149-0.822c-0.211,0-0.088-0.334-0.261-0.373
		c-0.169-0.977-0.565-1.727-1.159-2.281c-0.662-0.199-1.025,0.254-1.42,0.637c-0.104,0.102-0.061,0.219-0.15,0.375
		c-0.061,0.107-0.238,0.195-0.299,0.297c-0.051,0.092-0.029,0.236-0.075,0.338c-0.118,0.26-0.356,0.6-0.3,0.936h-0.111
		c0.035,0.418-0.297,0.473-0.224,0.934c-0.122-0.047-0.028,0.121-0.149,0.074c-0.076,0.797-0.395,1.352-0.449,2.168h-0.113
		c-0.053,0.143,0.168,0.008,0.113,0.148c-0.387,0.615-0.206,1.406-0.336,2.244c-0.02,0.141-0.13,0.268-0.149,0.41
		c-0.121,0.889-0.067,1.533-0.149,2.393c-0.074,0.76-0.075,1.703,0,2.467c0.05,0.502,0.059,0.988,0.226,1.42
		c0.054,0.143-0.104,0.072-0.076,0.188c0.085,0.107,0.085,0.191,0,0.301c0.304,0.492,0.095,1.5,0.485,1.904
		c0.034,0.9,0.26,1.611,0.525,2.279c-0.019,0.059-0.119,0.031-0.076,0.152c0.234,0.371,0.275,0.539,0.373,0.969
		c0.127-0.025,0.123,0.078,0.188,0.113c0.059,0.518,0.407,0.738,0.523,1.195c0.179,0.072,0.373,0.125,0.41,0.336
		c0.234-0.059,0.234,0.117,0.375,0.152c0.121,0.043,0.028-0.123,0.15-0.076c0.381,0.229,0.884-0.059,1.269-0.076
		c0.039-0.074,0.073-0.152,0.114-0.223h0.188c0.123-0.375,0.417-0.58,0.483-1.008c0.166,0.053,0.099-0.129,0.225-0.113
		c0.035-0.701,0.427-1.047,0.449-1.758c0.134,0.037,0.017-0.182,0.188-0.111c0.157-0.514-0.043-0.924,0.298-1.234
		c0.08-0.459-0.021-1.143,0.299-1.531c0.035-0.15-0.129-0.096-0.074-0.262c0.125-0.127,0.061-0.439,0.074-0.674
		c-0.646-1.424-0.271-4.178-0.373-5.906c-0.004-0.059-0.096-0.029-0.075-0.111c0.024-1.059-0.052-2.016-0.148-2.953
		c-0.044-0.006-0.106,0.006-0.112-0.037v-0.449c0.057-0.004,0.129,0.006,0.112-0.074
		C867.211,697.572,867.025,697.682,867.042,697.592 M840.651,698.564c-0.207-0.078-0.154-0.553-0.485-0.299
		c-0.229,0.563-0.266,1.713-0.598,2.168c0.15,0.463-0.19,0.943-0.335,1.346c-0.012,0.1,0.145,0.031,0.111,0.148h-0.111
		c-0.076,0.563-0.363,0.908-0.375,1.535c-0.119-0.047-0.028,0.121-0.148,0.074c-0.021,0.289-0.119,0.504-0.074,0.859
		c-0.152-0.051-0.119,0.08-0.227,0.074c-0.031,0.117,0.09,0.387-0.111,0.336c-0.063,0.672-0.283,1.188-0.522,1.682
		c0.003,0.061,0.097,0.029,0.075,0.113c-0.23-0.029-0.064,0.336-0.225,0.373c-0.029,0.115,0.129,0.045,0.075,0.186
		c-0.193,0.07-0.132,0.393-0.338,0.451c-0.077,0.508-0.216,0.953-0.522,1.23c0.015,0.496-0.412,0.938-0.074,1.309
		c-0.207,0.088,0.145,0.355-0.076,0.488c0.211,0.5,0.336,1.082,0.375,1.754c0.078,0.629,0.303,0.838,0.298,1.646
		c0.139,0,0.089,0.188,0.188,0.225c-0.019,0.615,0.201,0.994,0.3,1.496c-0.113-0.039-0.067,0.082-0.075,0.148
		c0.346,0.287,0.168,1.102,0.522,1.383v0.262c0.313,0.41,0.313,1.133,0.635,1.531c0.043,0.434,0.186,0.762,0.412,1.008v0.301
		c0.182,0.117,0.28,0.318,0.299,0.598c0.244,0.045,0.27,0.305,0.3,0.563c0.181,0.055,0.304,0.168,0.337,0.373
		c0.172-0.049,0.124,0.123,0.299,0.076c-0.057,0.164,0.108,0.113,0.072,0.26c0.48,0.088,1.383,0.201,1.609-0.188
		c0.392-0.057,0.498-0.396,0.822-0.521c-0.011-0.195,0.016-0.359,0.225-0.336c-0.071-0.295,0.219-0.23,0.148-0.523
		c0.151,0.051,0.118-0.082,0.226-0.076c0.243-0.754,0.462-1.529,0.783-2.205c-0.014-0.096-0.12-0.102-0.074-0.26
		c0.321-0.49,0.203-1.418,0.225-2.209c0.411-0.234-0.083-0.941,0.188-1.344c-0.043-0.199-0.1-0.23,0-0.41
		c-0.403-0.508-0.092-1.727-0.412-2.318c0.238-0.25-0.239-0.574,0-0.822c-0.341-0.621-0.182-1.42-0.522-2.055
		c0.067,0.004,0.086-0.035,0.074-0.111c-0.223-0.938-0.6-1.721-0.635-2.842c-0.28-0.219-0.202-0.797-0.45-1.049
		c0.019-0.057,0.118-0.029,0.075-0.146c-0.205-0.133-0.273-0.4-0.262-0.75c-0.264-0.246-0.287-0.732-0.374-1.156
		c-0.271-0.238-0.197-0.824-0.485-1.047c0.006-0.293-0.07-0.504-0.225-0.637c-0.015-0.271-0.063-0.512-0.224-0.635
		c0.017-0.059,0.118-0.033,0.076-0.15c-0.227-0.248-0.386-0.563-0.377-1.045C840.786,699.279,840.664,698.979,840.651,698.564"/>
	<path fill-rule="evenodd" clip-rule="evenodd" fill="#231F20" d="M829.252,694.902c-0.442-0.082-0.246-0.801-0.522-1.049v-1.195
		c0.231-0.34,0.116-1.027,0.484-1.23c0.345,0.203,0.276,0.82,0.449,1.195C829.682,693.537,829.673,694.424,829.252,694.902"/>
</g>
<g>
	<rect x="131.079" y="747.009" fill="none" width="766.263" height="35.289"/>
	<path d="M132.727,755.388h-0.873v-6.464h2.908c1.197,0,1.917,0.774,1.917,1.818c0,0.9-0.513,1.918-1.917,1.918h-2.035V755.388z
		 M132.727,751.913h1.737c0.784,0,1.314-0.288,1.314-1.162c0-0.819-0.558-1.08-1.278-1.08h-1.773V751.913z"/>
	<path d="M138.473,755.388h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.323-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.297c-0.738,0-1.207,0.576-1.207,1.26V755.388z"/>
	<path d="M142.554,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C140.366,751.813,141.051,750.544,142.554,750.544z M142.554,754.829c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801s-1.369,1.17-1.369,1.801C141.186,753.659,141.42,754.829,142.554,754.829z"/>
	<path d="M148.733,752.264c-0.099-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.296,0.936-1.296,1.773c0,0.81,0.198,1.81,1.288,1.81
		c0.531,0,0.99-0.396,1.125-1.116h0.766c-0.081,0.747-0.541,1.8-1.918,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.26-2.62c1.233,0,1.702,0.9,1.765,1.72H148.733z"/>
	<path d="M154.414,753.911c-0.027,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.107-0.999-2.107-2.358c0-1.458,0.703-2.62,2.26-2.62c1.359,0,2.026,1.081,2.026,2.746h-3.439
		c0,0.981,0.459,1.549,1.368,1.549c0.747,0,1.188-0.576,1.215-0.928H154.414z M153.658,752.66c-0.045-0.729-0.351-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H153.658z"/>
	<path d="M159.503,753.911c-0.027,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.259-2.62c1.359,0,2.026,1.081,2.026,2.746h-3.439
		c0,0.981,0.459,1.549,1.368,1.549c0.747,0,1.188-0.576,1.215-0.928H159.503z M158.747,752.66c-0.045-0.729-0.351-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H158.747z"/>
	<path d="M164.524,755.388h-0.747v-0.648h-0.018c-0.342,0.64-0.882,0.783-1.341,0.783c-1.603,0-2.071-1.503-2.071-2.629
		c0-1.323,0.711-2.35,1.963-2.35c0.855,0,1.215,0.531,1.404,0.802l0.018-0.063v-2.358h0.792V755.388z M162.436,754.839
		c0.504,0,1.287-0.333,1.287-1.585c0-0.765-0.126-1.998-1.269-1.998c-1.225,0-1.288,1.161-1.288,1.764
		C161.166,754.056,161.571,754.839,162.436,754.839z"/>
	<path d="M165.746,749.824v-0.9h0.792v0.9H165.746z M166.539,755.388h-0.792v-4.708h0.792V755.388z"/>
	<path d="M171.624,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.242,0.288-1.242,1.566v2.565h-0.792v-4.708
		h0.748v0.666h0.018c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M176.775,755.001c0,0.675-0.009,2.367-2.188,2.367c-0.558,0-1.647-0.153-1.801-1.404h0.792
		c0.144,0.747,0.873,0.747,1.063,0.747c1.368,0,1.341-1.099,1.341-1.656v-0.198h-0.018v0.036c-0.207,0.333-0.621,0.63-1.251,0.63
		c-1.603,0-2.07-1.503-2.07-2.629c0-1.323,0.711-2.35,1.962-2.35c0.855,0,1.215,0.531,1.404,0.802h0.018v-0.666h0.747V755.001z
		 M174.732,754.839c0.504,0,1.288-0.333,1.288-1.585c0-0.765-0.126-1.998-1.27-1.998c-1.225,0-1.288,1.161-1.288,1.764
		C173.462,754.056,173.868,754.839,174.732,754.839z"/>
	<path d="M180.677,752.021c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.936,0.081-0.936,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.864,1.558c-1.863,0-1.999-1.08-2.025-1.647h0.765
		c0.027,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.747c0-0.379-0.261-0.505-0.936-0.676l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.855-1.504,1.783-1.504c1.684,0,1.801,1.243,1.801,1.477H180.677z"/>
	<path d="M187.18,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C184.992,751.813,185.676,750.544,187.18,750.544z M187.18,754.829c1.134,0,1.368-1.17,1.368-1.8c0-0.631-0.234-1.801-1.368-1.801
		c-1.135,0-1.369,1.17-1.369,1.801C185.811,753.659,186.045,754.829,187.18,754.829z"/>
	<path d="M191.26,751.337v4.051h-0.792v-4.051h-0.648v-0.657h0.648v-0.811c0-0.711,0.45-1.035,1.224-1.035
		c0.117,0,0.234,0.009,0.36,0.018v0.711c-0.099-0.009-0.225-0.018-0.324-0.018c-0.342,0-0.468,0.171-0.468,0.549v0.586h0.792v0.657
		H191.26z"/>
	<path d="M196.512,751.337v3.061c0,0.369,0.315,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.784,0-0.955-0.441-0.955-1.009v-3.105h-0.639v-0.657h0.639v-1.314h0.792v1.314h0.756v0.657H196.512z"/>
	<path d="M201.964,755.388h-0.792v-3.079c0-0.639-0.18-1.053-0.936-1.053c-0.648,0-1.314,0.378-1.314,1.566v2.565h-0.792v-6.464
		h0.792v2.386h0.018c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M207.207,753.911c-0.027,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.107-0.999-2.107-2.358c0-1.458,0.703-2.62,2.26-2.62c1.359,0,2.026,1.081,2.026,2.746h-3.439
		c0,0.981,0.459,1.549,1.368,1.549c0.747,0,1.188-0.576,1.215-0.928H207.207z M206.451,752.66c-0.045-0.729-0.351-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H206.451z"/>
	<path d="M210.783,751.327c0-2.062,1.503-2.269,2.188-2.269c1.098,0,1.98,0.711,1.98,1.891c0,1.135-0.748,1.611-1.692,2.116
		l-0.657,0.359c-0.864,0.478-1.054,0.973-1.089,1.216h3.439v0.747h-4.331c0.045-1.314,0.639-2.017,1.486-2.512l0.837-0.486
		c0.675-0.387,1.162-0.648,1.162-1.477c0-0.504-0.324-1.144-1.251-1.144c-1.197,0-1.251,1.117-1.278,1.558H210.783z"/>
	<path d="M217.929,755.559c-1.819,0-2.17-1.936-2.17-3.25s0.351-3.25,2.17-3.25c1.818,0,2.169,1.936,2.169,3.25
		S219.747,755.559,217.929,755.559z M217.929,749.77c-0.91,0-1.324,0.955-1.324,2.539c0,1.585,0.414,2.539,1.324,2.539
		c0.909,0,1.323-0.954,1.323-2.539C219.252,750.725,218.838,749.77,217.929,749.77z"/>
	<path d="M222.167,751.337v3.061c0,0.369,0.315,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.784,0-0.955-0.441-0.955-1.009v-3.105h-0.639v-0.657h0.639v-1.314h0.792v1.314h0.756v0.657H222.167z"/>
	<path d="M227.62,755.388h-0.792v-3.079c0-0.639-0.18-1.053-0.936-1.053c-0.648,0-1.314,0.378-1.314,1.566v2.565h-0.792v-6.464
		h0.792v2.386h0.018c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M232.664,753.506l-0.675,1.882h-0.9l2.431-6.464h0.99l2.341,6.464h-0.954l-0.639-1.882H232.664z M234.941,752.731
		l-0.954-2.845h-0.018l-1.035,2.845H234.941z"/>
	<path d="M241.479,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.242,0.288-1.242,1.566v2.565h-0.792v-4.708
		h0.748v0.666h0.018c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M246.557,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.242,0.288-1.242,1.566v2.565h-0.792v-4.708
		h0.748v0.666h0.018c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M250.841,755.388v-0.685l-0.018-0.018c-0.333,0.549-0.738,0.837-1.495,0.837c-0.693,0-1.53-0.333-1.53-1.458v-3.385h0.792
		v3.123c0,0.774,0.396,1.036,0.909,1.036c1,0,1.296-0.883,1.296-1.566v-2.593h0.792v4.708H250.841z"/>
	<path d="M252.833,752.11c0.036-1.125,0.783-1.566,1.945-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71
		c0,0.198,0.099,0.279,0.261,0.279c0.072,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.432,0.09
		c-0.702,0-0.81-0.36-0.837-0.72c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359
		c0-0.396,0.117-1.314,1.431-1.477l1.306-0.162c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.36-0.766-1.107-0.766
		c-0.9,0-1.026,0.55-1.08,0.909H252.833z M255.732,752.984c-0.126,0.099-0.324,0.171-1.305,0.297
		c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819c0.738,0,1.458-0.477,1.458-1.107V752.984z"/>
	<path d="M258.736,755.388h-0.792v-6.464h0.792V755.388z"/>
	<path d="M267.318,750.868c-0.243-1.134-1.188-1.341-1.809-1.341c-1.17,0-2.116,0.864-2.116,2.565c0,1.521,0.54,2.691,2.143,2.691
		c0.567,0,1.548-0.27,1.846-1.773h0.846c-0.359,2.44-2.34,2.548-2.826,2.548c-1.468,0-2.908-0.954-2.908-3.438
		c0-1.99,1.134-3.367,3.016-3.367c1.666,0,2.53,1.035,2.656,2.115H267.318z"/>
	<path d="M271.201,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C269.014,751.813,269.697,750.544,271.201,750.544z M271.201,754.829c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C269.833,753.659,270.066,754.829,271.201,754.829z"/>
	<path d="M278.225,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M280.404,751.337v4.051h-0.792v-4.051h-0.648v-0.657h0.648v-0.811c0-0.711,0.45-1.035,1.225-1.035
		c0.116,0,0.233,0.009,0.359,0.018v0.711c-0.099-0.009-0.225-0.018-0.324-0.018c-0.342,0-0.468,0.171-0.468,0.549v0.586h0.792v0.657
		H280.404z"/>
	<path d="M285.872,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H285.872z M285.116,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H285.116z"/>
	<path d="M287.805,755.388h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.324-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.298c-0.738,0-1.206,0.576-1.206,1.26V755.388z"/>
	<path d="M293.943,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H293.943z M293.188,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H293.188z"/>
	<path d="M298.862,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M303.095,752.264c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H303.095z"/>
	<path d="M308.774,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H308.774z M308.019,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H308.019z"/>
	<path d="M314.458,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C312.271,751.813,312.954,750.544,314.458,750.544z M314.458,754.829c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C313.09,753.659,313.323,754.829,314.458,754.829z"/>
	<path d="M318.539,751.337v4.051h-0.792v-4.051h-0.648v-0.657h0.648v-0.811c0-0.711,0.45-1.035,1.225-1.035
		c0.116,0,0.233,0.009,0.359,0.018v0.711c-0.099-0.009-0.225-0.018-0.324-0.018c-0.342,0-0.468,0.171-0.468,0.549v0.586h0.792v0.657
		H318.539z"/>
	<path d="M323.791,751.337v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H323.791z"/>
	<path d="M329.242,755.388h-0.792v-3.079c0-0.639-0.18-1.053-0.937-1.053c-0.647,0-1.314,0.378-1.314,1.566v2.565h-0.792v-6.464
		h0.792v2.386h0.019c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M334.485,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H334.485z M333.729,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H333.729z"/>
	<path d="M339.358,755.388h-0.873v-6.464h0.873V755.388z"/>
	<path d="M344.607,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M346.769,751.337v3.061c0,0.369,0.315,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H346.769z"/>
	<path d="M352.275,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H352.275z M351.52,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H351.52z"/>
	<path d="M354.208,755.388h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.324-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.298c-0.738,0-1.206,0.576-1.206,1.26V755.388z"/>
	<path d="M360.262,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M361.468,752.11c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H361.468z M364.366,752.984
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.738,0,1.458-0.477,1.458-1.107V752.984z"/>
	<path d="M367.44,751.337v3.061c0,0.369,0.315,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H367.44z"/>
	<path d="M369.08,749.824v-0.9h0.792v0.9H369.08z M369.872,755.388h-0.792v-4.708h0.792V755.388z"/>
	<path d="M373.069,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C370.882,751.813,371.565,750.544,373.069,750.544z M373.069,754.829c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C371.701,753.659,371.935,754.829,373.069,754.829z"/>
	<path d="M380.088,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708H377
		v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M381.295,752.11c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H381.295z M384.193,752.984
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.737,0,1.458-0.477,1.458-1.107V752.984z"/>
	<path d="M387.199,755.388h-0.792v-6.464h0.792V755.388z"/>
	<path d="M395.143,750.814c-0.045-0.999-0.873-1.314-1.603-1.314c-0.549,0-1.476,0.153-1.476,1.135c0,0.549,0.387,0.729,0.765,0.818
		l1.846,0.424c0.837,0.197,1.468,0.702,1.468,1.729c0,1.53-1.423,1.953-2.53,1.953c-1.197,0-1.665-0.36-1.953-0.621
		c-0.55-0.495-0.657-1.035-0.657-1.639h0.819c0,1.171,0.954,1.513,1.782,1.513c0.63,0,1.692-0.162,1.692-1.089
		c0-0.676-0.315-0.892-1.377-1.144l-1.324-0.307c-0.423-0.099-1.377-0.396-1.377-1.521c0-1.008,0.657-1.998,2.224-1.998
		c2.26,0,2.484,1.351,2.521,2.062H395.143z"/>
	<path d="M397.21,750.68h0.747v0.666h0.018c0.189-0.271,0.55-0.802,1.404-0.802c1.252,0,1.963,1.026,1.963,2.35
		c0,1.126-0.468,2.629-2.07,2.629c-0.631,0-1.045-0.297-1.252-0.63h-0.018v2.358h-0.792V750.68z M399.253,754.839
		c0.864,0,1.27-0.783,1.27-1.819c0-0.603-0.063-1.764-1.287-1.764c-1.144,0-1.27,1.233-1.27,1.998
		C397.966,754.506,398.749,754.839,399.253,754.839z"/>
	<path d="M406.363,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H406.363z M405.607,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H405.607z"/>
	<path d="M411.457,753.911c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H411.457z M410.701,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H410.701z"/>
	<path d="M415.536,752.264c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H415.536z"/>
	<path d="M421.076,755.388h-0.792v-3.079c0-0.639-0.18-1.053-0.937-1.053c-0.647,0-1.314,0.378-1.314,1.566v2.565h-0.792v-6.464
		h0.792v2.386h0.019c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M429.641,750.868c-0.243-1.134-1.188-1.341-1.81-1.341c-1.171,0-2.116,0.864-2.116,2.565c0,1.521,0.541,2.691,2.144,2.691
		c0.566,0,1.548-0.27,1.845-1.773h0.847c-0.36,2.44-2.341,2.548-2.827,2.548c-1.467,0-2.907-0.954-2.907-3.438
		c0-1.99,1.134-3.367,3.016-3.367c1.665,0,2.529,1.035,2.655,2.115H429.641z"/>
	<path d="M433.522,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C431.335,751.813,432.019,750.544,433.522,750.544z M433.522,754.829c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C432.154,753.659,432.388,754.829,433.522,754.829z"/>
	<path d="M436.711,750.68h0.747v0.666h0.019c0.171-0.252,0.576-0.802,1.422-0.802c0.847,0,1.117,0.514,1.252,0.766
		c0.396-0.441,0.711-0.766,1.44-0.766c0.504,0,1.458,0.262,1.458,1.585v3.259h-0.792v-3.043c0-0.648-0.198-1.089-0.873-1.089
		c-0.666,0-1.107,0.63-1.107,1.26v2.872h-0.792v-3.259c0-0.396-0.153-0.873-0.738-0.873c-0.45,0-1.243,0.288-1.243,1.566v2.565
		h-0.792V750.68z"/>
	<path d="M444.296,750.68h0.747v0.666h0.019c0.171-0.252,0.576-0.802,1.422-0.802c0.847,0,1.116,0.514,1.252,0.766
		c0.396-0.441,0.711-0.766,1.44-0.766c0.504,0,1.458,0.262,1.458,1.585v3.259h-0.792v-3.043c0-0.648-0.198-1.089-0.873-1.089
		c-0.666,0-1.107,0.63-1.107,1.26v2.872h-0.792v-3.259c0-0.396-0.153-0.873-0.738-0.873c-0.45,0-1.243,0.288-1.243,1.566v2.565
		h-0.792V750.68z"/>
	<path d="M454.921,755.388v-0.685l-0.019-0.018c-0.333,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.53-0.333-1.53-1.458v-3.385h0.792
		v3.123c0,0.774,0.396,1.036,0.909,1.036c0.999,0,1.297-0.883,1.297-1.566v-2.593h0.792v4.708H454.921z"/>
	<path d="M460.76,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M462.018,749.824v-0.9h0.792v0.9H462.018z M462.81,755.388h-0.792v-4.708h0.792V755.388z"/>
	<path d="M467.064,752.264c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H467.064z"/>
	<path d="M468.742,752.11c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H468.742z M471.641,752.984
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.737,0,1.458-0.477,1.458-1.107V752.984z"/>
	<path d="M474.715,751.337v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H474.715z"/>
	<path d="M476.354,749.824v-0.9h0.792v0.9H476.354z M477.146,755.388h-0.792v-4.708h0.792V755.388z"/>
	<path d="M480.344,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C478.156,751.813,478.84,750.544,480.344,750.544z M480.344,754.829c1.134,0,1.368-1.17,1.368-1.8c0-0.631-0.234-1.801-1.368-1.801
		c-1.135,0-1.368,1.17-1.368,1.801C478.976,753.659,479.209,754.829,480.344,754.829z"/>
	<path d="M487.362,755.388h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V755.388z"/>
	<path d="M492.406,753.506l-0.675,1.882h-0.9l2.431-6.464h0.99l2.341,6.464h-0.954l-0.64-1.882H492.406z M494.685,752.731
		l-0.954-2.845h-0.019l-1.035,2.845H494.685z"/>
	<path d="M500.031,752.021c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.937,0.081-0.937,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.863,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.026,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.747c0-0.379-0.261-0.505-0.937-0.676l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.854-1.504,1.782-1.504c1.684,0,1.801,1.243,1.801,1.477H500.031z"/>
	<path d="M504.648,752.021c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.937,0.081-0.937,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.863,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.026,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.747c0-0.379-0.261-0.505-0.937-0.676l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.854-1.504,1.782-1.504c1.684,0,1.801,1.243,1.801,1.477H504.648z"/>
	<path d="M508.474,750.544c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C506.286,751.813,506.97,750.544,508.474,750.544z M508.474,754.829c1.134,0,1.368-1.17,1.368-1.8c0-0.631-0.234-1.801-1.368-1.801
		c-1.135,0-1.368,1.17-1.368,1.801C507.105,753.659,507.339,754.829,508.474,754.829z"/>
	<path d="M514.653,752.264c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.082,0.747-0.541,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.703,0.9,1.765,1.72H514.653z"/>
	<path d="M516.375,749.824v-0.9h0.793v0.9H516.375z M517.168,755.388h-0.793v-4.708h0.793V755.388z"/>
	<path d="M518.401,752.11c0.035-1.125,0.783-1.566,1.945-1.566c0.377,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.07,0,0.17-0.019,0.26-0.036v0.576c-0.135,0.036-0.252,0.09-0.432,0.09c-0.703,0-0.811-0.36-0.838-0.72
		c-0.305,0.333-0.783,0.765-1.701,0.765c-0.863,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.105-0.766c-0.9,0-1.027,0.55-1.082,0.909H518.401z M521.299,752.984
		c-0.125,0.099-0.324,0.171-1.305,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.26,0.819,0.855,0.819
		c0.738,0,1.457-0.477,1.457-1.107V752.984z"/>
	<path d="M524.374,751.337v3.061c0,0.369,0.316,0.369,0.477,0.369h0.279v0.621c-0.287,0.027-0.512,0.063-0.594,0.063
		c-0.783,0-0.953-0.441-0.953-1.009v-3.105h-0.641v-0.657h0.641v-1.314h0.791v1.314h0.756v0.657H524.374z"/>
	<path d="M526.014,749.824v-0.9h0.791v0.9H526.014z M526.805,755.388h-0.791v-4.708h0.791V755.388z"/>
	<path d="M530.002,750.544c1.504,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C527.815,751.813,528.499,750.544,530.002,750.544z M530.002,754.829c1.135,0,1.369-1.17,1.369-1.8
		c0-0.631-0.234-1.801-1.369-1.801s-1.367,1.17-1.367,1.801C528.635,753.659,528.868,754.829,530.002,754.829z"/>
	<path d="M537.022,755.388h-0.793v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.449,0-1.242,0.288-1.242,1.566v2.565h-0.793v-4.708
		h0.748v0.666h0.018c0.172-0.252,0.613-0.802,1.422-0.802c0.73,0,1.648,0.298,1.648,1.639V755.388z"/>
	<path d="M543.047,748.789c-0.766,1.44-1.225,2.305-1.225,4.465c0,1.485,0.514,2.584,1.232,3.997h-0.529
		c-0.938-1.404-1.549-2.529-1.549-4.213c0-1.585,0.549-2.863,1.521-4.249H543.047z"/>
	<path d="M545.25,755.388h-0.873v-6.464h0.873V755.388z"/>
	<path d="M551.061,748.924h0.846v6.464h-0.982l-3.285-5.222h-0.018v5.222h-0.846v-6.464h1.035l3.23,5.222h0.02V748.924z"/>
	<path d="M552.772,748.924h5.248v0.774h-2.188v5.689h-0.873v-5.689h-2.188V748.924z"/>
	<path d="M563.704,755.388h-4.771v-6.464h4.709v0.774h-3.836v1.98h3.539v0.774h-3.539v2.16h3.898V755.388z"/>
	<path d="M565.739,755.388h-0.873v-6.464h2.99c1.063,0,2.125,0.369,2.125,1.737c0,0.954-0.486,1.306-0.9,1.549
		c0.369,0.153,0.738,0.314,0.773,1.215l0.055,1.171c0.008,0.359,0.053,0.495,0.324,0.648v0.144h-1.072
		c-0.125-0.396-0.152-1.377-0.152-1.62c0-0.531-0.109-1.152-1.152-1.152h-2.117V755.388z M565.739,751.867h2.027
		c0.639,0,1.314-0.161,1.314-1.116c0-0.999-0.73-1.08-1.162-1.08h-2.18V751.867z"/>
	<path d="M575.174,750.814c-0.045-0.999-0.873-1.314-1.602-1.314c-0.549,0-1.477,0.153-1.477,1.135c0,0.549,0.387,0.729,0.766,0.818
		l1.846,0.424c0.836,0.197,1.467,0.702,1.467,1.729c0,1.53-1.422,1.953-2.529,1.953c-1.197,0-1.666-0.36-1.953-0.621
		c-0.551-0.495-0.658-1.035-0.658-1.639h0.82c0,1.171,0.953,1.513,1.781,1.513c0.631,0,1.693-0.162,1.693-1.089
		c0-0.676-0.316-0.892-1.377-1.144l-1.324-0.307c-0.424-0.099-1.377-0.396-1.377-1.521c0-1.008,0.656-1.998,2.223-1.998
		c2.26,0,2.484,1.351,2.521,2.062H575.174z"/>
	<path d="M578.29,755.388h-0.873v-6.464h2.906c1.199,0,1.918,0.774,1.918,1.818c0,0.9-0.512,1.918-1.918,1.918h-2.033V755.388z
		 M578.29,751.913h1.736c0.783,0,1.314-0.288,1.314-1.162c0-0.819-0.557-1.08-1.277-1.08h-1.773V751.913z"/>
	<path d="M588.073,755.388h-4.771v-6.464h4.707v0.774h-3.834v1.98h3.537v0.774h-3.537v2.16h3.898V755.388z"/>
	<path d="M593.997,755.388h-4.771v-6.464h4.707v0.774h-3.834v1.98h3.537v0.774h-3.537v2.16h3.898V755.388z"/>
	<path d="M599.54,750.868c-0.244-1.134-1.189-1.341-1.811-1.341c-1.17,0-2.115,0.864-2.115,2.565c0,1.521,0.541,2.691,2.143,2.691
		c0.566,0,1.549-0.27,1.846-1.773h0.846c-0.359,2.44-2.34,2.548-2.826,2.548c-1.467,0-2.908-0.954-2.908-3.438
		c0-1.99,1.135-3.367,3.016-3.367c1.666,0,2.529,1.035,2.656,2.115H599.54z"/>
	<path d="M605.819,748.924h0.873v6.464h-0.873v-3.016h-3.377v3.016h-0.873v-6.464h0.873v2.674h3.377V748.924z"/>
	<path d="M610.53,751.327c0-2.062,1.502-2.269,2.188-2.269c1.098,0,1.98,0.711,1.98,1.891c0,1.135-0.748,1.611-1.693,2.116
		l-0.656,0.359c-0.865,0.478-1.055,0.973-1.09,1.216h3.439v0.747h-4.33c0.045-1.314,0.639-2.017,1.484-2.512l0.838-0.486
		c0.676-0.387,1.16-0.648,1.16-1.477c0-0.504-0.324-1.144-1.25-1.144c-1.197,0-1.252,1.117-1.279,1.558H610.53z"/>
	<path d="M617.674,755.559c-1.818,0-2.17-1.936-2.17-3.25s0.352-3.25,2.17-3.25s2.17,1.936,2.17,3.25
		S619.493,755.559,617.674,755.559z M617.674,749.77c-0.908,0-1.322,0.955-1.322,2.539c0,1.585,0.414,2.539,1.322,2.539
		c0.91,0,1.324-0.954,1.324-2.539C618.999,750.725,618.584,749.77,617.674,749.77z"/>
	<path d="M623.196,755.388h-0.848v-4.492h-1.477v-0.63c1.027-0.072,1.449-0.172,1.701-1.207h0.623V755.388z"/>
	<path d="M624.959,751.229c0-1.152,0.701-2.17,2.035-2.17c2.016,0,2.213,1.909,2.213,2.936c0,0.675-0.115,3.564-2.295,3.564
		c-1.484,0-1.881-1.08-1.881-1.692h0.791c0.045,0.64,0.424,1.035,1.072,1.035c0.863,0,1.322-0.729,1.521-2.367l-0.018-0.019
		c-0.234,0.486-0.865,0.757-1.449,0.757C625.778,753.272,624.959,752.489,624.959,751.229z M628.282,751.12
		c0-0.72-0.369-1.351-1.279-1.351c-0.738,0-1.225,0.576-1.225,1.423c0,1.216,0.756,1.368,1.287,1.368
		C627.479,752.561,628.282,752.372,628.282,751.12z"/>
	<path d="M630.167,757.251c0.766-1.44,1.225-2.305,1.225-4.465c0-1.485-0.514-2.584-1.234-3.997h0.531
		c0.938,1.404,1.549,2.529,1.549,4.213c0,1.585-0.549,2.863-1.521,4.249H630.167z"/>
	<path d="M634.776,755.388h-0.936v-0.954h0.936V755.388z"/>
	<path d="M642.768,755.388h-0.873v-6.464h0.873V755.388z"/>
	<path d="M648.147,750.814c-0.045-0.999-0.873-1.314-1.602-1.314c-0.549,0-1.477,0.153-1.477,1.135c0,0.549,0.387,0.729,0.766,0.818
		l1.846,0.424c0.836,0.197,1.467,0.702,1.467,1.729c0,1.53-1.422,1.953-2.529,1.953c-1.197,0-1.666-0.36-1.953-0.621
		c-0.551-0.495-0.658-1.035-0.658-1.639h0.82c0,1.171,0.953,1.513,1.781,1.513c0.631,0,1.693-0.162,1.693-1.089
		c0-0.676-0.316-0.892-1.377-1.144l-1.324-0.307c-0.424-0.099-1.377-0.396-1.377-1.521c0-1.008,0.656-1.998,2.223-1.998
		c2.26,0,2.484,1.351,2.521,2.062H648.147z"/>
	<path d="M654.182,750.814c-0.045-0.999-0.873-1.314-1.604-1.314c-0.549,0-1.475,0.153-1.475,1.135c0,0.549,0.387,0.729,0.764,0.818
		l1.846,0.424c0.838,0.197,1.469,0.702,1.469,1.729c0,1.53-1.424,1.953-2.531,1.953c-1.197,0-1.664-0.36-1.953-0.621
		c-0.549-0.495-0.656-1.035-0.656-1.639h0.818c0,1.171,0.955,1.513,1.783,1.513c0.629,0,1.691-0.162,1.691-1.089
		c0-0.676-0.314-0.892-1.377-1.144l-1.324-0.307c-0.422-0.099-1.377-0.396-1.377-1.521c0-1.008,0.658-1.998,2.225-1.998
		c2.26,0,2.484,1.351,2.52,2.062H654.182z"/>
	<path d="M660.661,748.924h0.846v6.464h-0.982l-3.285-5.222h-0.018v5.222h-0.846v-6.464h1.035l3.23,5.222h0.02V748.924z"/>
	<path d="M665.344,751.327c0-2.062,1.502-2.269,2.188-2.269c1.098,0,1.98,0.711,1.98,1.891c0,1.135-0.748,1.611-1.693,2.116
		l-0.656,0.359c-0.865,0.478-1.055,0.973-1.09,1.216h3.439v0.747h-4.33c0.045-1.314,0.639-2.017,1.484-2.512l0.838-0.486
		c0.676-0.387,1.16-0.648,1.16-1.477c0-0.504-0.324-1.144-1.25-1.144c-1.197,0-1.252,1.117-1.279,1.558H665.344z"/>
	<path d="M671.977,751.787c0.135,0.009,0.279,0.018,0.414,0.018c0.613,0,1.207-0.243,1.207-1.035c0-0.378-0.225-1-1.17-1
		c-1.125,0-1.197,0.919-1.234,1.36h-0.773c0-0.928,0.377-2.071,2.043-2.071c1.225,0,1.953,0.702,1.953,1.666
		c0,0.81-0.467,1.197-0.809,1.305v0.019c0.611,0.198,1.053,0.63,1.053,1.477c0,1.035-0.666,2.034-2.277,2.034
		c-0.469,0-0.865-0.117-1.172-0.279c-0.701-0.369-0.891-1.098-0.945-1.827h0.82c0.027,0.594,0.172,1.396,1.35,1.396
		c0.811,0,1.379-0.495,1.379-1.225c0-1.063-0.938-1.161-1.477-1.161c-0.117,0-0.244,0.009-0.361,0.009V751.787z"/>
	<path d="M677.647,755.559c-1.818,0-2.17-1.936-2.17-3.25s0.352-3.25,2.17-3.25s2.17,1.936,2.17,3.25
		S679.465,755.559,677.647,755.559z M677.647,749.77c-0.91,0-1.324,0.955-1.324,2.539c0,1.585,0.414,2.539,1.324,2.539
		c0.908,0,1.322-0.954,1.322-2.539C678.969,750.725,678.555,749.77,677.647,749.77z"/>
	<path d="M683.928,752.021c0.855,0.36,1.035,1.063,1.035,1.54c0,1.017-0.648,1.998-2.143,1.998c-0.352,0-1.018-0.09-1.521-0.486
		c-0.648-0.513-0.648-1.197-0.648-1.503c0-0.774,0.404-1.27,1.07-1.54c-0.539-0.207-0.854-0.639-0.854-1.233
		c0-0.657,0.404-1.737,1.916-1.737c1.404,0,1.955,0.892,1.955,1.63C684.739,751.598,684.208,751.877,683.928,752.021z
		 M681.497,753.587c0,0.505,0.252,1.261,1.352,1.261c0.576,0,1.27-0.207,1.27-1.197c0-0.855-0.596-1.233-1.314-1.233
		C681.911,752.417,681.497,753.002,681.497,753.587z M683.918,750.715c0-0.387-0.252-0.945-1.18-0.945
		c-0.846,0-1.053,0.586-1.053,0.991c0,0.603,0.531,0.972,1.107,0.972C683.477,751.732,683.918,751.291,683.918,750.715z"/>
	<path d="M688.165,753.299h-2.205v-0.81h2.205V753.299z"/>
	<path d="M692.665,753.849v1.539h-0.793v-1.539h-2.764v-0.774l2.898-4.016h0.658v4.105h0.926v0.685H692.665z M689.836,753.164h2.035
		v-2.872h-0.018L689.836,753.164z"/>
	<path d="M695.37,751.706c0.27-0.208,0.656-0.396,1.225-0.396c1.025,0,2.078,0.72,2.078,2.025c0,0.702-0.314,2.224-2.295,2.224
		c-0.828,0-1.891-0.333-2.043-1.746h0.818c0.082,0.738,0.631,1.063,1.342,1.063c0.818,0,1.332-0.657,1.332-1.45
		c0-0.909-0.621-1.404-1.404-1.404c-0.459,0-0.873,0.216-1.17,0.604l-0.684-0.036l0.477-3.395h3.277v0.774h-2.684L695.37,751.706z"
		/>
	<path d="M699.317,749.193h4.375v0.693c-0.631,0.657-2.143,2.674-2.602,5.501h-0.873c0.215-1.737,1.367-4.006,2.584-5.42h-3.484
		V749.193z"/>
	<path d="M709.899,755.388h-1.08l-1.738-2.656l-1.791,2.656h-1.053l2.313-3.313l-2.168-3.15h1.098l1.639,2.476l1.639-2.476h1.043
		l-2.168,3.15L709.899,755.388z"/>
	<path d="M711.913,755.388h-0.938v-0.954h0.938V755.388z"/>
	<path d="M715.327,752.146c0-1.953,1.584-3.394,3.457-3.394c1.855,0,3.439,1.44,3.439,3.394c0,1.972-1.584,3.412-3.439,3.412
		C716.911,755.559,715.327,754.118,715.327,752.146z M718.784,754.991c1.539,0,2.756-1.206,2.756-2.845
		c0-1.611-1.217-2.826-2.756-2.826c-1.557,0-2.771,1.215-2.771,2.826C716.012,753.785,717.227,754.991,718.784,754.991z
		 M720.575,752.786c-0.17,0.891-0.846,1.449-1.674,1.449c-1.225,0-2.008-0.918-2.008-2.098c0-1.197,0.748-2.079,1.973-2.079
		c0.863,0,1.574,0.495,1.701,1.413h-0.604c-0.107-0.495-0.521-0.819-1.09-0.819c-0.846,0-1.295,0.639-1.295,1.477
		c0,0.811,0.504,1.513,1.322,1.513c0.568,0,0.982-0.352,1.063-0.855H720.575z"/>
	<path d="M725.241,751.327c0-2.062,1.504-2.269,2.188-2.269c1.098,0,1.98,0.711,1.98,1.891c0,1.135-0.746,1.611-1.691,2.116
		l-0.658,0.359c-0.863,0.478-1.053,0.973-1.09,1.216h3.439v0.747h-4.33c0.045-1.314,0.639-2.017,1.486-2.512l0.836-0.486
		c0.676-0.387,1.162-0.648,1.162-1.477c0-0.504-0.324-1.144-1.252-1.144c-1.197,0-1.25,1.117-1.277,1.558H725.241z"/>
	<path d="M732.387,755.559c-1.818,0-2.17-1.936-2.17-3.25s0.352-3.25,2.17-3.25s2.17,1.936,2.17,3.25
		S734.206,755.559,732.387,755.559z M732.387,749.77c-0.91,0-1.324,0.955-1.324,2.539c0,1.585,0.414,2.539,1.324,2.539
		c0.908,0,1.322-0.954,1.322-2.539C733.709,750.725,733.295,749.77,732.387,749.77z"/>
	<path d="M737.909,755.388h-0.848v-4.492h-1.477v-0.63c1.027-0.072,1.449-0.172,1.701-1.207h0.623V755.388z"/>
	<path d="M739.67,751.229c0-1.152,0.703-2.17,2.035-2.17c2.018,0,2.215,1.909,2.215,2.936c0,0.675-0.117,3.564-2.295,3.564
		c-1.486,0-1.883-1.08-1.883-1.692h0.793c0.045,0.64,0.422,1.035,1.07,1.035c0.865,0,1.324-0.729,1.521-2.367l-0.018-0.019
		c-0.234,0.486-0.863,0.757-1.449,0.757C740.491,753.272,739.67,752.489,739.67,751.229z M742.993,751.12
		c0-0.72-0.369-1.351-1.277-1.351c-0.738,0-1.225,0.576-1.225,1.423c0,1.216,0.756,1.368,1.287,1.368
		C742.192,752.561,742.993,752.372,742.993,751.12z"/>
	<path d="M748.749,755.388h-0.873v-6.464h0.873V755.388z"/>
	<path d="M753.997,755.388h-0.791v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.451,0-1.244,0.288-1.244,1.566v2.565h-0.791v-4.708
		h0.746v0.666h0.02c0.17-0.252,0.611-0.802,1.422-0.802c0.729,0,1.646,0.298,1.646,1.639V755.388z"/>
	<path d="M756.159,751.337v3.061c0,0.369,0.316,0.369,0.477,0.369h0.279v0.621c-0.287,0.027-0.512,0.063-0.594,0.063
		c-0.783,0-0.953-0.441-0.953-1.009v-3.105h-0.641v-0.657h0.641v-1.314h0.791v1.314h0.756v0.657H756.159z"/>
	<path d="M761.665,753.911c-0.025,0.226-0.242,0.892-0.836,1.287c-0.217,0.145-0.523,0.324-1.279,0.324
		c-1.322,0-2.105-0.999-2.105-2.358c0-1.458,0.701-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.746,0,1.188-0.576,1.215-0.928H761.665z M760.909,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H760.909z"/>
	<path d="M763.598,755.388h-0.793v-4.708h0.748v0.783h0.018c0.314-0.55,0.729-0.919,1.324-0.919c0.098,0,0.143,0.01,0.207,0.027
		v0.819h-0.299c-0.738,0-1.205,0.576-1.205,1.26V755.388z"/>
	<path d="M769.653,755.388h-0.793v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.449,0-1.242,0.288-1.242,1.566v2.565h-0.793v-4.708
		h0.748v0.666h0.018c0.172-0.252,0.613-0.802,1.422-0.802c0.73,0,1.648,0.298,1.648,1.639V755.388z"/>
	<path d="M770.858,752.11c0.035-1.125,0.783-1.566,1.945-1.566c0.377,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.07,0,0.17-0.019,0.26-0.036v0.576c-0.135,0.036-0.252,0.09-0.432,0.09c-0.703,0-0.811-0.36-0.838-0.72
		c-0.305,0.333-0.783,0.765-1.701,0.765c-0.863,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.105-0.766c-0.9,0-1.027,0.55-1.082,0.909H770.858z M773.756,752.984
		c-0.125,0.099-0.324,0.171-1.305,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.26,0.819,0.855,0.819
		c0.738,0,1.457-0.477,1.457-1.107V752.984z"/>
	<path d="M776.831,751.337v3.061c0,0.369,0.316,0.369,0.477,0.369h0.279v0.621c-0.287,0.027-0.512,0.063-0.594,0.063
		c-0.783,0-0.953-0.441-0.953-1.009v-3.105h-0.641v-0.657h0.641v-1.314h0.791v1.314h0.756v0.657H776.831z"/>
	<path d="M778.471,749.824v-0.9h0.791v0.9H778.471z M779.262,755.388h-0.791v-4.708h0.791V755.388z"/>
	<path d="M782.459,750.544c1.504,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C780.272,751.813,780.956,750.544,782.459,750.544z M782.459,754.829c1.135,0,1.369-1.17,1.369-1.8
		c0-0.631-0.234-1.801-1.369-1.801s-1.367,1.17-1.367,1.801C781.092,753.659,781.325,754.829,782.459,754.829z"/>
	<path d="M789.479,755.388h-0.793v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.449,0-1.242,0.288-1.242,1.566v2.565h-0.793v-4.708
		h0.748v0.666h0.018c0.172-0.252,0.613-0.802,1.422-0.802c0.73,0,1.648,0.298,1.648,1.639V755.388z"/>
	<path d="M790.686,752.11c0.035-1.125,0.783-1.566,1.943-1.566c0.379,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.072,0,0.172-0.019,0.262-0.036v0.576c-0.135,0.036-0.252,0.09-0.434,0.09c-0.701,0-0.809-0.36-0.836-0.72
		c-0.307,0.333-0.783,0.765-1.701,0.765c-0.865,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.107-0.766c-0.9,0-1.025,0.55-1.08,0.909H790.686z M793.584,752.984
		c-0.127,0.099-0.324,0.171-1.307,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.262,0.819,0.855,0.819
		c0.738,0,1.459-0.477,1.459-1.107V752.984z"/>
	<path d="M796.59,755.388h-0.793v-6.464h0.793V755.388z"/>
	<path d="M804.534,750.814c-0.045-0.999-0.873-1.314-1.604-1.314c-0.549,0-1.475,0.153-1.475,1.135c0,0.549,0.387,0.729,0.764,0.818
		l1.846,0.424c0.838,0.197,1.469,0.702,1.469,1.729c0,1.53-1.424,1.953-2.531,1.953c-1.197,0-1.664-0.36-1.953-0.621
		c-0.549-0.495-0.656-1.035-0.656-1.639h0.818c0,1.171,0.955,1.513,1.783,1.513c0.629,0,1.691-0.162,1.691-1.089
		c0-0.676-0.314-0.892-1.377-1.144l-1.324-0.307c-0.422-0.099-1.377-0.396-1.377-1.521c0-1.008,0.658-1.998,2.225-1.998
		c2.26,0,2.484,1.351,2.52,2.062H804.534z"/>
	<path d="M806.6,750.68h0.746v0.666h0.018c0.189-0.271,0.551-0.802,1.404-0.802c1.252,0,1.963,1.026,1.963,2.35
		c0,1.126-0.467,2.629-2.07,2.629c-0.631,0-1.045-0.297-1.252-0.63h-0.018v2.358H806.6V750.68z M808.643,754.839
		c0.863,0,1.27-0.783,1.27-1.819c0-0.603-0.063-1.764-1.287-1.764c-1.145,0-1.27,1.233-1.27,1.998
		C807.356,754.506,808.139,754.839,808.643,754.839z"/>
	<path d="M815.752,753.911c-0.025,0.226-0.242,0.892-0.836,1.287c-0.217,0.145-0.523,0.324-1.279,0.324
		c-1.322,0-2.105-0.999-2.105-2.358c0-1.458,0.701-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.746,0,1.188-0.576,1.215-0.928H815.752z M814.997,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H814.997z"/>
	<path d="M820.846,753.911c-0.025,0.226-0.242,0.892-0.836,1.287c-0.217,0.145-0.523,0.324-1.279,0.324
		c-1.322,0-2.105-0.999-2.105-2.358c0-1.458,0.701-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.746,0,1.188-0.576,1.215-0.928H820.846z M820.09,752.66c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H820.09z"/>
	<path d="M824.926,752.264c-0.1-0.63-0.451-1.008-1.117-1.008c-0.98,0-1.297,0.936-1.297,1.773c0,0.81,0.199,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.127-1.116h0.764c-0.08,0.747-0.539,1.8-1.916,1.8c-1.324,0-2.107-0.999-2.107-2.358
		c0-1.458,0.703-2.62,2.26-2.62c1.232,0,1.701,0.9,1.764,1.72H824.926z"/>
	<path d="M830.465,755.388h-0.791v-3.079c0-0.639-0.18-1.053-0.938-1.053c-0.646,0-1.314,0.378-1.314,1.566v2.565h-0.791v-6.464
		h0.791v2.386h0.02c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.646,0.298,1.646,1.639V755.388z"/>
	<path d="M839.03,750.868c-0.242-1.134-1.188-1.341-1.809-1.341c-1.172,0-2.115,0.864-2.115,2.565c0,1.521,0.539,2.691,2.143,2.691
		c0.566,0,1.547-0.27,1.844-1.773h0.848c-0.361,2.44-2.342,2.548-2.828,2.548c-1.467,0-2.906-0.954-2.906-3.438
		c0-1.99,1.133-3.367,3.016-3.367c1.664,0,2.529,1.035,2.654,2.115H839.03z"/>
	<path d="M842.913,750.544c1.502,0,2.188,1.27,2.188,2.485c0,1.215-0.686,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C840.725,751.813,841.409,750.544,842.913,750.544z M842.913,754.829c1.133,0,1.367-1.17,1.367-1.8
		c0-0.631-0.234-1.801-1.367-1.801c-1.135,0-1.369,1.17-1.369,1.801C841.543,753.659,841.778,754.829,842.913,754.829z"/>
	<path d="M846.1,750.68h0.748v0.666h0.018c0.172-0.252,0.576-0.802,1.422-0.802c0.848,0,1.117,0.514,1.252,0.766
		c0.396-0.441,0.711-0.766,1.441-0.766c0.504,0,1.457,0.262,1.457,1.585v3.259h-0.791v-3.043c0-0.648-0.199-1.089-0.873-1.089
		c-0.666,0-1.107,0.63-1.107,1.26v2.872h-0.793v-3.259c0-0.396-0.152-0.873-0.738-0.873c-0.449,0-1.242,0.288-1.242,1.566v2.565
		H846.1V750.68z"/>
	<path d="M853.686,750.68h0.748v0.666h0.018c0.172-0.252,0.576-0.802,1.422-0.802c0.848,0,1.117,0.514,1.252,0.766
		c0.396-0.441,0.711-0.766,1.441-0.766c0.504,0,1.457,0.262,1.457,1.585v3.259h-0.791v-3.043c0-0.648-0.199-1.089-0.873-1.089
		c-0.666,0-1.107,0.63-1.107,1.26v2.872h-0.793v-3.259c0-0.396-0.152-0.873-0.738-0.873c-0.449,0-1.242,0.288-1.242,1.566v2.565
		h-0.793V750.68z"/>
	<path d="M864.311,755.388v-0.685l-0.018-0.018c-0.334,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.531-0.333-1.531-1.458v-3.385
		h0.793v3.123c0,0.774,0.396,1.036,0.908,1.036c1,0,1.297-0.883,1.297-1.566v-2.593h0.793v4.708H864.311z"/>
	<path d="M870.151,755.388h-0.793v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.449,0-1.242,0.288-1.242,1.566v2.565h-0.793v-4.708
		h0.748v0.666h0.018c0.172-0.252,0.613-0.802,1.422-0.802c0.73,0,1.648,0.298,1.648,1.639V755.388z"/>
	<path d="M871.409,749.824v-0.9h0.791v0.9H871.409z M872.2,755.388h-0.791v-4.708h0.791V755.388z"/>
	<path d="M876.456,752.264c-0.1-0.63-0.451-1.008-1.117-1.008c-0.98,0-1.297,0.936-1.297,1.773c0,0.81,0.199,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.127-1.116h0.764c-0.08,0.747-0.539,1.8-1.916,1.8c-1.324,0-2.107-0.999-2.107-2.358
		c0-1.458,0.703-2.62,2.26-2.62c1.232,0,1.701,0.9,1.764,1.72H876.456z"/>
	<path d="M878.133,752.11c0.035-1.125,0.783-1.566,1.943-1.566c0.379,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.072,0,0.172-0.019,0.262-0.036v0.576c-0.135,0.036-0.252,0.09-0.434,0.09c-0.701,0-0.809-0.36-0.836-0.72
		c-0.307,0.333-0.783,0.765-1.701,0.765c-0.865,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.107-0.766c-0.9,0-1.025,0.55-1.08,0.909H878.133z M881.032,752.984
		c-0.127,0.099-0.324,0.171-1.307,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.262,0.819,0.855,0.819
		c0.738,0,1.459-0.477,1.459-1.107V752.984z"/>
	<path d="M884.104,751.337v3.061c0,0.369,0.316,0.369,0.477,0.369h0.279v0.621c-0.287,0.027-0.512,0.063-0.594,0.063
		c-0.783,0-0.953-0.441-0.953-1.009v-3.105h-0.641v-0.657h0.641v-1.314h0.791v1.314h0.756v0.657H884.104z"/>
	<path d="M885.745,749.824v-0.9h0.791v0.9H885.745z M886.536,755.388h-0.791v-4.708h0.791V755.388z"/>
	<path d="M889.733,750.544c1.504,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C887.545,751.813,888.229,750.544,889.733,750.544z M889.733,754.829c1.135,0,1.369-1.17,1.369-1.8
		c0-0.631-0.234-1.801-1.369-1.801s-1.367,1.17-1.367,1.801C888.366,753.659,888.598,754.829,889.733,754.829z"/>
	<path d="M896.752,755.388h-0.791v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.451,0-1.244,0.288-1.244,1.566v2.565h-0.791v-4.708
		h0.746v0.666h0.02c0.17-0.252,0.611-0.802,1.422-0.802c0.729,0,1.646,0.298,1.646,1.639V755.388z"/>
	<path d="M132.781,764.306l-0.675,1.882h-0.9l2.431-6.464h0.99l2.341,6.464h-0.954l-0.639-1.882H132.781z M135.058,763.531
		l-0.954-2.845h-0.018l-1.035,2.845H135.058z"/>
	<path d="M140.405,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.936,0.081-0.936,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.864,1.558c-1.863,0-1.999-1.08-2.025-1.647h0.765
		c0.027,0.369,0.135,0.964,1.251,0.964c0.567,0,1.081-0.226,1.081-0.748c0-0.378-0.261-0.504-0.937-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.855-1.504,1.783-1.504c1.684,0,1.801,1.242,1.801,1.477H140.405z"/>
	<path d="M145.022,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.936,0.081-0.936,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.864,1.558c-1.863,0-1.999-1.08-2.025-1.647h0.765
		c0.027,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.748c0-0.378-0.261-0.504-0.936-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.855-1.504,1.783-1.504c1.684,0,1.801,1.242,1.801,1.477H145.022z"/>
	<path d="M148.847,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C146.659,762.613,147.343,761.344,148.847,761.344z M148.847,765.629c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.369,1.17-1.369,1.801C147.478,764.459,147.712,765.629,148.847,765.629z"/>
	<path d="M155.026,763.063c-0.099-0.63-0.45-1.008-1.117-1.008c-0.981,0-1.296,0.936-1.296,1.773c0,0.81,0.198,1.81,1.288,1.81
		c0.531,0,0.99-0.396,1.125-1.116h0.765c-0.081,0.747-0.541,1.8-1.917,1.8c-1.324,0-2.107-0.999-2.107-2.358
		c0-1.458,0.702-2.62,2.26-2.62c1.233,0,1.702,0.9,1.765,1.72H155.026z"/>
	<path d="M156.75,760.624v-0.9h0.792v0.9H156.75z M157.542,766.188h-0.792v-4.708h0.792V766.188z"/>
	<path d="M158.775,762.91c0.036-1.125,0.783-1.566,1.945-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71
		c0,0.198,0.099,0.279,0.261,0.279c0.072,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.432,0.09
		c-0.702,0-0.81-0.36-0.837-0.72c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359
		c0-0.396,0.117-1.314,1.431-1.477l1.306-0.162c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.36-0.766-1.107-0.766
		c-0.9,0-1.026,0.55-1.08,0.909H158.775z M161.674,763.784c-0.126,0.099-0.324,0.171-1.305,0.297
		c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819c0.738,0,1.458-0.477,1.458-1.107V763.784z"/>
	<path d="M164.748,762.137v3.061c0,0.369,0.315,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.784,0-0.955-0.441-0.955-1.009v-3.105h-0.639v-0.657h0.639v-1.314h0.792v1.314h0.756v0.657H164.748z"/>
	<path d="M166.387,760.624v-0.9h0.792v0.9H166.387z M167.179,766.188h-0.792v-4.708h0.792V766.188z"/>
	<path d="M170.376,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C168.188,762.613,168.873,761.344,170.376,761.344z M170.376,765.629c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801s-1.369,1.17-1.369,1.801C169.008,764.459,169.242,765.629,170.376,765.629z"/>
	<path d="M177.396,766.188h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.242,0.288-1.242,1.566v2.565h-0.792v-4.708
		h0.748v0.666h0.018c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M179.694,766.188h-0.937v-0.954h0.937V766.188z"/>
	<path d="M187.182,764.306l-0.675,1.882h-0.9l2.431-6.464h0.99l2.341,6.464h-0.954l-0.639-1.882H187.182z M189.459,763.531
		l-0.954-2.845h-0.018l-1.035,2.845H189.459z"/>
	<path d="M192.964,766.188h-0.792v-6.464h0.792V766.188z"/>
	<path d="M195.013,766.188h-0.792v-6.464h0.792V766.188z"/>
	<path d="M199.603,766.188h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.323-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.297c-0.738,0-1.207,0.576-1.207,1.261V766.188z"/>
	<path d="M201.831,760.624v-0.9h0.792v0.9H201.831z M202.624,766.188h-0.792v-4.708h0.792V766.188z"/>
	<path d="M207.798,765.801c0,0.675-0.009,2.367-2.188,2.367c-0.558,0-1.647-0.153-1.8-1.404h0.792
		c0.144,0.747,0.873,0.747,1.063,0.747c1.368,0,1.341-1.099,1.341-1.656v-0.198h-0.018v0.036c-0.207,0.333-0.621,0.63-1.251,0.63
		c-1.603,0-2.071-1.503-2.071-2.629c0-1.323,0.711-2.35,1.963-2.35c0.855,0,1.215,0.531,1.404,0.802h0.018v-0.666h0.747V765.801z
		 M205.755,765.639c0.504,0,1.288-0.333,1.288-1.585c0-0.765-0.126-1.998-1.27-1.998c-1.225,0-1.288,1.161-1.288,1.764
		C204.485,764.855,204.891,765.639,205.755,765.639z"/>
	<path d="M212.843,766.188h-0.792v-3.079c0-0.639-0.18-1.053-0.936-1.053c-0.648,0-1.314,0.378-1.314,1.566v2.565h-0.792v-6.464
		h0.792v2.386h0.018c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M215,762.137v3.061c0,0.369,0.315,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.784,0-0.955-0.441-0.955-1.009v-3.105h-0.639v-0.657h0.639v-1.314H215v1.314h0.756v0.657H215z"/>
	<path d="M219.295,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.936,0.081-0.936,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.864,1.558c-1.863,0-1.999-1.08-2.025-1.647h0.765
		c0.027,0.369,0.135,0.964,1.251,0.964c0.567,0,1.081-0.226,1.081-0.748c0-0.378-0.261-0.504-0.937-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.855-1.504,1.783-1.504c1.684,0,1.801,1.242,1.801,1.477H219.295z"/>
	<path d="M224.524,766.188h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.323-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.297c-0.738,0-1.207,0.576-1.207,1.261V766.188z"/>
	<path d="M230.663,764.711c-0.027,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.107-0.999-2.107-2.358c0-1.458,0.703-2.62,2.26-2.62c1.359,0,2.026,1.081,2.026,2.746h-3.439
		c0,0.981,0.459,1.549,1.368,1.549c0.747,0,1.188-0.576,1.215-0.928H230.663z M229.907,763.46c-0.045-0.729-0.351-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H229.907z"/>
	<path d="M234.423,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.936,0.081-0.936,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.864,1.558c-1.863,0-1.999-1.08-2.025-1.647h0.765
		c0.027,0.369,0.135,0.964,1.251,0.964c0.567,0,1.081-0.226,1.081-0.748c0-0.378-0.261-0.504-0.937-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.855-1.504,1.783-1.504c1.684,0,1.801,1.242,1.801,1.477H234.423z"/>
	<path d="M240.314,764.711c-0.027,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.259-2.62c1.359,0,2.026,1.081,2.026,2.746h-3.439
		c0,0.981,0.459,1.549,1.368,1.549c0.747,0,1.188-0.576,1.215-0.928H240.314z M239.558,763.46c-0.045-0.729-0.351-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H239.558z"/>
	<path d="M242.246,766.188h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.323-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.297c-0.738,0-1.207,0.576-1.207,1.261V766.188z"/>
	<path d="M247.688,761.479h0.864l-1.792,4.708h-0.846l-1.719-4.708h0.918l1.225,3.835h0.018L247.688,761.479z"/>
	<path d="M253.118,764.711c-0.027,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.107-0.999-2.107-2.358c0-1.458,0.703-2.62,2.26-2.62c1.359,0,2.026,1.081,2.026,2.746h-3.439
		c0,0.981,0.459,1.549,1.368,1.549c0.747,0,1.188-0.576,1.215-0.928H253.118z M252.362,763.46c-0.045-0.729-0.351-1.404-1.314-1.404
		c-0.729,0-1.305,0.675-1.305,1.404H252.362z"/>
	<path d="M258.139,766.188h-0.747v-0.648h-0.018c-0.342,0.64-0.882,0.783-1.341,0.783c-1.603,0-2.071-1.503-2.071-2.629
		c0-1.323,0.711-2.35,1.963-2.35c0.855,0,1.215,0.531,1.404,0.802l0.018-0.063v-2.358h0.792V766.188z M256.05,765.639
		c0.504,0,1.287-0.333,1.287-1.585c0-0.765-0.126-1.998-1.269-1.998c-1.225,0-1.288,1.161-1.288,1.764
		C254.781,764.855,255.186,765.639,256.05,765.639z"/>
	<path d="M260.399,766.188h-0.937v-0.954h0.937V766.188z"/>
	<path d="M267.833,766.188h-0.874v-6.464h4.474v0.774h-3.601v1.98h3.169v0.774h-3.169V766.188z"/>
	<path d="M274.122,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C271.935,762.613,272.618,761.344,274.122,761.344z M274.122,765.629c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C272.754,764.459,272.987,765.629,274.122,765.629z"/>
	<path d="M278.155,766.188h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.324-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.298c-0.738,0-1.206,0.576-1.206,1.261V766.188z"/>
	<path d="M283.727,762.137v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H283.727z"/>
	<path d="M289.233,764.711c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H289.233z M288.478,763.46c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H288.478z"/>
	<path d="M293.313,763.063c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H293.313z"/>
	<path d="M298.853,766.188h-0.792v-3.079c0-0.639-0.18-1.053-0.937-1.053c-0.647,0-1.314,0.378-1.314,1.566v2.565h-0.792v-6.464
		h0.792v2.386h0.019c0.225-0.297,0.621-0.766,1.377-0.766c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M303.931,766.188h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M305.184,760.624v-0.9h0.792v0.9H305.184z M305.976,766.188h-0.792v-4.708h0.792V766.188z"/>
	<path d="M310.231,763.063c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H310.231z"/>
	<path d="M311.909,762.91c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H311.909z M314.808,763.784
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.738,0,1.458-0.477,1.458-1.107V763.784z"/>
	<path d="M317.811,766.188h-0.792v-6.464h0.792V766.188z"/>
	<path d="M324.174,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.937,0.081-0.937,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.863,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.026,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.748c0-0.378-0.261-0.504-0.937-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.854-1.504,1.782-1.504c1.684,0,1.801,1.242,1.801,1.477H324.174z"/>
	<path d="M329.122,766.188v-0.685l-0.019-0.018c-0.333,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.53-0.333-1.53-1.458v-3.385h0.792
		v3.124c0,0.773,0.396,1.035,0.909,1.035c0.999,0,1.297-0.883,1.297-1.566v-2.593h0.792v4.708H329.122z"/>
	<path d="M331.093,761.479h0.747v0.666h0.018c0.189-0.271,0.55-0.802,1.404-0.802c1.252,0,1.963,1.026,1.963,2.35
		c0,1.126-0.468,2.629-2.07,2.629c-0.631,0-1.045-0.297-1.252-0.63h-0.018v2.358h-0.792V761.479z M333.136,765.639
		c0.864,0,1.27-0.783,1.27-1.819c0-0.603-0.063-1.764-1.287-1.764c-1.144,0-1.27,1.233-1.27,1.998
		C331.849,765.306,332.632,765.639,333.136,765.639z"/>
	<path d="M336.221,761.479h0.747v0.666h0.018c0.189-0.271,0.55-0.802,1.404-0.802c1.252,0,1.963,1.026,1.963,2.35
		c0,1.126-0.468,2.629-2.07,2.629c-0.631,0-1.045-0.297-1.252-0.63h-0.018v2.358h-0.792V761.479z M338.264,765.639
		c0.864,0,1.27-0.783,1.27-1.819c0-0.603-0.063-1.764-1.287-1.764c-1.144,0-1.27,1.233-1.27,1.998
		C336.977,765.306,337.76,765.639,338.264,765.639z"/>
	<path d="M343.315,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C341.128,762.613,341.812,761.344,343.315,761.344z M343.315,765.629c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C341.947,764.459,342.181,765.629,343.315,765.629z"/>
	<path d="M347.349,766.188h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.324-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.298c-0.738,0-1.206,0.576-1.206,1.261V766.188z"/>
	<path d="M350.758,762.137v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H350.758z"/>
	<path d="M354.727,761.479h0.747v0.666h0.018c0.189-0.271,0.55-0.802,1.404-0.802c1.252,0,1.963,1.026,1.963,2.35
		c0,1.126-0.468,2.629-2.07,2.629c-0.631,0-1.045-0.297-1.252-0.63h-0.018v2.358h-0.792V761.479z M356.77,765.639
		c0.864,0,1.27-0.783,1.27-1.819c0-0.603-0.063-1.764-1.287-1.764c-1.144,0-1.27,1.233-1.27,1.998
		C355.482,765.306,356.266,765.639,356.77,765.639z"/>
	<path d="M360.697,766.188h-0.792v-6.464h0.792V766.188z"/>
	<path d="M365.962,764.711c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H365.962z M365.206,763.46c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H365.206z"/>
	<path d="M366.996,762.91c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H366.996z M369.895,763.784
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.737,0,1.458-0.477,1.458-1.107V763.784z"/>
	<path d="M374.779,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.937,0.081-0.937,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.863,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.026,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.748c0-0.378-0.261-0.504-0.937-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.854-1.504,1.782-1.504c1.684,0,1.801,1.242,1.801,1.477H374.779z"/>
	<path d="M380.67,764.711c-0.026,0.226-0.243,0.892-0.837,1.287c-0.216,0.145-0.522,0.324-1.278,0.324
		c-1.323,0-2.106-0.999-2.106-2.358c0-1.458,0.702-2.62,2.26-2.62c1.359,0,2.025,1.081,2.025,2.746h-3.439
		c0,0.981,0.459,1.549,1.369,1.549c0.747,0,1.188-0.576,1.215-0.928H380.67z M379.914,763.46c-0.045-0.729-0.352-1.404-1.314-1.404
		c-0.729,0-1.306,0.675-1.306,1.404H379.914z"/>
	<path d="M387.123,763.063c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H387.123z"/>
	<path d="M390.735,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C388.548,762.613,389.231,761.344,390.735,761.344z M390.735,765.629c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C389.367,764.459,389.601,765.629,390.735,765.629z"/>
	<path d="M397.755,766.188h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M399.912,762.137v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H399.912z"/>
	<path d="M401.529,762.91c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H401.529z M404.428,763.784
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.737,0,1.458-0.477,1.458-1.107V763.784z"/>
	<path d="M409.539,763.063c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H409.539z"/>
	<path d="M412.215,762.137v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H412.215z"/>
	<path d="M420.883,761.668c-0.243-1.134-1.188-1.341-1.81-1.341c-1.171,0-2.116,0.864-2.116,2.565c0,1.521,0.541,2.691,2.144,2.691
		c0.566,0,1.548-0.27,1.845-1.773h0.847c-0.36,2.44-2.341,2.548-2.827,2.548c-1.467,0-2.907-0.954-2.907-3.438
		c0-1.99,1.134-3.367,3.016-3.367c1.665,0,2.529,1.035,2.655,2.115H420.883z"/>
	<path d="M422.749,762.91c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H422.749z M425.647,763.784
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.738,0,1.458-0.477,1.458-1.107V763.784z"/>
	<path d="M430.835,766.188v-0.685l-0.019-0.018c-0.333,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.53-0.333-1.53-1.458v-3.385h0.792
		v3.124c0,0.773,0.396,1.035,0.909,1.035c0.999,0,1.297-0.883,1.297-1.566v-2.593h0.792v4.708H430.835z"/>
	<path d="M435.531,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.937,0.081-0.937,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.323,0.604,1.323,1.242c0,0.973-0.801,1.558-1.863,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.026,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.748c0-0.378-0.261-0.504-0.937-0.675l-0.918-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.854-1.504,1.782-1.504c1.684,0,1.801,1.242,1.801,1.477H435.531z"/>
	<path d="M437.432,762.91c0.035-1.125,0.783-1.566,1.944-1.566c0.378,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.071,0,0.171-0.019,0.261-0.036v0.576c-0.135,0.036-0.252,0.09-0.433,0.09c-0.702,0-0.81-0.36-0.837-0.72
		c-0.306,0.333-0.783,0.765-1.701,0.765c-0.864,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.106-0.766c-0.9,0-1.026,0.55-1.081,0.909H437.432z M440.33,763.784
		c-0.126,0.099-0.324,0.171-1.306,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.261,0.819,0.855,0.819
		c0.737,0,1.458-0.477,1.458-1.107V763.784z"/>
	<path d="M443.333,766.188h-0.792v-6.464h0.792V766.188z"/>
	<path d="M448.085,766.188h-0.873v-6.464h2.907c1.198,0,1.918,0.774,1.918,1.818c0,0.9-0.513,1.918-1.918,1.918h-2.034V766.188z
		 M448.085,762.713h1.737c0.783,0,1.314-0.288,1.314-1.162c0-0.819-0.558-1.08-1.278-1.08h-1.773V762.713z"/>
	<path d="M453.832,766.188h-0.792v-4.708h0.747v0.783h0.018c0.315-0.55,0.729-0.919,1.324-0.919c0.099,0,0.144,0.01,0.207,0.027
		v0.819h-0.298c-0.738,0-1.206,0.576-1.206,1.261V766.188z"/>
	<path d="M457.913,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C455.726,762.613,456.409,761.344,457.913,761.344z M457.913,765.629c1.134,0,1.368-1.17,1.368-1.8
		c0-0.631-0.234-1.801-1.368-1.801c-1.135,0-1.368,1.17-1.368,1.801C456.545,764.459,456.778,765.629,457.913,765.629z"/>
	<path d="M465.034,766.188h-0.747v-0.648h-0.018c-0.343,0.64-0.883,0.783-1.342,0.783c-1.603,0-2.07-1.503-2.07-2.629
		c0-1.323,0.711-2.35,1.962-2.35c0.855,0,1.216,0.531,1.404,0.802l0.019-0.063v-2.358h0.792V766.188z M462.945,765.639
		c0.505,0,1.288-0.333,1.288-1.585c0-0.765-0.126-1.998-1.27-1.998c-1.225,0-1.287,1.161-1.287,1.764
		C461.677,764.855,462.081,765.639,462.945,765.639z"/>
	<path d="M469.297,766.188v-0.685l-0.019-0.018c-0.333,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.53-0.333-1.53-1.458v-3.385h0.792
		v3.124c0,0.773,0.396,1.035,0.909,1.035c1,0,1.297-0.883,1.297-1.566v-2.593h0.792v4.708H469.297z"/>
	<path d="M474.299,763.063c-0.1-0.63-0.45-1.008-1.116-1.008c-0.981,0-1.297,0.936-1.297,1.773c0,0.81,0.198,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.126-1.116h0.765c-0.081,0.747-0.54,1.8-1.917,1.8c-1.323,0-2.106-0.999-2.106-2.358
		c0-1.458,0.702-2.62,2.259-2.62c1.233,0,1.702,0.9,1.765,1.72H474.299z"/>
	<path d="M476.975,762.137v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.288,0.027-0.513,0.063-0.594,0.063
		c-0.783,0-0.954-0.441-0.954-1.009v-3.105h-0.64v-0.657h0.64v-1.314h0.792v1.314h0.756v0.657H476.975z"/>
	<path d="M478.614,760.624v-0.9h0.792v0.9H478.614z M479.406,766.188h-0.792v-4.708h0.792V766.188z"/>
	<path d="M482.604,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C480.416,762.613,481.1,761.344,482.604,761.344z M482.604,765.629c1.134,0,1.368-1.17,1.368-1.8c0-0.631-0.234-1.801-1.368-1.801
		c-1.135,0-1.368,1.17-1.368,1.801C481.235,764.459,481.469,765.629,482.604,765.629z"/>
	<path d="M489.622,766.188h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M493.546,762.82c-0.009-0.297-0.117-0.792-1.116-0.792c-0.243,0-0.937,0.081-0.937,0.666c0,0.388,0.243,0.478,0.855,0.63
		l0.792,0.198c0.981,0.243,1.324,0.604,1.324,1.242c0,0.973-0.802,1.558-1.864,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.026,0.369,0.135,0.964,1.251,0.964c0.567,0,1.08-0.226,1.08-0.748c0-0.378-0.261-0.504-0.936-0.675l-0.919-0.225
		c-0.657-0.162-1.089-0.495-1.089-1.144c0-1.035,0.855-1.504,1.782-1.504c1.684,0,1.801,1.242,1.801,1.477H493.546z"/>
	<path d="M499.972,759.589c-0.765,1.44-1.225,2.305-1.225,4.465c0,1.485,0.514,2.584,1.233,3.997h-0.531
		c-0.936-1.404-1.548-2.529-1.548-4.213c0-1.585,0.549-2.863,1.521-4.249H499.972z"/>
	<path d="M501.002,760.624v-0.9h0.792v0.9H501.002z M501.794,766.188h-0.792v-4.708h0.792V766.188z"/>
	<path d="M506.879,766.188h-0.792v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.45,0-1.243,0.288-1.243,1.566v2.565h-0.792v-4.708
		h0.747v0.666h0.019c0.171-0.252,0.612-0.802,1.422-0.802c0.729,0,1.647,0.298,1.647,1.639V766.188z"/>
	<path d="M509.059,762.137v4.051h-0.792v-4.051h-0.648v-0.657h0.648v-0.811c0-0.711,0.45-1.035,1.225-1.035
		c0.116,0,0.233,0.009,0.359,0.018v0.711c-0.099-0.009-0.225-0.018-0.324-0.018c-0.342,0-0.468,0.171-0.468,0.549v0.586h0.792v0.657
		H509.059z"/>
	<path d="M512.47,761.344c1.503,0,2.188,1.27,2.188,2.485c0,1.215-0.685,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C510.282,762.613,510.966,761.344,512.47,761.344z M512.47,765.629c1.134,0,1.368-1.17,1.368-1.8c0-0.631-0.234-1.801-1.368-1.801
		c-1.135,0-1.368,1.17-1.368,1.801C511.102,764.459,511.335,765.629,512.47,765.629z"/>
	<path d="M520.958,761.182h0.611c-0.342,1.261-0.811,2.692-0.811,2.936c0,0.171,0.045,0.261,0.189,0.261
		c0.611,0,1.279-0.981,1.279-1.944c0-1.44-1.152-2.323-2.512-2.323c-1.604,0-2.738,1.288-2.738,2.881c0,1.594,1.225,2.81,2.801,2.81
		c0.838,0,1.674-0.396,2.188-1.026h0.604c-0.586,0.981-1.656,1.584-2.809,1.584c-1.928,0-3.439-1.503-3.439-3.421
		c0-1.882,1.539-3.385,3.395-3.385c1.664,0,3.096,1.17,3.096,2.809c0,1.63-1.35,2.602-2.15,2.602c-0.307,0-0.559-0.18-0.576-0.567
		l-0.02,0.01c-0.26,0.278-0.656,0.558-1.08,0.558c-0.773,0-1.322-0.657-1.322-1.44c0-1.215,0.828-2.484,2.078-2.484
		c0.434,0,0.803,0.198,1.035,0.702L520.958,761.182z M519.76,761.687c-0.803,0-1.359,1.008-1.359,1.755
		c0,0.514,0.305,0.873,0.756,0.873c0.766,0,1.342-1.08,1.342-1.8C520.499,762.073,520.155,761.687,519.76,761.687z"/>
	<path d="M527.706,763.063c-0.1-0.63-0.451-1.008-1.117-1.008c-0.98,0-1.297,0.936-1.297,1.773c0,0.81,0.199,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.127-1.116h0.764c-0.08,0.747-0.539,1.8-1.916,1.8c-1.324,0-2.107-0.999-2.107-2.358
		c0-1.458,0.703-2.62,2.26-2.62c1.232,0,1.701,0.9,1.764,1.72H527.706z"/>
	<path d="M529.383,762.91c0.035-1.125,0.783-1.566,1.943-1.566c0.379,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.072,0,0.172-0.019,0.262-0.036v0.576c-0.135,0.036-0.252,0.09-0.434,0.09c-0.701,0-0.809-0.36-0.836-0.72
		c-0.307,0.333-0.783,0.765-1.701,0.765c-0.865,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.107-0.766c-0.9,0-1.025,0.55-1.08,0.909H529.383z M532.282,763.784
		c-0.127,0.099-0.324,0.171-1.307,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.262,0.819,0.855,0.819
		c0.738,0,1.459-0.477,1.459-1.107V763.784z"/>
	<path d="M537.469,766.188v-0.685l-0.02-0.018c-0.332,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.529-0.333-1.529-1.458v-3.385h0.791
		v3.124c0,0.773,0.396,1.035,0.91,1.035c1,0,1.297-0.883,1.297-1.566v-2.593h0.791v4.708H537.469z"/>
	<path d="M542.165,762.82c-0.01-0.297-0.117-0.792-1.117-0.792c-0.242,0-0.936,0.081-0.936,0.666c0,0.388,0.242,0.478,0.855,0.63
		l0.791,0.198c0.982,0.243,1.324,0.604,1.324,1.242c0,0.973-0.801,1.558-1.863,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.025,0.369,0.135,0.964,1.25,0.964c0.568,0,1.08-0.226,1.08-0.748c0-0.378-0.26-0.504-0.936-0.675l-0.918-0.225
		c-0.658-0.162-1.09-0.495-1.09-1.144c0-1.035,0.855-1.504,1.783-1.504c1.684,0,1.801,1.242,1.801,1.477H542.165z"/>
	<path d="M544.065,762.91c0.035-1.125,0.783-1.566,1.945-1.566c0.377,0,1.746,0.108,1.746,1.314v2.71c0,0.198,0.1,0.279,0.262,0.279
		c0.07,0,0.17-0.019,0.26-0.036v0.576c-0.135,0.036-0.252,0.09-0.432,0.09c-0.703,0-0.811-0.36-0.838-0.72
		c-0.305,0.333-0.783,0.765-1.701,0.765c-0.863,0-1.477-0.549-1.477-1.359c0-0.396,0.117-1.314,1.432-1.477l1.305-0.162
		c0.189-0.018,0.414-0.09,0.414-0.558c0-0.495-0.359-0.766-1.105-0.766c-0.9,0-1.027,0.55-1.082,0.909H544.065z M546.963,763.784
		c-0.125,0.099-0.324,0.171-1.305,0.297c-0.387,0.054-1.008,0.171-1.008,0.765c0,0.514,0.26,0.819,0.855,0.819
		c0.738,0,1.457-0.477,1.457-1.107V763.784z"/>
	<path d="M549.967,766.188h-0.793v-6.464h0.793V766.188z"/>
	<path d="M551.184,761.479h0.746v0.666h0.018c0.189-0.271,0.551-0.802,1.404-0.802c1.252,0,1.963,1.026,1.963,2.35
		c0,1.126-0.467,2.629-2.07,2.629c-0.631,0-1.045-0.297-1.252-0.63h-0.018v2.358h-0.791V761.479z M553.227,765.639
		c0.863,0,1.27-0.783,1.27-1.819c0-0.603-0.063-1.764-1.287-1.764c-1.145,0-1.27,1.233-1.27,1.998
		C551.94,765.306,552.723,765.639,553.227,765.639z"/>
	<path d="M557.194,766.188h-0.793v-4.708h0.748v0.783h0.018c0.314-0.55,0.729-0.919,1.324-0.919c0.098,0,0.143,0.01,0.207,0.027
		v0.819h-0.299c-0.738,0-1.205,0.576-1.205,1.261V766.188z"/>
	<path d="M561.274,761.344c1.504,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C559.086,762.613,559.77,761.344,561.274,761.344z M561.274,765.629c1.135,0,1.369-1.17,1.369-1.8c0-0.631-0.234-1.801-1.369-1.801
		s-1.367,1.17-1.367,1.801C559.907,764.459,560.139,765.629,561.274,765.629z"/>
	<path d="M568.395,766.188h-0.746v-0.648h-0.018c-0.344,0.64-0.883,0.783-1.342,0.783c-1.604,0-2.07-1.503-2.07-2.629
		c0-1.323,0.711-2.35,1.961-2.35c0.855,0,1.217,0.531,1.404,0.802l0.02-0.063v-2.358h0.791V766.188z M566.307,765.639
		c0.504,0,1.287-0.333,1.287-1.585c0-0.765-0.125-1.998-1.27-1.998c-1.225,0-1.287,1.161-1.287,1.764
		C565.038,764.855,565.442,765.639,566.307,765.639z"/>
	<path d="M572.659,766.188v-0.685l-0.02-0.018c-0.332,0.549-0.738,0.837-1.494,0.837c-0.693,0-1.529-0.333-1.529-1.458v-3.385h0.791
		v3.124c0,0.773,0.396,1.035,0.91,1.035c1,0,1.297-0.883,1.297-1.566v-2.593h0.791v4.708H572.659z"/>
	<path d="M577.659,763.063c-0.1-0.63-0.449-1.008-1.115-1.008c-0.982,0-1.297,0.936-1.297,1.773c0,0.81,0.197,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.125-1.116h0.766c-0.082,0.747-0.541,1.8-1.918,1.8c-1.322,0-2.105-0.999-2.105-2.358
		c0-1.458,0.701-2.62,2.258-2.62c1.234,0,1.703,0.9,1.766,1.72H577.659z"/>
	<path d="M580.336,762.137v3.061c0,0.369,0.314,0.369,0.477,0.369h0.279v0.621c-0.289,0.027-0.514,0.063-0.594,0.063
		c-0.783,0-0.955-0.441-0.955-1.009v-3.105h-0.639v-0.657h0.639v-1.314h0.793v1.314h0.756v0.657H580.336z"/>
	<path d="M581.975,760.624v-0.9h0.793v0.9H581.975z M582.768,766.188h-0.793v-4.708h0.793V766.188z"/>
	<path d="M585.965,761.344c1.502,0,2.188,1.27,2.188,2.485c0,1.215-0.686,2.484-2.188,2.484c-1.504,0-2.188-1.27-2.188-2.484
		C583.778,762.613,584.461,761.344,585.965,761.344z M585.965,765.629c1.133,0,1.367-1.17,1.367-1.8
		c0-0.631-0.234-1.801-1.367-1.801c-1.135,0-1.369,1.17-1.369,1.801C584.596,764.459,584.831,765.629,585.965,765.629z"/>
	<path d="M592.983,766.188h-0.791v-2.898c0-0.819-0.234-1.233-1.008-1.233c-0.451,0-1.244,0.288-1.244,1.566v2.565h-0.791v-4.708
		h0.746v0.666h0.02c0.17-0.252,0.611-0.802,1.422-0.802c0.729,0,1.646,0.298,1.646,1.639V766.188z"/>
	<path d="M596.907,762.82c-0.008-0.297-0.117-0.792-1.115-0.792c-0.244,0-0.938,0.081-0.938,0.666c0,0.388,0.244,0.478,0.855,0.63
		l0.793,0.198c0.98,0.243,1.324,0.604,1.324,1.242c0,0.973-0.803,1.558-1.865,1.558c-1.863,0-1.998-1.08-2.025-1.647h0.766
		c0.027,0.369,0.135,0.964,1.252,0.964c0.566,0,1.08-0.226,1.08-0.748c0-0.378-0.262-0.504-0.936-0.675l-0.92-0.225
		c-0.656-0.162-1.088-0.495-1.088-1.144c0-1.035,0.855-1.504,1.781-1.504c1.684,0,1.801,1.242,1.801,1.477H596.907z"/>
	<path d="M599.891,766.188h-0.938v-0.954h0.938V766.188z"/>
	<path d="M604.151,763.063c-0.1-0.63-0.451-1.008-1.117-1.008c-0.98,0-1.297,0.936-1.297,1.773c0,0.81,0.199,1.81,1.287,1.81
		c0.531,0,0.99-0.396,1.127-1.116h0.764c-0.08,0.747-0.539,1.8-1.916,1.8c-1.324,0-2.107-0.999-2.107-2.358
		c0-1.458,0.703-2.62,2.26-2.62c1.232,0,1.701,0.9,1.764,1.72H604.151z"/>
	<path d="M607.762,761.344c1.504,0,2.188,1.27,2.188,2.485c0,1.215-0.684,2.484-2.188,2.484s-2.188-1.27-2.188-2.484
		C605.575,762.613,606.258,761.344,607.762,761.344z M607.762,765.629c1.135,0,1.369-1.17,1.369-1.8
		c0-0.631-0.234-1.801-1.369-1.801s-1.367,1.17-1.367,1.801C606.395,764.459,606.627,765.629,607.762,765.629z"/>
	<path d="M610.948,761.479h0.746v0.666h0.02c0.17-0.252,0.576-0.802,1.422-0.802s1.117,0.514,1.252,0.766
		c0.395-0.441,0.711-0.766,1.439-0.766c0.504,0,1.459,0.262,1.459,1.585v3.259h-0.793v-3.043c0-0.648-0.197-1.089-0.873-1.089
		c-0.666,0-1.107,0.63-1.107,1.26v2.872h-0.791v-3.259c0-0.396-0.154-0.873-0.738-0.873c-0.451,0-1.244,0.288-1.244,1.566v2.565
		h-0.791V761.479z"/>
	<path d="M618.317,768.051c0.766-1.44,1.225-2.305,1.225-4.465c0-1.485-0.514-2.584-1.234-3.997h0.531
		c0.938,1.404,1.549,2.529,1.549,4.213c0,1.585-0.549,2.863-1.521,4.249H618.317z"/>
	<path d="M622.926,766.188h-0.938v-0.954h0.938V766.188z"/>
</g>
<g>
	<path fill="#211E1E" d="M131.947,450.078c-0.147,4.03-0.83,8.625-5.768,8.625c-5.619,0-6.377-6.159-6.377-13.783
		c0-7.624,0.759-13.805,6.377-13.805c4.204,0,5.4,3.202,5.499,6.939h4.228c-0.17-6.475-2.981-10.24-9.727-10.24
		c-9.089,0-10.604,8.797-10.604,17.105s1.246,17.105,10.604,17.105c7.185,0,9.629-5.718,10.021-11.947H131.947z"/>
	<path fill="#211E1E" d="M146.346,431.775h4.839c3.786,0,5.938,2.028,5.938,5.252c0,6.378-4.375,6.378-6.771,6.378h-4.006V431.775z
		 M142.29,461.39h4.056v-14.662h5.474c2.077,0,4.571,0.268,4.887,4.791l0.292,5.129c0.099,1.613,0.417,4.059,1.1,4.741h4.89
		c-0.955-0.609-1.564-1.515-1.762-5.156l-0.267-5.646c-0.173-2.81-1.786-5.156-4.669-5.473v-0.098
		c3.836-0.927,5.083-4.715,5.083-8.309c0-5.203-3.275-8.258-8.429-8.258H142.29V461.39z"/>
	<path fill="#211E1E" d="M178.682,431.115c5.619,0,6.353,6.181,6.353,13.805c0,7.625-0.734,13.783-6.353,13.783
		c-5.621,0-6.377-6.159-6.377-13.783C172.305,437.296,173.062,431.115,178.682,431.115z M178.682,427.814
		c-9.091,0-10.606,8.797-10.606,17.105s1.516,17.105,10.606,17.105c9.09,0,10.605-8.797,10.605-17.105
		S187.772,427.814,178.682,427.814z"/>
	<path fill="#211E1E" d="M214.536,436.979c-0.097-6.037-2.956-9.166-9.237-9.166c-8.357,0-9.627,6.038-9.627,9.066
		c0,12.098,15.541,6.697,15.541,16.03c0,3.496-2.491,5.792-5.718,5.792c-5.961,0-6.182-3.959-6.182-7.647h-4.057
		c0,7.012,2.249,10.97,9.948,10.97c4.666,0,10.238-2.15,10.238-9.774c0-12.047-15.542-6.5-15.542-15.784
		c0-3.496,2.028-5.352,5.448-5.352c3.886,0,5.133,2.419,5.133,5.865H214.536z"/>
	<path fill="#211E1E" d="M239.914,436.979c-0.099-6.037-2.958-9.166-9.236-9.166c-8.358,0-9.63,6.038-9.63,9.066
		c0,12.098,15.543,6.697,15.543,16.03c0,3.496-2.494,5.792-5.718,5.792c-5.963,0-6.182-3.959-6.182-7.647h-4.058
		c0,7.012,2.248,10.97,9.945,10.97c4.668,0,10.239-2.15,10.239-9.774c0-12.047-15.541-6.5-15.541-15.784
		c0-3.496,2.027-5.352,5.449-5.352c3.884,0,5.131,2.419,5.131,5.865H239.914z"/>
	<path fill="#211E1E" d="M251.307,431.775h4.839c3.787,0,5.937,2.028,5.937,5.252c0,6.378-4.374,6.378-6.768,6.378h-4.009V431.775z
		 M247.25,461.39h4.057v-14.662h5.474c2.077,0,4.57,0.268,4.888,4.791l0.294,5.129c0.097,1.613,0.414,4.059,1.098,4.741h4.887
		c-0.954-0.609-1.563-1.515-1.759-5.156l-0.268-5.646c-0.171-2.81-1.785-5.156-4.667-5.473v-0.098
		c3.836-0.927,5.083-4.715,5.083-8.309c0-5.203-3.275-8.258-8.431-8.258H247.25V461.39z"/>
	<path fill="#211E1E" d="M283.643,431.115c5.622,0,6.353,6.181,6.353,13.805c0,7.625-0.731,13.783-6.353,13.783
		c-5.62,0-6.378-6.159-6.378-13.783C277.264,437.296,278.022,431.115,283.643,431.115z M283.643,427.814
		c-9.091,0-10.605,8.797-10.605,17.105s1.514,17.105,10.605,17.105c9.09,0,10.605-8.797,10.605-17.105
		S292.732,427.814,283.643,427.814z"/>
	<path fill="#211E1E" d="M310.505,433.117h0.073l4.203,15.908h-8.846L310.505,433.117z M298.407,461.39h4.253l2.444-9.041h10.507
		l2.345,9.041h4.252l-8.846-32.938h-5.45L298.407,461.39z"/>
	<path fill="#211E1E" d="M327.231,461.39h9.236c9.042,0,11.12-8.162,11.12-16.471c0-9.089-1.295-16.468-11.534-16.468h-8.822V461.39
		z M331.311,431.775h4.229c7.378,0,7.794,6.816,7.794,13.194c0,5.914-0.782,13.098-7.794,13.098h-4.229V431.775z"/>
	<path fill="#211E1E" d="M372.829,436.979c-0.097-6.037-2.955-9.166-9.236-9.166c-8.358,0-9.628,6.038-9.628,9.066
		c0,12.098,15.541,6.697,15.541,16.03c0,3.496-2.491,5.792-5.717,5.792c-5.962,0-6.182-3.959-6.182-7.647h-4.057
		c0,7.012,2.249,10.97,9.946,10.97c4.667,0,10.237-2.15,10.237-9.774c0-12.047-15.541-6.5-15.541-15.784
		c0-3.496,2.028-5.352,5.448-5.352c3.887,0,5.134,2.419,5.134,5.865H372.829z"/>
	<path fill="#211E1E" d="M403.693,431.115c5.619,0,6.353,6.181,6.353,13.805c0,7.625-0.733,13.783-6.353,13.783
		c-5.621,0-6.378-6.159-6.378-13.783C397.315,437.296,398.072,431.115,403.693,431.115z M403.693,427.814
		c-9.091,0-10.607,8.797-10.607,17.105s1.516,17.105,10.607,17.105c9.09,0,10.605-8.797,10.605-17.105
		S412.783,427.814,403.693,427.814z"/>
	<polygon fill="#211E1E" points="421.506,461.39 425.563,461.39 425.563,445.972 437.781,445.972 437.781,442.378 425.563,442.378 
		425.563,432.043 438.515,432.043 438.515,428.452 421.506,428.452 	"/>
	<path fill="#EE1D52" d="M475.712,436.979c-0.097-6.037-2.956-9.166-9.236-9.166c-8.358,0-9.628,6.038-9.628,9.066
		c0,12.098,15.542,6.697,15.542,16.03c0,3.496-2.493,5.792-5.719,5.792c-5.962,0-6.182-3.959-6.182-7.647h-4.057
		c0,7.012,2.249,10.97,9.946,10.97c4.667,0,10.239-2.15,10.239-9.774c0-12.047-15.542-6.5-15.542-15.784
		c0-3.496,2.029-5.352,5.45-5.352c3.885,0,5.132,2.419,5.132,5.865H475.712z"/>
	<path fill="#211E1E" d="M487.113,431.775h4.569c4.007,0,5.718,2.345,5.718,5.938c0,4.396-2.492,6.425-5.449,6.425h-4.838V431.775z
		 M483.056,461.39h4.057v-13.928h5.009c5.596,0,9.505-3.421,9.505-9.554c0-9.456-7.331-9.456-10.19-9.456h-8.381V461.39z"/>
	<polygon fill="#211E1E" points="507.554,461.39 525.124,461.39 525.124,457.798 511.611,457.798 511.611,445.972 523.829,445.972 
		523.829,442.378 511.611,442.378 511.611,432.043 524.561,432.043 524.561,428.452 507.554,428.452 	"/>
	<polygon fill="#211E1E" points="531.22,461.39 548.79,461.39 548.79,457.798 535.276,457.798 535.276,445.972 547.495,445.972 
		547.495,442.378 535.276,442.378 535.276,432.043 548.228,432.043 548.228,428.452 531.22,428.452 	"/>
	<path fill="#211E1E" d="M570.795,450.078c-0.147,4.03-0.831,8.625-5.768,8.625c-5.621,0-6.378-6.159-6.378-13.783
		c0-7.624,0.757-13.805,6.378-13.805c4.203,0,5.4,3.202,5.498,6.939h4.229c-0.172-6.475-2.981-10.24-9.727-10.24
		c-9.09,0-10.605,8.797-10.605,17.105s1.246,17.105,10.605,17.105c7.185,0,9.629-5.718,10.019-11.947H570.795z"/>
	<polygon fill="#211E1E" points="600.546,428.452 596.489,428.452 596.489,441.914 585.201,441.914 585.201,428.452 
		581.144,428.452 581.144,461.39 585.201,461.39 585.201,445.533 596.489,445.533 596.489,461.39 600.546,461.39 	"/>
	<path fill="#211E1E" d="M628.936,433.117h0.072l4.204,15.908h-8.847L628.936,433.117z M616.84,461.39h4.251l2.444-9.041h10.507
		l2.347,9.041h4.251l-8.846-32.938h-5.449L616.84,461.39z"/>
	<polygon fill="#211E1E" points="645.27,461.39 649.328,461.39 649.328,434.73 649.401,434.73 659.982,461.39 665.553,461.39 
		665.553,428.452 661.498,428.452 661.498,455.942 661.399,455.942 650.525,428.452 645.27,428.452 	"/>
	<path fill="#211E1E" d="M673.624,461.39h9.237c9.042,0,11.118-8.162,11.118-16.471c0-9.089-1.294-16.468-11.534-16.468h-8.821
		V461.39z M677.705,431.775h4.228c7.379,0,7.796,6.816,7.796,13.194c0,5.914-0.782,13.098-7.796,13.098h-4.228V431.775z"/>
	<polygon fill="#EE1D52" points="714.567,461.39 731.991,461.39 731.991,457.798 718.624,457.798 718.624,428.452 714.567,428.452 	
		"/>
	<path fill="#211E1E" d="M746.402,433.117h0.074l4.201,15.908h-8.846L746.402,433.117z M734.305,461.39h4.252l2.443-9.041h10.509
		l2.345,9.041h4.253l-8.846-32.938h-5.451L734.305,461.39z"/>
	<polygon fill="#211E1E" points="762.738,461.39 766.794,461.39 766.794,434.73 766.867,434.73 777.449,461.39 783.019,461.39 
		783.019,428.452 778.962,428.452 778.962,455.942 778.865,455.942 767.992,428.452 762.738,428.452 	"/>
	<path fill="#211E1E" d="M810.56,437.588c-0.05-1.708-0.27-9.774-9.727-9.774c-9.09,0-10.604,8.797-10.604,17.105
		c0,8.847,1.515,17.105,10.091,17.105c3.593,0,6.231-2.294,7.111-5.105h0.098v4.47h3.031v-17.104h-10.191v3.298h6.501
		c0,4.399-0.416,11.119-6.036,11.119s-6.379-6.159-6.379-13.783c0-7.624,0.759-13.805,6.379-13.805c4.008,0,5.304,3.005,5.498,6.474
		H810.56z"/>
	<path fill="#211E1E" d="M817.383,451.616c0,6.768,3.055,10.409,9.652,10.409c6.451,0,9.677-4.105,9.677-10.409v-23.164h-4.057
		v23.164c0,4.619-1.294,7.087-5.62,7.087c-4.008,0-5.57-2.249-5.57-7.087v-23.164h-4.082V451.616z"/>
	<path fill="#211E1E" d="M853.066,433.117h0.074l4.203,15.908h-8.846L853.066,433.117z M840.971,461.39h4.251l2.443-9.041h10.508
		l2.347,9.041h4.251l-8.846-32.938h-5.449L840.971,461.39z"/>
	<path fill="#211E1E" d="M889.269,437.588c-0.049-1.708-0.268-9.774-9.726-9.774c-9.09,0-10.605,8.797-10.605,17.105
		c0,8.847,1.516,17.105,10.093,17.105c3.592,0,6.23-2.294,7.111-5.105h0.097v4.47h3.03v-17.104H879.08v3.298h6.5
		c0,4.399-0.415,11.119-6.036,11.119c-5.62,0-6.378-6.159-6.378-13.783c0-7.624,0.758-13.805,6.378-13.805
		c4.008,0,5.304,3.005,5.499,6.474H889.269z"/>
	<polygon fill="#211E1E" points="896.485,461.39 914.054,461.39 914.054,457.798 900.541,457.798 900.541,445.972 912.758,445.972 
		912.758,442.378 900.541,442.378 900.541,432.043 913.492,432.043 913.492,428.452 896.485,428.452 	"/>
	<g>
		<defs>
			<rect id="SVGID_3_" x="93.099" y="201.251" width="223.643" height="198.62"/>
		</defs>
		<clipPath id="SVGID_4_">
			<use xlink:href="#SVGID_3_"  overflow="visible"/>
		</clipPath>
		<g clip-path="url(#SVGID_4_)">
			<defs>
				<path id="SVGID_5_" d="M93.099,201.251v197.771H240.33c-0.282-16.691,1.654-23.734,1.654-23.734s1.429-9.211,6.171-11.113
					c4.74-1.894,3.786-1.417,18.479,0c14.692,1.424,27.962-2.364,28.908-7.105c0.947-4.732-1.423-17.521-0.946-19.891
					c0.472-2.369,8.528-8.051,8.528-10.422c0-2.364-3.317-5.211-3.317-6.158c0-0.947,5.211-5.682,5.211-8.052
					c0-2.363-4.74-8.052-3.788-10.416c0.941-2.371,12.316-6.165,14.215-10.422c1.894-4.264,1.417-7.582-2.37-11.846
					c-3.794-4.265-18.957-23.208-18.957-28.89c0-5.688,4.74-14.685,3.788-20.368c-0.946-5.687-1.417-15.157-6.157-23.208
					c-1.333-2.258-2.547-4.331-3.611-6.146H93.099z"/>
			</defs>
			<clipPath id="SVGID_6_">
				<use xlink:href="#SVGID_5_"  overflow="visible"/>
			</clipPath>
			<g clip-path="url(#SVGID_6_)">
				<defs>
					<rect id="SVGID_7_" x="93.099" y="201.251" width="378.472" height="261.178"/>
				</defs>
				<clipPath id="SVGID_8_">
					<use xlink:href="#SVGID_7_"  overflow="visible"/>
				</clipPath>
				<g clip-path="url(#SVGID_8_)">
					<defs>
						<rect id="SVGID_9_" x="93.099" y="201.251" width="377.256" height="261.178"/>
					</defs>
					<clipPath id="SVGID_10_">
						<use xlink:href="#SVGID_9_"  overflow="visible"/>
					</clipPath>
					<g clip-path="url(#SVGID_10_)">
						<defs>
							<rect id="SVGID_11_" y="157.406" width="470.352" height="305.348"/>
						</defs>
						<clipPath id="SVGID_12_">
							<use xlink:href="#SVGID_11_"  overflow="visible"/>
						</clipPath>
						<g transform="matrix(1 0 0 1 0 8.895731e-006)" clip-path="url(#SVGID_12_)">
							
								<image overflow="visible" width="1772" height="1181" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEBDwEPAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAAEvDAAD8RAACFKf/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIBIAG8QMBIgACEQEDEQH/
xADCAAACAwEBAQAAAAAAAAAAAAAAAQIDBAUGBwEBAQAAAAAAAAAAAAAAAAAAAAEQAAICAgIBAQYE
BgEFAQADAAECAAMRBBIFIRMQMSIUFQYgMEBBUDIjMzQWYHBCJDUHJZCgwBEAAQMCBAMEBwUGBAMG
BQQDAQARAiExQVESA2FxIhCBkROhscHRMpIEIEJSIzNg8OFicoIwUPEUQLKzcKLSczR0wuJDU2OD
kyRUBaNkEgEAAAAAAAAAAAAAAAAAAADA/9oADAMBAAIRAxEAAAD0+OfNreYA3GETcYQ3GENxhRvW
ENxhF3GFG8wCbzAG9YQ3GFG8wBvMAbzAG8wJd5gDeYA3mAN5zw6Bzw3mAN5zw6Bzw6Bzw6C54dA5
4dA56Oic4Ogc8Ogc4Oic4Ogc9HROcHROcjpLnB0TnB0Tmh0jmh0Tmh0jmh0jmh0lzQ6RzQ6RzA6R
zQ6RzA6ZzA6ZzA6RzA6ZzA6ZzA6Zyw6i5gdQ5YdM5gdM5gdM5gdM5bOmctnTOYHUXMDqHLDqHLZ0
zmB1Dls6ZzA6b5gdM5odM5jOkc1nSOazpHNZ0XzWdF81nROeHROeHROeHROeze8AnQMDN5hDeYWb
jCzc8LNrws2vEzY8bNhja7DIGwyM1mUNZlZpeVml5WaTMzSZyNJnDQ84aCgNBQF7zsvKGXOgLykL
nSFxSy4pC4pZbdkvNoB53ndHnUAIhghoAAABMEMEACYIAAAABMEAAAACGCAAAAFQ0ACCYIaAAEwQ
CgAhpAAQxUAACIaBMVDQAAmCAAAQ0AAhoABDQAAAIAABDQAAmACAAAAAAAABMAAAAAAABgAAMAAG
AMAGmDTBpg0waYNMYAwBtCNpjAG0xgDaYMBtMYMGmNpg0xtMGmMGo0wYDAGDAAGMABgAxgADAGnA
BTABhABQwg0Z9BsAPPc3pc6kMRAxAAAIYIAAATBDBAAmCGgAAAQwQ0AAAAmCAABUMEAgAIaUAABE
NAAqGCAQTFQAJghpABUNAACYIAABAAAIaAAQ0AAJggAABNAAAAJggAAAAAAAAAAAAAYAAADABpgA
MAGANMGmDTBpjAGDABG0waY2mDAbTBpjBjAG0wYDaYMBtMYA2mo0xgDAG0wBg0wYADBpgAMCGADT
oBwAAMAGF9F5sAPPc7o8+kAgAAAhghoABDBDQAAAIaAAABDQAAAJgIYIAABAAAAAhoAATBAKAAmI
gFEwQCACoAABDQACGgABMEACYIaAAQ0AAgAAENAAAAgAAAABMQAAAmAAAAAA0DAAAYAADEwBgAMA
YANMGmDAYA2mDTQaYwBtMYA2mMGDTGDBpjAG0xtMGmNpqNMYA2mg01GAMAYDABgDAGmDTgApgQMA
ApgQMAvovNgB5/ndHn0gEEwQwQAAAmCGCGgTBDQAAAIYIaBMEMEAAAJggAABMENAAIaUAQTBDQAK
JiIBRMENIJghpQAQ0gAqGgABMEAAAgATBAAmCABMEAAAACGgAAATAEADBAxDQAwAAAABgAAMAGmA
AwBgDAGAMAYMGmDTQaYyFJqeVmp5Q1PNpG4yBqQNMGmNpjBg0xgDaajAbTBpgwGAMGAAwBgwAGBA
wBhQMgAoYQAwAHdTcbADz/P6HPoAQTATBAAmCGCAAAABDBACGCAAAQwQ0AAhggAAENAAAAhoAAAV
AIACGCAUTEQAAKhoAEQxUAIaQAUTQACGgAAAQAJggAAEAAAhoAAAEMEAAAAAAAAAAAAAAAwAAGgY
AwAaYADAGAMAbTBpg4Vl9JaZrNAUzsaMGDTI0agx33VFzxVHTcZDaYMBtMYA2mDBW0xgDAG0wBg0
waYNMGmDAAcAOgCGAAOgCGADAL6LzWAcDn9Dn0hiIAAAABMENAAIYIAAATBDBACGCGgABMENAAIa
AAEwQ0AAJghoABDSgCCYIBQBEMEAqGCARDSgAJpABUNAAIaAAEwQAJggATBAAAIAAAAEMEAAAAAA
AAAxAAwAAAAaYAwABpg0wAGAMGDhUXVzsIWAMGgDBpjaYMBtMYAZ9LMW2GM6LzaRgwaYwYNMbTVg
wBg0waYNMGAMAYDAGBDABhQDgAGADTAAYAX0XmsA4HP6PPpDSAAACGCAAAEwQwQ0CYIYIAABDBDQ
JghoAATBDBACGgABMENAACYIAEwQ0AAhoAFExEAomgAQTFQAhiIBRMEACcRgAmCGgTBAAmCAAAQw
QAAAAIYIaAaAYIYIYAAAAAMAGmAAwBiYNMGmEFElcmDAYCNpg0xgwaY2mDTGDG0DapK6dsC+fK3G
gGDAYNRgNpg0waYwBgwBgANpgDAHADoAhgA0wBgADAAAvpuNYBwef0MFIBAAEwQwQAJghoAAABME
MEAIaAAABDBAAmCGgABMEACaAAAATBAAAIaAAQ0oAgmCAUARDSiYIBBMVACGIgFEwjGi8kMEACaA
AEwQAJggAABMEAAAAAAAAAAAAAAAADAABgA0wAGAMAEQJzAYA2mDQjaYMBtMYA2mNpg0xgwaYwYc
/ohi34qV6gpINNWDBgMAYMABtMGANMGAwIYmDChpwAUwIGAADABphdTeagDg4Ohz6AEQwQwQMQAh
ghggATBDBAAAIYIaBMENAAIYIaBMENAACYIaEMEAAAgYgBDBACGKgEEwQAACGlEwQ0CjIBoExEAp
GSAaBMEACYIAE0AAJghoAAAEMEAAAAAAAAAADEwAAGAAAwBgAMACm+MwBgDBpg00GAwYNMbTBgNp
gxg0xgwYDaYU3s5HUOUvZpqkT0pjBgDBpgwGAMAYMAYNOAGAAwAYA0wABgAMAYr6bjUAcLBvwUJi
IaAAAATBDBDQACGCGCABMENAAIYIYJMEMEAJgIaBMENAAIYIAQwQAJggAAEAACoaQTBDQACGgAXD
tGIAQ0ACV7smpcqYIAEwQAJggATBAAAIaAaAAABDBAxAANAMEMAAAYAADAAYAMAqsY2mg01GmDTQ
YDBg0xtMGmMGMAbTGDBpjBgwG0xphytG7AvQfM6Y2mDTGAMGDTBpgwBpgwgYUNOAHQDgABgAAMAY
BdTcagDh4N+CgBEMEMEmCGCGgABMEMEAAAhghoQwQwQAJghoEwQwQAJghoEwQ0AAhgkwQ0AAhggB
DBACGCAENAAomEHGwiNAmCGg6HP6ZyxoEwQAhggATBAAAIaAAABDBDQAAAJgAAAAAAwTAABgAwAG
AAwBgAwGAMGg0waYwBtMJJg0xgxgDaYwYMBtMGMGmrKMBqz3aS2fE6ZpBgDBgDAYMAYA4AdAOAAG
AMAaYAAwBpgAF1VpqAOHh3YaQxENAAAAhghggBDBAAAIYIYIAABDBDCIwQAACGgABNAACYIaEMEN
AmCGERghoEwQAACGgTFQCIYIBatLRUNCGgI5DZ1fMekOaSiiGlEwQAJggAARDSgAhoAAAAAQwQ0A
AAxDBMAAAGJgDTAAGAAwaYNMGmDTQYAwGDBpjBg0xtMGMGmMGDAbTGDBpjrsa4btDBjI8jshg6PK
rO0JjaYNMGANOGADChhA0waYADABpgAMABgW1XGkA4mDfhpDEQ0A0IYIYIAABDBDBACGCGgAEMEN
AAAAhhEYIaAAEwQAJghoEwQ0CYIAEwQAhggBDBACGCAENAmGiLzKR5OM7OPnEWwiyz0XnenXWz5d
YgBDSCYqGgTBDQACGCAAAQ0AAAAAAAAAAAAAwAAGJgAMAAYAwGAMAYAwBg0GmMGDTG0xgDaYMYMB
tMGMGA2moxgDBgDGGLazgd3JxT1DovGDAGDTgadAOAAYAMAYAAMAGAAxMAupuNIBxMO/DSGIhoAA
AEMEAAAhoAAABMENAACYIaAAQwQAAAmgAEMEACYIaBMEACYIaEMEAIYIAQ0CYIAQwQAh8xdXCqcJ
jEJjCJu1YtdPfztZemCABMRAKJggATBAAAIYIAAATBDBDBDBMAAAAABgAwAGJgDBBgowBgDBBgDA
bTBgNpjaYNSBpgxg0xtMGA2mDGo0xgwBgygks3QKlezzXZ1cA9E+X1AYQwBgDABpg0wBgADAGmAA
MAaYW1WmkA4uLbipDEQ0AAACGCGgTBDBDCIwQ0AAAAmCGCABMENAmCGgAEMEACYIaEMENCGCABME
pIQwQAhoEwQAhoACHnetxFSFEiKJIBMDo3U3VJug6qjIEwQ0ACIaUTBDQACGCAAATAQwQwQwQwQw
QwAAABgAMAAYADAGDTAGg0waYMBgwYDBgwG0wYwaY2mDGDTGDUYDBgwGCKc1Fxo1qQwYAzg29njH
afB7sNp0BTF5xOvVpHLGpcq2tWrjwO48+iAGAAMABgAO2q00AHGxbcVCYIYIBAAQwQ0AAhghoABD
BDBJghggAABMENAAIYIAQwQAJghgkwQ0CYIAQwQAhgkwQ0CYIaENBGXMXnZxQIABDaYIDpXW10hB
ds5vUIpggBDBDQhggATBDQACGCGCBiAAAAAAAYJgAAAwABgDBAGoDAGgDAGAMGMAYMAkmDGAMGMG
AwYMYNMYNRgNpg0wy68ZRtWgbAGAwBg44+b0PJrrPzfpIYViosprXn25I159FZHNrDi93LRXVAgY
ADAGAAW1WmgA42PbioTEQwQwQAhioYIaRDBDQKSEMENAACYIYRGCGCAAAQwQAhghoEwQAJghoQwQ
AJghoQwSYIYJMENCGFfmOrx1SHCGCYCGCC06kimmywp6HP1mpMRAKJgkwQ0AAhggATBDQAAAAAhg
hgmAAADEwAGJgDAAYAwARgwBgDBgDGAMGMGmMGDAGMGA2AMYMFYMGAwYNMSkwYwBg0wacMAGFcnl
+r5MT3eS9kPLqiTjJiGCbCOXahtMAYAwAAALa7DQAcfHtx0kwQwQwQCIYIYIaBMEMVDQJiIYIaEM
ENAACYIYIAQwQ0CYIaBMENAmCGCTBDBJghoEwQ0IYJMENChZyV5VYoEADQAA0h6KNZ0Kb6qthKBC
6qB11GaJMEMVJghgkwQwQAhghoAAAEMEMEMEMAAAAGAADAAYAwBomMAYAwGAwGDBjAGDGAwGMGAM
YMAY1GAwYMAYwBgwBgMGAMGEAOhhADOLl9JwD0Bw+6AMAAYADAGAMAAAAYFldpeAcjHsyUhiJMVD
BDBACGCGgAQTBDQACGCGgAEMEMIjBDBAAAIaAAQwQ0IYIAQwQAhghoQwSYIaBMEpIQ0VeX6vFWST
hDCLcRgBJRH0ed0TVVfGo1aayEohuv5/REmIhoEwQ0omCGgARDSgAAIJghioYiGCGAAACgMAEGmo
DABGADGAMAYMYDYmMGAMYAxgwBg0xgwYDaajAYMGAwBtMGAMBgwBwAwGADAKyyvi9iuB0b+AevOf
0IGADAGAADAAAYADCyuwvAOTj2ZKQwQwQAJghiIBRMEMEmCGCABMRDBDBJioYiABMEMEpIQwQ0IY
IaEMENAmCGhDBAAmCGhDBJghoVV3DXmQFACBpiYgAE1IOpy+wXUzjU3WxxsCrfnsNA0iGgTBDQhg
gFEwQ0AAJghggAAAAAAGCYADEMAGAAMEGMTGAA2mMGDTGDBpgwBjBgMGDAYMGCsGDAYMGAMYAwYA
xgDgBgwAGDAOfrwVp0VaoK7A8d6ezyh7J0XgwAAYAAAwAAGAWV2F4BysmvJQmCGCGgABMEMENCGC
GIkxUMEMEmCGCABMRDBACGCGKkxEMEAIYIaEMENCGCAEMEmCGgTBDQJoy+X28+WUUxhEakEW0CYA
Ifb4vfIODpOGgz2MBQ0miVF6IaBMVDSCYIBUMRDSiYIaABAAAFAAAQAAGoADTQAUYINNRpoNMGmM
AbTBgNpjBgDBpjaYNSBpqMYMBtMGMGmDAYMGAMYA4AYAwBgwDNGszdLh+hrkbdqhtMMmwPHeuxcI
9cJgwAGAAAAMAMBvs8v2zqAHLya8ohlJMEMEMEAIYIYIAQwQwSYIYIYIAQwQwSkhDBKSRDBDBJgh
ioBEMEmCGCAEMEpIQ0CYJSQhoWXV51eWMhMATAEDTATCLAu6/O3VMQSlBjcJkB1mvTh2gNIJghoE
0AAhggBDBAKACGCGCGCGAAAANNABRggDAAGAwBgDBg0xtMGA2mDTGDBgNpjBgxqMBtMGAwYMAYwa
YMAYQwYAwYBRfwi7FdvrRTfogYAwAES43ZDzHp/KdA7gMAAAGJgFZm8zdmqv0/l+0esAjl5deUQy
kNAACYIYIaEMEMEACYIYIaEMEMEACYIYIaRDFQwSYIYJSQhiJSQhipMRDBKSEMEmCGERoE0ZfJ9L
myoAGmIaCJISYSSYk2a+jz99QlIBTrCcJFtYg05bDcSSJMEACYIaENKJiIYIAAFQxEMVAxDEQwAA
AUYIAwAAYDAAYNMGMAYMBtMGA2mDAbTG0wY1GMGmMGDTGDBpgxgDBhAwGDAGAMp8t0yrexi2ikEC
xcE9Rk0cYu28j0teen0eIeg8tt9BGXo+H9MdI4uI9QvG1V7TJ5dHb52BF1dchdzjds9YBHMzasoA
UhghghoEwQwQAACGCGERghghoEwQwQ0CYIYIaEMENAmCGCTBDBAIhglJCGCTBDBJgkwWPZ5VcKCG
gGIBDEwENAANNHWvolUpRkWVyiSSmVWQmEWjoyo0IhoQwSYIAEwQ0CYIaEMEMEAACgADEQwAABiY
KNNAGAAwYAwaYwYNMYMAYwBtMbTBjBjUYDaYMYNMGMGmDAYOBp0NOBgMAYByeorCMyA+Lhpqnv23
GTs8/bHLfXzGnn9APGT9Lya5GLpXmMyaxyleZ5XVkarYFE1En2eJ2T14Ec3NqzCGqABDBDBDQJgh
ghoEwQwQ0AAhghghoQwQwQ0IYIYJMEMEpIQwQ0IYIaEMENIhgkwQwipI5/ld+CVMCJIBNDQAmgbQ
xIYw6yshVhGJa6Zk65sjZWyUHMlv5m4sGIhoQwiSQlIIjBDCIwQwiMEMEMEDEDEMVMEAYmAAwABj
AGAMGmMGDTBgNpgxg0xtMYNRjBpjBjAG0wYDBgwGBDBgDBgDTBgDAOB1OMdGO/QUIvIwsmAMAAyb
Kws5ecz8DrcatsK7iBbEqlNlcRj7fF7R60COdn05hDKQwQwSYIYIaAAQwQwQAhgiSENAAIYIaEME
MENCGCGCAEMENCGCUkIYIaEMEpIQwSYi5vQ8iuMZCGEZIAQNxRKJMg7bTKb7Tl2dW+q6/eyj5+vY
cyuJHZnITSJSrYSIluimJ0xiJMEpIQwSYIYRGCGCUgiMEMEmCGCGCYAAAwTAAYmwAYAwBgwBjAYD
GDTBjBgNpgxqMYNMGMGmMGDTGDBpwNMYA2mAMGmDTAGcTpWXGbTGQ4yABgADAPN9rylTcETqJHH0
3886d0bAo9HUcNd3zQd/P6GOmAc/PpziGEWAhlIYJMEMEMEMEpIQwTA1ZRQhlIYIAQwQwSkhDBDB
JghglIIjBDQJghhEYIYRJIQ0KurzBdzSUKN1hlj0Gc+XR0nIs6sq5l24MaogdK6nWQlIFOLT2N+f
RKhhXhuynM5nrdp85h7zk15xbcwnWG/Xy+kMYkRgkwQwSYIYIaAAQwQwSkhDBDBDBDZFsEMAYJjE
xiYxMYDAYxMYDAYwGAxgxqAwYwBgwG0wYwaYMcAOhhAwGADAYAwAYDABgAAwAaYAHA5Op1iNAY4X
UlFfR6B57p87onoOBh7BzLJ1nqtvmOlHpADBn0UCGqAIAATBDBDQJghlIYIAQwQwQ0IYIYIaBSQh
ghggBDBDBJghgkwQwSkhDBI4RuwcZHcfCR2+biI688u6oyYYMXSzk+jg3gwBOJy5RkaNlNwMRKVb
PaW0Xw0My4tWYr7XM6YJhRg1cko5vsrT53r9ZwaartRDBJglJCGERghgkwQwQwQ0AwQwQwTABgmM
9oMMxiAQzj4WETjkkATknH10WLsupt3LGPJYl9YNzVsSoMGu7FEgqRJXYFNT86wZuuWbkhXjPsD/
ANxPv7/3vgzBlepfZG664H7e0dDXTu9vR2KKjXUmuLbZV11jSnVppj4l9lapu1WsSPiCsAMgDiIH
EzM+PeApnHEwRDifD7OWYPZynLELGBmmWM5EEN4DZmeI5EDOZyAOYSCM5AScMDhygQmHIOCTgQzn
7M+WBmTnIMAmCABmeTAIQJlSAMex0zCCIGUQvmFRArE8LTBRForDZBAMBhBmWg8zyAG8JPdCRCVJ
Vw5yASfBOZkmFWVf2azB9S7CZVjl4MlmrURgpHHIYM0yVFhLOvvyeLVsR6FgjJdinKstlggvSI9b
Q10SxwpV7BLHdolfq1Lp1ALTQo4oF4jiyoVY4Qg4YKUKfCyMwNLEekCoprgrURkaNWRG11y2qYaL
ROLCA4BwIYCYTmeMkYAYcamPAHx++eUDETIMT1IKrGICiF6hNqhwatjYSV7e0hFnY3t4IaoCEnlf
1zuy9fcGVDlCywuxKUqqD0yX2QQUbIqZg1OJzZQLUJFiMSVJrUh2s9OxWE5Al/iDjwohlYOSIozG
clsQDyMzEAJPwziqqzrlrFEZ2YefZmFoo5AIwOczGYMqVsDR61YJr1GwUNWaVtxssXrB9n2D/wC4
n3xo239xodZ11YvSg7VFdLs6Ci17ea6mrbs3a/TVVxKVUWoqIuGGxtUSzcBXcU8m4gnzOMIBmIPc
FMAUANCST4hImRCDMYhgwZ4mPZ4HsWHEAMxMGFSBmAiAwjJzgeoMB4HxGLFuII4rOPkoVjDwAZyI
ijM44hII8wQeCfMC4JEHg+4TMykASBYcwEzIMxxik5IMxiftnxhopM8ZGc8px5EqOQX4v5phQchj
YuYcAOVKlSQoJhXyV8JfU49QY5sR5hxCohBHs5os+YUCu3lCygc3LPSxI11ECIszkFTktgW8edLo
F5KVYLhuIBRyCrY41kZUQuRC5BNlYYtWY4YAqrKQpVlOPBXgjA1iNUJ6TR9cZ+Wry2nYA1ViEHEJ
JOcwDxWwAUO0XXYqKKlgSmKrQJe0wyhDU0GBNlFakjE0thKLj3Oqq1sLEPuI5xXtIN5U/OLyr2y0
4o0ajz6KKTWhi2OgN4WWBrZ8swgorAPytZfaIhvsJ21YwI0D2KanZ1bzMfCF81+4ZJdoB8WDOMXg
IDXFYNLUQg8p7xDPeD766FcnQrIu0nSAEQDMUgj3ewsVBsDr8w4sp2VaD0Ga+oA48/YP/uJ96OR3
SK5gVhF9W+WdXvenRr2XWaHX+hSpwqLbZXYi1oi0sva3O99ewUWwlyR5/bOJ4nEE8DAomBmfuRmB
RMnPuhJIAnuhIgmcTM5QnMHs9885OZ8MGJnHsOCFnLyWBmfAYTzOWTkEZmPLJkcSJxMKEwpiAYg8
wYmcTE8RgcYJIQCFMxBxnmcQxVDMYhGZjwJyEBUjkc/uZliFByxKLyJhinACkQKSeJJVeUIUlxxD
+mqLbWxQLCnxbGoTKdlSDsVxb+TercSTtGHWuMGsCpP9WlEIPwm48m9RVgsCqHgtEPLKs5PxElAY
q8SrEkFyQ2C1lYnrVg+uctY7nDtOIEIGPEU2YVmERgwCYJVeXFVJwGZQSyCGt8soKmkECqzHoXEf
JVENoIImgCKtY1stInprPRCkOMm1RBXa6iuxS3CBJbz4sPBPw5M17yuuLm5euQPmrFL3hp8LBX8+
tZWV7AhV3fCbFNkX0QW2KADuy212AGIxBAOIKgwfi9aqmGVUUHJKZh8Q+5R4ckDEA8GytTZbZXZ6
ljRWdTzCrnM8zxjIjP4BdyEwB8C1bLcat2zFtnKytqssFzzyCYT4a0CeoMNkImVqVTh1Cz7A/wDc
z7woSzt9Xr3tOv09YldVFIcWNKuurpNNDMpalItlvpWAJLdhUW+v1S+uULcxCGEyZkRfMAycKIbR
gsDOWSxEyMAieZgmYOeOJ7/YTM4mBAFmcHOfYCBM5hVccZ7oFYzOYDCywYIIUGEtjLMAoE8TJUgw
kkZxMicp5JwfZ4EPuwJjBxPAgzBBCPK5hIgjNiAggpieQVGZ7h8MVuID8ZlzFsPEFjMznkEtjyYt
jiY5TAy4Yjb2EAXIbw4bwpYKPl6GiU0RUHMv8dloiBGXY0erp6RiCAWWcZ6ZaFCYF8lZwbArYzgR
MVgBkEFhE5vk82mJgTjMNCCIXqUm+kRtkGLs7LALuXQaTllrFIwQXIhIMZPAQmemrqUr4llUfGB8
WGxgI3EFs8AzHkpWwrPUUzi7EVspDGckgsYB7HsYBRCiEWKwsNFs4MSGKIHIIsZRyJiuAS2GWww2
EszeocFYx8qxBAsaMAoyOTKHNdTZCecmZ/pBTxQoFCJHcR0zCPC5wUjPUptfLUFvTt/uKVEyVAsn
LMyYcTLYZNiV1ooZ2IctDWogOBD5gaHBmTNFUctqUsG6+nN3XmyHSvEDPXF5NPsDI7mdxqUW71XB
BxbiAzWEcQP6hrrbj6mvQt3ZMse262PYohZmjFQeJMNOCajBTPTEbwSWhaclhZTBgzHsyIHxAQY2
ACVwGxPecYniDAmfOYWMwIMZOSQMwsQQTMw+4DyQ2cGfGZgz9hxEBz7P3MMAgxj98ifFCZiZQQEw
gTiMe6YxB5HgQmcsQMYQpgJhIhIJBOFxCDkKM8zGBwGUhVaY5MzCMchMuFJDv8JZTxa5FqsY2MpS
a1nINydrFcBrXxyGMFIATAOS9QwDdjob1thBIAYhUJgUAkIGysNmZyfB5tOM4qABggEzi5hCKTdQ
IdoRdm0mw3O/ytpC0ViNUgC1gWDjkKpbDBn5AsmQFWepA7ZLOIfMZRhs4VWKhSZ5WYxASs5BpgME
97AoQlTNmGyxSSlkUVEGpDLPWUUmx5sUL6rfMLChUWZmIDgGA+BgCDwCxyX5MSTKrCreuxYOztXy
A5EF7PPqWCLepnPjri4ZLnjxZp5wjPD5AbEta7PpMWVUZrVcBQVlx12UuUIduKpa8NFiJ6bicbgQ
xwT4xmYMbAgdDGXxkGDz7BASALnWDduri7zCDd1iWfXddgViz7Dwe4nag/MIVrAUBeS49RrA91dN
dm7ZYWZmLWBSWZw1iLP6hgUcfVRJqVi+y4o1pC1z+rY7UqkurYwpiCsmLQYFgBmBkqoGIARCufZx
MAmSCVGAYGZYWBi4ngQkmYbBBIyZiZ8eRFXAzPGcCYAniZnxQAz9yBAJkkgYBOYMQgmcRMiEqSMT
OZkQtBkzwIDD5PunuismPOcZmIBAcnJUH3luQ8gk1shsJJGXsB4fAFrpIVTlnJLBectPMWMi2g1i
arOITWTzxOWD5MYFgi5LBVbr+wfTZu7HDmcFjPJmOM8TjmYacTC9cN1Kk7SCHasyl1xb5e5gNZYm
uhhqWss4hdDOeAymBAQAQUsKk2M0VnMBYzBErU5dTCoMCgzJIGFmCsazE5gxcuAzqSqkqrICvKOU
YIWyRakYhgmGhLLDxyQCqtaY1ioyXo6smuwvQiOrvD7z5DHwB48D2YIgAUrjkxGDmFuURyAbvHr4
BYOSyiK4A2W408vh9VILRGuwTdg+qBPhwa1I4kK64Xg7M2swSwmsKYnKyJS6ktmEgwlRMgj/ALSz
ciWBVTYnFcAgg8VHJSfeCCIDAATypZ3ZUb1A78LEJHn7Dx9YnaJY11VAKqlNcv29ctdsO5z4ZkUl
neM6qGW0qi8ZwJIrR2ZKiWvqrRt4gNaxhtsnK/B9cxaXgOIXecjPecjHIzM5ATBcrrEg67Q0NDS8
9F56Lz0mhraEYGYDOeYC0zFxGHKcfOCB4MwBAcQkmcTBOfkOM5MJEDTkxPMCN5J8TJEwYACGzlQc
5OQISZ5x7oCQPfBiFeRCgQiA5APgYEOQSyuoADcwzEggjiFVVnunpnlnJ8sSRydgACmK2ZV21DbD
qANWstq1oGUoi228GB5heOAAuNnXtsFdxBsFlrfFgEiE1ietSpOyoh2bJyvcejayDXgorBRKwb2B
KnKiwLAzAFuR+KYjKpAOYFMNbAcRFTwBiKBCuDxIgEBAnqoIblyLyIbmYl29gIEW05HBhzbCupgd
VMYuIGUkqVjq5ihbAF4wJYIyRmoAF9cfbZoNnyUpsjaNbR9O4Q0uIZmZgXzg5EDGEEt4BZgZieIA
2QQZrV+psbdytaMBckRGKg2clZ1M9YiKyGn1MFdnyXUnnxLX1tFSuW6pWJWa1VrCHawhtdxPmbQW
JEqIsDU8DUjs4tdIAoYgBnYg2VIrKowZjz7o1uYL3KetiLZ6rOQs+w/Hcztmb5izcFNb33vOLZFh
AZmMNowSWKZMf4KkcmMC7W7KqDsqA1hYlsxVM90zOYmSZ5A8k4mJgiYZpxIKa7MUrCzBhExOImJ4
nGWXKga1mYkNP3/b9ofdkZOCM4mFmRPGSCJ+6gQhIXE+IQqxirmN8MHvyYSc5nLEXE/cAtGJEGTG
YLBYZkGFgsLFopXB95IxkQErMAkHJzksxEC1RGKwhSVOFAYkvlsAz1MQAiceU4lTwACKjLvJi7Ds
dVHTUAXHFkBKNCwLMcHmYeAXY1lsVrHUs1zQVu4Gsyhaq8BEWHBIbJ5eWY4DsQApBXEAbjF8Qq2B
kxQDOBJCgFgpnEMCpIULCEUhlJa0LDcces5hbE+NgAYQoPEsOEFJM9ICEqALFnrGAglHLrxwWU4B
Uwepg14JsoWHaAjbRINxIxax+VuB+WJC63wjXKwCxZyYQsjRqKSfkamjaDLG1r1LK6jxkeJkiHxP
fDiAYhPhciawWmgnkcCGIBCczPEK5Brt4s7NkFSQ4LlzCFI9YLBdxgbCF/K5ZWV8mhWg1CyenYYj
sC9jEhxTZlbCKeItcB1tsZm9VWpc2IbRzfgJxxBHwCjqicyZ9hKo7ed8f/ObhhmLKXStDc7Qq2AW
RFK4tLZyrBspHtyCFwRkhfAAE54nvOBB4nkzyJ7pyzOLMRUBBUTFq8DxFVmLIEQlZ4nieIzKsttb
i1jMwBngQYmcQ+ZicRACITkFiIATPdM4hJaZ4gsZmJ4jHMOcAHBn7AZhBE4iYAmMgYEJXAOYxwSV
MUkwjEAXGCAMZ5HABJ8ZOMcszkywgGElQOMBxDkktiYJCDnLFCElCAYgJLgCErBYwVjSo+q0KEuO
z17gqGbMBKpwck/FFGHsZuLAkMtKm0oQvuDZJ8z44oUxhg4yRAFnGBZxGVQ8mrYexPdg8yMEmoD1
RPWYQ2sSXJhbALMwB8EAzicKGEC4OExzQAsoPrtGuOPUBIzFrUhRXg/DMEn/ALeVKhtgCNstGuye
F7waxi0LBrqGNYmMTJwVBgAhxgqIVUz01BxYs5tgWAw8CDr0OTo0MbNB1ja9qziVhGQqlhK62ss3
nVVIGfaCcEYmVJ/fBA5EAA58iIAYSxiWMILsqLgSTAX5Czg4vAnKsqVAl1JljhlqVgHaxSwNilSC
K6SQ7Vs7Bp654jyBgxgCcYIGJ9gHPcT7ifHYM4WNsO4bCqjWNHPGBREL2G6xFB2HMLZYt4AECieJ
49nieCSQIGUDLEhCSFRAogryQMQDMWsRrgsLk/gyzF1rpDq9k9IKCsVBg4IHkkLBmZxM5hwIWOFO
Q2CScQZhAJAmABAcTOQSc5z7PhwMYzk/CVb3kZGJieYFEUCe6eJls4M5AzPksTDYcANBgDlhiZ74
uGBCiwspmHCoRgo+VVmjZUZRQhYKTyJUTdP/AI6+/TsPyVeDLSQXOBzrKoSFLHlaSZXTZc191db1
almzT5SZySwwPdiKuQFHIquSpnEYAxFUksrAnEBUDmRDY0LEzOR4ABmYEYz02J4KsJQQ2gQvPUYx
nABICl8xvVA9N2BFmQAYpyETEymTeBPWYk2glvWaDXsIShRFrwOOCoGSD7B7iuYPEDYJJngwqOJR
oQBAOUAAhHlkJnpmEWAi3jPVV5hGjalDT5LBfTuB1KzUl1b2OMg5AgDMa9ImHUqAbXqMbXYRg4mW
M4kw4hyZgkciIpOcmM0UwOwCv8Jb4g5IW5oSxgtDCypbBWOI8KVFdobWpKrqYhXBsI4gYgGYDmMI
Mz/5/wD+4n3QT9R4qRzCw2cwDkWEA58Wm4lg8AyMHJGIoMbPsyIXAHqLLLsnlKmHIIA3wha6cQKB
7FQmF1SMzND7SwEWl3HqkwBKyS2SAIV5HOYyiBcgAGNgTkJiYzOExiAiHxCSR4z4MIIGRAfZyzMe
w+ACxgBnEAZAmYPZkQeZhMe6AeRgw+8jIBUAHMPMMVPFGwMHkOTRyUAbJ+JVyGnvZySMNhXXHkty
Zp4BPpqELBbQrAVKCpIVVJjNhv54xZTwOAq8UdUdbl+WGhQ1mjUtdezYuzshcEqJ4acWwuJg5ODM
qJzGOZwGIPLMYZgOITOBMCNOAnBRPgENiyy91HJzCcHkBPUSBnI+Jjgz0hgLyihxBUQeKT4RPUEN
gz/UytXKLSingoJBwIMwDEGJ5B4kxlIg5GcTPdCzMVVxCgM4rjGYcEKyKfJnETxMAjgM+mTDUsNO
CVeIxWKwYvxZ/SrJfSqYt15BRFqUnBzmOghHnIl9PEYGD5JHkE8gfiOMgYLZnujEgNEwYMFs4nJS
htwqWeC4K1M8VmVgzwWgQMsNdDlaKiDXVwsrKQfEPBn2B/7mfemzZX2rbFxgdjKrGVBa0LIo+eXN
TJcPTDtioGzhWpYsLQqItNjQowFziuPcSMweYASaqnDeo0W20T5m4RL7mC33Kp3rTPm2nzkO6gg3
Eg9S5sVUw1kyx/gUM5ySX8DjiD3kggkiYM5CFhkkwHIczzAMTIMYQAmeYWAnmATIEIyVVcMBMMYM
5GCMGcRD7/2L+FVcArgHEJzGGYqQD4nUAZmSJxJnJpwJhY5OQASScq5Ymc1B5OgzkBgCWLkqwVOH
ElRYx5KrAqSDPEzHQcUdOHLi9jc1MCwV+oht2dVvn782bG6E1Sj66kYOCbLlqPqkj1DC0BzCTMTx
FRs8YEEwggKZLrOZnJjOMexVhsMBbitjgtyM4BoBArKTWSAjYChR8ODYoJtJgLsFQmekonA4BEHs
BMwJyAh4mLxEyICSeDGeBMEwgiYUwZjYB5LjkTMNGVYeJBDY9JjAFAAxCBCBCIAZxBhUzj6a+ngc
CJnEL4IbyTmfECPIdcxlMAIFisgHgTxyHkAGFcymiy4jrY+gQzaF+G1r0BB41UBlNYwaWmAfZjiW
bJVpYTDaeKvyCmzLBmlahoa0aMpn2Ac9xPvSs2d49DpOXmsZSy8I9tr2FQTNY4sW4CC0KFbM5hT8
bv8ACTdt1Vl3Z2r1Xd2Cg+4IvnXqYoaCYKGw9LEKliriyEuJ6zKfmFMN1RivTyzayjjXMrzs4sfX
rQlCqVrYwsTi3FYo+EqxJ4z+Ue8gDORAPPwzOPYCI3mZMIUnkuAxhAMwRFUwnMIE8zJU8szJnvA9
yg8nyIAME4I5GDyPInKAnJU44jGVWFgULGDEAaHKsxRghICrzhChmYBVrOOIWwtWAhdF82ywBYQo
VGIBd4vIkkYAJmcM/HABIPwnkonYX1bCKgYC61q9YkQuwBTzfQtoV7KWUqwCtngTPTUTCT1EE5ie
o8+KEmIGhPEllxyZo4bHpklq3U/9pRWgVoKwIQogPk2cT6gIzYx4NgVjCocFeJHmFQQhwHB5A5iq
7QLg5wThpiDAngni4nqNjkDOXGG0RmYFebAgNCAhFoafFjj5CqARmEPFLwcoRmEDGfPvAOJjJJ9R
yRHdpmFTCcRTGUCVnM4nkwQw6wePr2YOldGotUhST7j5Ur4GuR8vUeRegQq0QMY2vrWS6hKJYuJm
EKYaJZrstbMYQUgc4qp5RUVRkiFyJymWEAJP2Bj61Pvhsd1Rdxe5ani7FYVnVnIM1QwgRQeUJGeW
JsHJFroDa7AnM1nBO1ZZQwOYJr0m1lUKMLCAJ4mI1gEzllTEZclOBh4hhZYR8bSxq81gE1oVa5gK
6/SlqNABgqhYAWI9ZqDKoJQiBCQUUHOYSVg5NAcFjBicMkEmMOIHmAkE+QoGCcQtiZnGKCQR8WYu
cHMLCA+ScDnC0CsZ5E/cMZ5MILABYMkkkTCiL4nFrC9YVQ6AKSWsVseDEKhrCGX1DByyxDQgKMFQ
pDTC8h6hClSr8QWd8AEAcOZYEj+aqf8Adq8ja4cEhcKGltC3hRdrt6mIXsIQnOAs9wXABbM54PqZ
hUxUUziFJTMBAhXJ45h4wERnxGbMHJoa2MWmsRVIjAgweSPhnkwHEFvGcx7A6wE5mUA5NjHIqq5+
DGTCqk4QDEasZB4kHIIBg8TInPM8iDyMwYzyxOQmQZg4KDChFLGXWFV5Ng2Nlm5RD5JySAYrFS+c
Ujy9gEUtCxhcErscYtiMDVruX0dYyzRxK9QCVItMDAH1HMYnIdhAz4LF44+DGBKqmsazRuaNpWhj
XWoRVEzOULCGxRGcAhviLEz7B/8AcT71Wtu4FRD2H4sQEAqORUcRiLky/KzKIt1gJyGjNmZbNNnp
vsbDXxSIMTTHGnMB9j2KgsvdyW4iv+4TybhXFYmIWyzjgiKI+eItZIVDGtSYWZQSoYoywIzLhMi1
DLQwIGZZhItZYek09HkWYqwGSoEYQZxx8t4UMoAKZCEy0+mlVruVPjznJYK3kkQNmZ85xFInxZyx
AUcmPwjkFBLQDlCSIYTgqGMDEAmEnADmBfLKnFWAGSGLMQCMDEySWAAV1Chjybm0YLEYBc82ZW4q
9AQluYLOz1ogNtKA8uZ/ur4P/drkiwEklqxOdmAQY/AktWI3gpgTkoYvmHlhFSEBSVyA0AbLB2AU
LMiFxDYYGdia2wta4NfEhRnBMGMqCC04oBgg5IHITi5nEkcBAFyVBIUiYOAZzM985EQNmZWACeIz
LAMhgcAz9gTkcs4U+wie6eDOJMKieQvNiWQqbbSqIxID8m5LggsRSIUAnpmFMTjHQspwioIGJjEk
vgTLEA8CCrDxkq8Q1w8cZyFEOBGAi/ErYAZPUQqSa9S1jVTgNSATYlYcqza+pp3Kel12Dfb6mH7f
ty3Q7gjgKSHaI3IfYQ//AGJ99MR3acjW+BNcVsX11JrRalEZhhruMe1mL2EjMEEA8McFWmPNNbWM
oCiEgB9ichLbsL6haVvxavMLHAbJZAYDCWgsrEZeLkHiUaxstyKmlAVaWI5YV5UH4SQUKlAUBiP6
Ze9TWhZlOuzMKDj0q0PwEjJewFVC+HK8hA2Jt2F7qKqwabQr45BuIhGWOZxwBCGAWtocBuQMyyqB
zD8AQ4gLqpIMAPLJMJYQKogYCE5HqETInImEEDAh45DFjwfCBCrlDDawArYx8LYSrQ8wq5aNhXyt
kJtSvgsUqLGY2jjaRYqJcAmENStSBZcvpqGYMTYcsqxLAgZ+ZJmcLjyAMAiL4hViCAsBAjWAD1Mw
sxnFzBUuOHgLPOAoIGFnhp6cAAhwYA0wDBXAhyoUQjJ+LHEweITk+RGOCrie+ZxAwmQw4mACYAni
eBASZgNAMHxBmFZnEKkwYUerACSuDPE/cjkLgOVzeAcQN49QznA5yCoDFRGtYT1TFs5RgWJOAsOY
+TApgJWBhj1SJmvI45UYBsUQKzxgqH1yIxJmvZQlYs1Uj71fIbyZO5SwZlaIATxQCve4KmwrguQd
vaNWuWYmnaq1uv8A2/8AnvNuzn3nUG7qgf1Lqw61UsGz5yIWAj3BwTDG9nuVXGQZYCSElVKNQlaV
iWXqkZ3cvaqwuzGxsrXXiKROWSuYMKc+FAMBdx4UVpWDdYfW9ZwqfAWT1Ir2M2JW1aRxzVCc2c3I
VVHOsPUCSNp3e4sKqy7qyAW1heWSjv8AEo4x0yVQsdi0VRuTObjWlDI0XySPHmeMhkEGQOPIHAmQ
CFfkxOfAi+Z6bwCBQISBFBEYEzKksGBQEh+KvyTkiWqEJcWcAq2HAL2ParBA2uoXn6n9SyPU6oGo
Kslhc2OzN8KpYvFQzs5cOy1LK7iE5PY45Btink5rxFq869KqjBK24nJUsxrE8EYAbAM4kAVzioPJ
RDZiNaDOTEcHaCsGKmIEgQCBAJgiBcziccTPTM9KcQAyAgK0GYykxYOWCASVxA4nIZJgM8TAwMTz
DWDFCCA4J4sDyEAYz4p4x8MwZmZnKc1EL5nLMMCqYfhnAmIrg5UwK3Ej0wVzAASByPHB/wC4H4uX
ktCMkBFgZjAzCN7xhQfexGc+eIAK5CrMIs9TELsSlc5YFoIPNVHqYGSYpMCklhgqckkiLYcrZiFw
Srup19outllbLbrBbLKkI9GsT/5/he1n3l/7ateE8k/CIbF5EmXW5lZ8mGMYD5IjLkJYVOMlVyUX
ijWIgsvZ4XVY1zE+SVRmiKFmCR5ABmTAQs9Q55s0DjIdQVdstxAS+tYSc1F5cvqMteC3GuGxWi2l
mschFpZ1qQCwMFGsXmyX9KvLB61MruZhYjFq6UU2H+sqHHJhLrS9lVgU3hTTQ4F4BnkxvdmANCxJ
+GLYihmZmVmYsiYUqAT8T24AKKOYnJyxrJikAI2T5YuiKi2VqAWex/WVFrq4n0xYLWZhU4NaVlW9
MhnYT0zy5qHd0ZSbyoQMMLW52QIi2iVg2C1a1D22ACu1YxYWVcHRtalmVK1nOO3hSVJAI5QrMKJ6
iiG4xrYC5LLZBWIqicQQBMQKc8Z7oDicTMAQHEycjM5CMz4UkwK05iBvORFLCMSZk48w1liKmUA5
nHMFcI4wYMORCC04vniMgzlOWYCsyBCCYRieJ5Uk+MzJJUHLK2F5KC2YVzAhxZa4W1sknlPcFHFc
DOckjB8lj7hnHjIJEYExSIBgt/MxyQuYSQGJnNhA4nARQASsH8tg/p2NyIOQB5VAIfc3uHiGfuPd
jPs5ETkDGQzhcRYton/z0N9an3j/AO3j3cDZfiFsF3bDN5TOeUJE95AbLCxYpbkEyyVF0qQl7Ngw
kmNZDkkKTFqijEOfZjMBE/fAEMHKHwQYrQuCocAc8gPks5iKM7FoewZKJYix2tdvEYIHcKQvqhhX
ylJCS52WpOcJrSsEmMjPdhFlp41H3wRcq6qxZq1RcgBuRigklFgAAZsnzAOMLqIXAih4ACxKAG0g
EMxs+GckEHOKpcWKikWAgLcoRS6311qPU8KtjMwdXzUsW0rOVtssRwqtrorsS5dyTWuFKKDliPUZ
jZorSGSp3ta6Gu3glaOKHCKdisFtnM52GLmYUQukNpht8ZYzFhJq8BRFAExmemMKFxxyQBkeICYf
BUCc4BmATE94WYQwjiMzBM4QriBp5EJYAPPiMUEwNGJg8j3TImMFT4zMiEiDjDmEHA8QlhBYTORw
QJnxgzAIXiIciAmDMdSJywUBK7HmXVIwzA3ksZyMDAA4M/f9v3A8gwkwjJXzH96pP3I8pxwcQjzz
xFYAq4IAJOzgVExQSQvGYhjDwYq+APw+ZkxNhknqJdPsph9Tn3j/AO3dwoZiSo5NyHqPOInNEOGY
rUYVICL8CoWDVgNx8IMRf5mcKGtYkOYqs0VAsPmcsTKkiMcgCcYFImAIcQ+SARCfPIQGHOAphPxc
hPeR5A8EWJK2ALO2aizBmVrK3gchi3gEICQY9tnP4M1j+r2OyWQ+wS4A10nNY8AkGFlwkLeeSifF
F5M1iIqr/KA7FwwmEyq+fjLcXIVVEY5PNi3AKK8BcsHZ2ECqIQQwVmPpOFVEItCieoQfQeVqstNb
IbLApUtCi8soApcCvNjNSOFLArcuCzqB8QdkZp6arOaiczA5J5qSPUiVlp6YWKFwwBOCIB5CNkLi
ADGMwrgfCYGxMkziDFCgljkeZg5JEBBmYxUTlPEBEDAwsZnMxmCHAP7e4+DORByczIzyJA8ElZxb
HDxlYCMchCswMB4DymDGIMBzAQTwEVfB5CDPs+HJOYhLWlFM2avTaZmZmcoGEHkccECCqwwa9hg1
sQJWpIqaNQhHoPDUVnuit5JhGZ5BVsSuwyxS1QyYoAnvmDD7iIq4OJ5h9gJgmcTlC05EH7Btd+2n
3gg+pO5JVLLGetKEZ8A2ZJtZoKwoHpqLCwKHmK/ERvTJZGJKrC6mV+bLP7ioWK14ghzkCEGBJxE4
wNmc2yIzAkHyvgnMwBPBA9xBADmEMYMmZ4znPOSTEyAWHEPiEkxbHBYqqqyorZsOWaxVrUjHNbsj
ZYvYffACSut/41YrWr+ngocIoYWrhVdAALGYrYCtSrFC8ucYmY5Q4yrZhVsr5DqMciIULNxXkCgh
yDxBDKoIaDlkOSSV4i0AcrHLl8hagFPBjYzn02ARUMtVeHIAK3Gw2OYFIJCT1UBa3w1jT1MwBiPS
fjXSmGRVZmwOOSijPEGYAmRgeRhiMrhWxORgU5bEBAjcoq5AURjg8iZgzIIMzMDKqAMAQgiBZzUT
n5Fhn7chj3wEMABAwyWOTgwHA+Ekg5ZxkWFSHVhwrgRMDiISphXyR4BgsyCVeKgE8T3zkBCQ05eB
hoEJN3GuullWs21gW7FTKwwZxacMwVz0xMoh9SvBbwhLIz8Q1hM5hZ6mYbTPVIA2GB9cND6LT0cw
qywlTMFSrZg+Kpl4OuTAhnGcCRwhUCYjDz59numYTicvPmfv9gf+4n3vsWV9jTsWrE2WCJqWu29r
rTZbqmqmzHLmeNVfKLXlV5IyOrBl8gEE4h8RD8Qr5FUxOLTAEAGcTMMGBGYmYfAXEK+CRnLRQeXF
RGfJ5+K2JhwYhWMTxUMQ2JjIKuJiYAgdRCzGKpIdUWCxRGDucuGxiK3KNYFRTgbn9TZ9N8+gyjWV
AzAMq+mIjcSbWaKpAGJjzzIh8k/zAzDgAZNnFQHzE5rGZsrgAMTDyIw5PBBASpwzAgg4UA2DmbGg
TynEMXTARgBTzj1ooBGLPDFxDkwDMKosNqT1jGdsFhnySEZg1WItaiLXg8cAJCpVh8R+XYLWicXK
KA2WKAzHA/EZ6RzgLBGVgQuYtS4cspUknxCwyD5zmEQEgDAhGICRM+R7s+ChEBE4nAGJhRMgT9zx
IBOG4men5bxMqYFeemCPRrhDLBmYIgxjMKGKrwVpHrJGSCAxAJmTDh56bcvCwNgcjLzzbZayuw2m
cjFIYY8VA4RcRnxDYTKk5Fzl0+IM4QNbmc4XMNnj1DC5MVzC+IDmZaCxxBYDPTraFWQ1kFWr52KE
WcswkkGH2YnmZ9niYhUziMYxMT7A/wDcz75Fw7RLMHXeuujZ7Lmuxf69t+w10cAhKg61nEVhzsrB
UKMYOPTaGrE9OLX5XAHIweYcAA4hMCxsCAqIWM8zIgyYQRPM8ZzGV2IqgxkwNxjWZUMZkkK0K84A
DGwB8SjgzRwchVikF/VWBrOTZIwgmGERS0srsfZobDWVrYtS8HXBVgoYEzhmADLcQoOYQcqMzwIp
OCczBEGMMVwLIxyF4xmGRyjKYvEQurD4sccy1AFDrji7xkYAIuMcTnwtgWeoWADYIE9RBGuOTYWj
MDA5n9SCoGCr4gmIBGA9gOCz4KsARmOMwu4IpryAK2LK0yBGLLFdifIjERTDnKgmYMzmEAji0ypg
YY5mcDOJECiFcQZmDA2YxJA8TKicoPMwmGBUoDOSAhwRymDgkwFBFOTybJxGLAc4pDRVIjcchpkz
yZnB98wDCMTwAMCe+EjAUk+sgIPK/eGZw5T08SuosXTjK/hRmJ9iIWLOEA5GIQiWPks+SWMyTAPJ
ExmBIK/IXE8CFjC7QJc8VbFlBBa1MTgsCGEEez3+w+PYRP2hgmYDMT7B/wDcT73Gz9QbT3WOnVu1
zZ0trI0d2DR3cjS3CKdXdD2ddtcn0dwxNbeUNq7eRqbxC6m4Z8ntCfKbefk9qDT2p8ntY+U2p8pu
GDT258ns4+T2YdTZA+U3DPktqfJ7U+U24NbbwdTanym1PldrPy23Pldsn5PZg1dkT5XbM+S28jR2
BDrbORrbU+U2wBr7hHye1n5TZAfX2S/y+1gau7k6u3k6l+E09wA6m5ybU2FUauywOrtEDS2lWrV2
Sx0tpbhq7eLdLbFq6u1wGtsz5Xbz8ptx9TZA+V2p8nucvk9skae0J8ttAjW2oNLbnyW0pXT2QG1t
kqNLbg0dggad+Tq7CwUbc9Dcg09qJrbKwU7ZK6O7x+S2ix09gBNXZw+rskDW3IdTcIOltZNF6xq9
qNTumNrbYg1t3PyO+YOv2sjQ2BF0dvI0tqfJbKgam1Pktgj5PbnyeywOps4+T258ltQauwJ8ttwa
e3G09kj5TcMGrtmNq7Ji6m0YNTZA+U2p8ntT5TaJ+U2BDrbRi621PltkBtfZM+X2sfLbUbQ2jBo7
Ig1dkQ6u0QNbanyuyANbaEbV2TBq7Yh1dggau3Bq7UOrsmNp7Yi6m7PltkQ620V+W2odPYcJpbCt
6O1Pl9poNbah1LcDW2MDW2jPldkn5TaEbVvyNfZydXZAGrsmfLbAPy+1gUbU+S2WCamys9DZz8rs
mDX2jPk9ghdXYBOpeYdXbWWa9yrRpbPpnRuK2ddt1v8AI7hK6e1WraW46/J7gT5Lci6G4xbU2lX5
Tci6W5m3V3AG1N4n5Ddi9fuz6fuQaG5Bobc+Q2wF0d0xev3J8hsrPk74dO+fK7Kw17ojJuY1tXce
1tXaJOltQ6e3Pk9ufJbWTo7c+T258nt4Gltz5PcnyW5PktyfJbk+S3M/JbkOluQam6J9i699XbT/
2gAIAQICBj8AfAf/2gAIAQMCBj8AfAf/2gAIAQEBBj8AG3syAjpBZgal81+oPlHuX6g+Ue5fqD5R
7l+oPlHuX6g+Ue5fqD5R7l+oPlHuXxj5R7l+oPlHuX6g+Ue5fqD5Y+5fqD5R7l+oPlj7l+oPlj7l
+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7
l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj
7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPl
j7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oP
lj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+o
Plj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPlj7l+oPkj7l+oPkj7l+oPkj7l+oPkj7l+
oPkj7l+oPkj7l+oPkj7l+qPkj7l+oPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l
+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+rH5I+5fqx+SP
uX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6sfkj7l+rH5I+5fqj5
I+5fqx+SPuX6o+SPuX6o+SPuX6sfkj7l+rH5I+5fqx+SPuX6o+SPuX6sfkj7l+rH5I+5fqj5I+5f
qx+SPuX6sfkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+qPkj7l+rH5I+5fqx+SPuX6o+SP
uX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6o+SPuX6g+SPuX6o+SPuX6o+SPuX6o+SPuX6o+WPuX6g+W
PuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+
WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+WPuX6g+Ue5fqD5R7l+oPlHuXxj5R7l+oPl
HuX6g+Ue5fGPlHuXxj5R7l8Y+Ue5fGPlHuXxj5R7l8Y+UL4x8oXxj5QvjHyhfGPlC+MfKF8Y+UL4
x8oXxj5QvjHyhfEPAL4x4BfEPAL4h4BfEPAL4h4BfEPAL4h4BfEPAL4h4BfEPAL4h4BfEPAL4h4B
fEPAL4h4BfEPAL4h4BfEPAL4h4BfEPAL4h4BfEPAL4h4BfEPAL4h4BfF6Avi9AXxegK/oC+L0BX9
AV/QF8XoCv6Ar+gK/oCv6AjGRcM/YP6B6z/2un+n2jsH9A9Z/wC10/0+0dg/oHrP/a6f6faOwf0D
1n/tdP8AT7R2D+ges/8Aa6f6faOwf0D1n/tdP9PtHYP6B6z/ANrp/p9o7B/QPWf+10/0+0dg/oHr
P/Y91SATReR4Lp2S3FfpKu0uvbIVXieKLSHBU/zc/wBPtHYP6B6z/wBjYER5kjgCujZEQcZGy/O3
S34YUC6IgS/FIavWhI9Usz7h9nqiCn25aT4hNIa4BMemWR7BqLPb/ND/AE+0dg/oHrP/AGMdR7sV
+XDSPxST7szPgKBdMQOX+I7aZZhNLqhhktvdgS0TUZFA5h/8zPL2jsH9A9Z/7FqmuSqfLhkLlOA5
zNT/AMAxDhatr5UNuYaOPBPEuP8AMjy9o7B/QPWf+xTVLuXQNEcZG/csziT/AMI0x34rXtnVDELS
emeR/wAxP9PtHYP6B6z/ANiYj8U5fDAXKE96pFoYD/iDPa6ZjDNeXvBwKPiEJQLg4/5eeXtHYP6B
6z+zLguLPy/znRth5nwHEo7sydzdP3pYch/w7W44r9WYj+EFl0mQ46ii4Oo/eJcrVDq2zfJUpLGJ
/wAuPL2jsH9A9Z/Zh5EADErVGsSSR4/5wAKylYIm5NZHP/jGIcHBeb9PhgLhDb3umdnz/wAtPL2j
sH9A9Z/ZgQ2/i1BzwxUNonVpAqP83MpWCO7P45YZDAf8fr2w0/WvJ+odhQE3CEolwbEf5WeXtHYP
6B6z+zJ5keH+b1P5e3hnL/Ia0nhJaJgmGXuWuBcdjEvLJa9OmGD0f/Jzy9o7B/QPWf2PcqQ1CowP
+C/GXr/zdu88Sf8AItEw4wOS1wPQbHA80xOncNNPFGW7EE3Bv/lB5e0dg/oHrP7Ia49TypEXqVx+
3KbOyH1WzJ/vSDuKkP8A5s33NuvOX+SGMg4OBXnbD6RVhcIbe7SeBz/yg8vaOwf0D1n9kNUaSFQR
QunNzf7ZjIODQgr6UbTiM4kbgZn0yLf5rTFCI/ybzNmkhUxCG19RYUEsuaBBcGx/yY8vb2D+ges/
sgSL/wCD9EeEvX+zpOoRmL1Z1opKGIBduSEoFx/kp5e3sH9A9Z/ZAbYI1y+GL3Vm/wAD6I8T+zZr
qkMAmNIywC1T6dvLGS06RHKQuhoLwJqcCFSkxeJ/yQ8vb2D+ges/shs7uzLRuwkADmCiZREZCkmx
Of8AgfSRxBfuIP7NaQdMcTj3JzSAwxK17gBlgMAO0wmHibhef9OSYCtLhDb3undzwP8AkZ5dg/oH
rP7IbUJBwZWPJTjkfsvIgMjo6pYBqIOI9YpHgcVsxM9UhE7cqvUGjqUJhpRLEfs+xR3fpw0sY+5e
R9U7CgkbjmhKJcGxH+Qnl2D+ges/shtc/Yt0fzFVTykAAnidZyC6OiPpRM5c08IvxNAoazQkDSOb
Mhu/RgbL6QYgvEmL6iRxUNxtP1AH5oFjkf2hM4dO7nmvJ3wdGIOHJCe3LVE5f5AeXYP6B6z+yG3K
REQCamgst3X1S1fdtXijXRHABdUnPiV0xpnKiqb5U9KoGa5/1TjBQyEhI8hW6/KJMCBIPeJNwhIl
mDS4qlsP2hMpsJi0jTuTxrtm8TkhPbL5jEf8eeXYP6B6z+yH5hAiKnBGrVZyX9C6iZHjQeCoG7m9
af8Af0ql+T+krP8A738E1Xxx/gox3p+XtgSJIBng1fFNCLRyyQYPgSMYqJfU1H9X7PVWqUmiKGWC
/KHlw/HIdR5R961SHmS/FPqPpsvLnANgQKhDc2ydGBFiOKb4d0fFH3f8ceXYP6B6z+x+jbkRO5Io
QMlqnIyljqJJVO/9wsuSfHP/AFT3PCvrTX51XDj7kR6/cpkFp6RpwZyKehGchjUW8F0WvxDqQNhY
+r9nvIA17h+7h3qL9UzjhEfyhDl2mG4BKJuCv9x9MToGV4obW907w9P/ABp5dg/oHrP7HGRsASUZ
kVP7jsZWb0L3LM+KyyTDG6GQW7OJAEWvwcrUajE4JwOmVJDBEPpNy9iEcvf+zlPjNgtnbN5dUziS
VGWDsUOX2CJBwbhH6j6QM1TEYckNj6otOwkcftM4f7LGQB5p5zATa25p4lwcR/jHl2D+kes/scIi
88RgP4oki6LVbFVPcnPZUoYDHNelcB3IhqmVGLEMPTdGOBohKNYEYIuNMhQFQk7kdJPL9mzKVAFu
TJpEEhbMz+B/BaTY/aqj9R9NSQqYD2IfTfV0NozPtTio+wz6Zj4ZC4XlfUDXHA+4qO7CxzTzkI80
Nn6eJhqDnclSnBGY3hIitXQl9R+Y/wAJcuFuQ29wxlA2NQxR2NywsCqUkLj/ABTy7B/SPWf2NJkW
AFTwRJONK0YWRbx/1VPesu/3J29DetPf0oPT9+CavqqmJZ7YfxWXFQhTqJIORfA9yAkOqPijEH4q
gcUN4MDaQxBzZb0CQ1JM+Jx8f2bjtv8AFdCJtuRkoEjqiCPT/gnf+nDbgqYjFD6X6u1oyOHNAguD
b7BjvAafxGjcV5f04EduB0jdINeUVqPXP8UqlfTzFtWk9/ZubJ+7JxyKB/8AuRbvC29RZ3jqGBwW
mdTGoOBihONQa/4h5dg/pHrP7GnYvKVTWwyKJN/BVw/fFNQnxTfwRNhwHvTe1fuyoGe+CfE45d6f
Kxv68Vt7UYRfbiJGX3i4BqjPTqhaUeBR02LsDlzRfwOKhuxOkPV04sa+P7Nax8IICgTeJPpTf4R3
9gad0VI/Ev8Aa/VvoFIyP3UJRLg1B7AC8pn4YRqStf1FW+HaHwjnmVvTkaOzYdkSLwnEjxQ5KO4L
SGk+xQnjCQPcaKTXj1DmKqO5t03Ih48eCl9PINRwMjj/AIh5dg/pHt/YwzmWiKkqe6QwJonH7+Kf
03X7+xUHsXtNeygb0djAJnrYY396egBcDuoyeEgJ1BifYnxGGKewllgeSlAFwouXlCh937MlAf4p
3tkNui4H3kfp9+JlCNgaEKO39IPzpliJWiMyjIkz3JfFM3PZucZnsMexkyY8kI5UUd2I6ZO/A/4h
5dg/pHt/YzyI/erI8MlQmiYWHZ+4VfQmsBmsgFwyVK/vZU9624XkZC/NS25l5R42TijZJzVqnFAw
oDVjZP8AhuFMP0TDxHd+1B3toNui4H3kBvBonpkTghKJcGoPZLiSf+EPLsH9I9v7FykaCIJUpy+K
VSV7qJ8T3p/Wv3Hcv3Ky9CYehe3/AFVTbvVPBQkagOa8AS6MQdJi16iyIlceHinIqaDA+KEBX0FZ
m7rQKSlaWRFUJC0g7/tQfqNkNuD4gMV/s/qC0TSEjhw/4Y8uwf0j2/sXHZiamswPQCrHjUBMAzY/
6q/tQPfWic15VKf0Et6l7cPSvdU+Kzl4/wAEXx/fBB8b4dyMhhGVWplQd6nMUIali3BBi74i/ggB
0tdrPm2C1G7NW1E0gCDj/FRkKMXfkixcAvEYgSv6f2pP1X04ZqzAw4hD6T6mX5kaQkcRl/wp5dg/
pHt/YqW4TSIcg48FKcsSTfNUYdyJvxusv34I4k4/6p/aSsn7lmMyHTetMcbfuE1vR6lQcBgtyc4e
YwAvWpwUommqo/gmN8DgQuk1IYxVeXFCUamw/itUYsQWkBlyV+mIMSMxf0ftQ8iAOKIh1RFJHBf7
v6ZxB3p90obW6W34iv8AMM/+EPLsH9I9v7FD6eNZODI5J6snVTQWCaNPQnPvXE/vgqYfvisS9Vxy
VaZYKj802WF1IsCdQAHIE05OgQOoF+4H2LUHBA7m/wBU56cyLOjq+EXN6qULjDmhKJrYhAOeqgOR
QkKv6/2mluytEOiATHZB6jw4KO1G4FICsijtnTs7UsCNUmUd7aOuILjcAZuBQNBux+OPt/4M8uz+
0e39iZbsqiIqFLdl8Ui6KJd/SmNPQnZMmHLJZkXVT6fcmAYYk0RHqV3N8+9cvQh9QQ0RMkh+Dexa
oUk7w49/FanYjArROji+RwTO+rHIIuKg3GWKDVP3ZCncoTf2EFSBseqHt/abbjubvl7RPUMTyQ29
iPkbItI/Ge7BPEPI3kak9/ZLa3A8ZBihvbRJ2yXBwIyQ3ds1+9HI/wDBHl2D+ke39iHX+3i4IYzI
xOHgqOOdFx4VVac6Onw/fNZ+JRz8AmjU8BbxQevpTnHuCsmu/eq0x6qd60x5vzxqobRJiDDV3kko
Bj0uCBUcW9aMZFya0sf9VQ0IxxTg9QIZ8UCT1RaljkqVrR1pkxtp1ehCDsYyoJZG4RYv+0nl7Q17
hwFhxkVt7svzN0VBPwgZALpLTF4G/wBiWzuihscjmmNdsnulFR3tovGXoOX/AAJ5dn9o9v7ES3RW
QpEcT7kZzLmRcklU9A96Z3ejf6KneWA9aNic2f0p75C3qXuDetWb0pie6ypTkPeq+lyyZqW02CJA
qchfg6pzclySowrqjECI5ABu9an7/UfYq0ngRcBa48XiKVxp6U7WFRxODcVJ6vXmOXFEvqF+7kgw
cXLccV0DrjX9/BQmKuGLZ/tHLbgWYfmTwiPepj6cjchE1hKkjxWjegYnEG4PBDd+lm5FR90obX1c
Dtb1nPwy5fYO1MNIfBLIo7G+D5RLSByzCjubctUJVBH+NXsPLsH9I9v7DuyO3Evt7dItniVlmv3K
4G2Cdu9nQdzwP8EABTwWBJ72VfWm9S55rJs6KlOQTkfvlRRjLEgc3NlTbG3CzHHKQbkjEhjcjB8f
FEksQzP7+KFWALujIS0k1ANObHgojHHkEAK3DYsbt604LA/v/FOw1AuDwut6DvHVqiOdf2ikYVm1
OHFGAYSZpjjmnIIYtIZhDcgXEh2NIAjIphYfYLBt6PwS9iP0f1bjbdq4FCUS4NQRb/FLyaZI0AGr
rUCZgV3IXpZxkpbUIkaYGRJ4GI9vYP6R7f2H3J6tMmaP9RTyvdZcRmvhfC+KpfhZUKA8U3iQm8CV
S2atXxTGvoquKL05oZZWYLbfAvmzVQk9AHji9PanzscGNkSRTIZYoxdwwI4oDMu+HBCQwtyUSKQl
QDL/AEWiPcf3yQN3p3hQ00G4dPebA96OGX7QS3ZYWGZW/tzPXuB/cvLnQE6ZBNINL7sgj9Pu1gaw
lhy/wDv7IbfgHp94L/Y/VFmLQJzy/wATTGu6bDJGW5IyN0J7ZMZAuCFLdEfL+oG2RuRFj1R6h2D+
ke39hxtQk8dsMRg+Kxp3L9yVSizTesosPBMBzdO/sV6+Kp6aJx6AqmpzTvZBgHNnqeaEIh6HIXBD
1WiJcQqDZxgUYikgKDMYhR1DBgeOBWkm2eX8FKDsRYcrhCOpibH1jvQEqzjTmtQNbaTxVMuniUYG
kizE3GR7lGYL4E8R+0A2I/dvzK2BI9O9D1o7kKbmmmRIUNdJx6ZA5hVr9rpLtTtH1X004+Y/XCOe
dEPp94tvQDB8R/hS3Z2FhmVLdnTUXbFNEAccVdbjkfoy/wCaHZ/aPb+w0pmlCIkX1HgUTIuTiVi2
aqQwzKoPAKrDmU/sYc6qrE95Pcg/hYKhc5Cqx7yy6QAeAcpzQDM28EQKnID0OmLOe+v8E4cl8BdT
mXBYDVjWXDktEDwfDNu9R3YFpggHEcCmZoM8QajiFqI0mr5Pj4oAUex9SBajagbhv4FdX3aFq/vm
nlhQtUEfvVEYRr01/d1pJcZjLkt3adyWnHLiRz/Z8zN7RGZU5yLkl19J9UKy2yH5IFEjG/a8pCIz
JZPLdgOcggNj6iBm9hUladozItLSBQd6aP00BKI+9Op7ogojVHZa4jF5D5kB9YJ7385kf+UMEY7Y
j5UxXSA6j9V9KSdp3EsQcigXbejScfb9mtE89yMecgmgTuSyj7ym24xgPmPuTbkiYiwt6lWvMqho
mC3CT/8ARlT++HZ/aPb+w3l7ZeO2GOWrEo+nBMCHvmuXBDPxTCg8Ff2qtlYP4qlfQE4YE1zWNMbJ
hTlUr3n3L1AD0LjjX0UU5vpIlFiMNIkfaonVU0NKHEFamYV5cYqAAYuKjPAowkKNXP8AcKQhUg19
qBtKPdf3oHLE04BSZwRhxwUTZx3UQIt6v9FtyJbQWfMGlUcsP2eGzCoiWA4r6jzI9YpF8CowkMwg
O0gde7hEYc1E/VbhECeomwGQTQ2oaZBnYF+9GUBr2cJC8eau+1I9cfahONYyDgo7kD5e6bkWPMLR
ugVsRYoCR/KnSQ45qW1umMoTDEOhubMn2iXibgxyKhumbSIrEVLptjaJ4yTCQ2/6QH9K6t+R/uKr
InxXWX4JgwHBV7GC6i/BUW5/5Mv+aHZ/aPb+wspE9UgYwAu5RL34r1qlMys0XduFFW/ii3uTZZBV
s1gnuuHgma6ybMsvcLJ7NxWWNFrJ0xMiBm4AujpDROGHFS2pPhb0FATFRRuCLXHSH9vNZAh65ii6
rDHHl3IwdjdkYyvIucv3KhGPUCXWk4e2zowekww71Eu5h0ye7j9np70h0wJbmi33i5RAoHJ7DPck
IxGJQ2/pZECV549yG27aixJ9ZURsH86IcyP3lP6PecShWIOGY7Du/S0leW3geSP0m9ScPhf1dnmx
+PbFswrEISgXIvyRjuvLJ1ol+nLFOD0nFVJKaI71S+a6iyqSVcp41TJys1u/+TL/AJ4dn9o/YR3Z
NGsdvpiR96t1w7KKuGZVByVTxWaJZuZRHqqsT6FRuLVVS2NSqDv9qAfjVXTZZrKfWRxYRdRY9Quf
UgJBpuw9yGq1A/qKMZGo8GREi8TJ6KIjUYnlig9TgRcDiniRIXY3bEKDXOIupbk/ikMPhkAiTWJL
haWMZSiAcnFQf2eYCpLntluzLRiHKGxtUjgMIjMqH0gLyjLS6O1Cm9fzMX9yP02/Tf2aHiM1H636
em9t1kB94KO5G0g/ZHegNO9EuJZ80HDHFNgj06JHEI7nmxEcAXcpoPMvYB0dyUGlGoH3vBHY3hUG
jqgZPKvBMKBVICvXgsSqBUDk5Kppkj6Fu/8Aky/54dn9o/YQxvubgMYj1lVcg8WWGSJDn0LgMVTu
YLMnM+5BsbsFUVvUqw4UVaZ4L3BVFOKpXkLrqtepVA5VaYhz6VQUx48aoS3A0hr0kY6mCBGNwgTU
HEetBi8XIbjiEARWQrzwRiaeth7kwq9vaFqF7EHHigxMRgeKOEgSARX92Qan4SOCL9L5YMtVweql
xWqE41jKoI4/tBH6PZOqT9QGMjYKcZsd8sZnJx8K3vqRIxm77J4i7qo07kOncicCFHf2gBvQ/wC8
MYoSj3g4HIqQjEjamH4CWP2hP6iRMR8MBQBNtQERwHYfrfpwIzjWYs/FeXOpFlRXKqrd60isjgE0
ywyC0xoAgO9OO9bg/wDwy/5odn9o/YN1pgfytukOOZVA2VPesScME6yGQHrTB3528F6/dVe8+5Pj
y96OJ4lZNkMe9D0VWZGQA9JRq78fcv4MiHd8S5T25UQDjO5XlmOmUZADcFS383NScWpkgfw1BxdD
M1kMRjdCbOJX9o71T4D8JKLnTRxjb3JyGLAvHNOK5txTnBvGwKBB5Pxo7+hPIDv8LoCBIANHrb92
R2xfbr3HH9n5SHxy6Yo/UzrDaxOMit6Yxm3gG7B9RtDqFNyI+9H3hCUS4Nlrh0yN8jz+3r3ZaY+1
fl7RlHMlkRDYiDg8iU+9IkC0RSI7l5+3h8QVSBPEFNqC6pgnAAFyv/tx/wC8VbvzTmpRyWac1Lst
3/yZf88Oz+0fsGDAkbk3jDJsSiaucVl6UHHoVTQI/wCqLY9yGPc6c0Y5ot71WnCgQQJpw/0RJug1
1cAcOyg8ArEHj/FTkISabVAcG7spagzlTIRcVA8Qi1m9eaDl4hOLAOyqHcUPArUc/wDUIC+Eu+xR
fIkHitLtj43QMg+Hdimlf4XP4Uxwp+zw24/DCgGZKjtj4ryPFSe+uT+PaSKPcf4HlxPRtdLccV60
4w7GId7rXtlomxCAZ5H73Ba9wuTmqLSKk2C0t1OzIw3RpliOaoExwW5/5Mv+aHZ/aP8AiKhv8qlO
VIxDyPAIzNNsUhHIKyp4pzUo9j+jtd+wMGCsslV1ZW7AV9PtyALQevGqYwAe7C6OiRg/h4InaOuj
UUhu7ZFGspBsAyJsQ3gVqjy7keJ9CEhV6NwUXDgmuaBByJObIxFWBZuKBxBDNyshuROoSx/Z47pD
zw4dh4l/8KUx8ZpHmiSXJqeyhutMv9ewwl8Jx4rTuVjaq1XevBBR+p3xW8In1rb3HEduX5kiaDpW
kQH1G8KAiw70IQYGRpEKY3g21t01Yk4o721Il4GDHiYn2dn9o/4d1U0Ra3+US3JMIxrInJaNomP0
4sLPxkqJyUaWth20DlWYqqqrKrAC5XXuR7ljJkZQhp0kCvEP9rZ/oH2OuIlzDonyxGZxFLonbm4y
xoiTtkxdxJkYyizysRgcUC7EOyE26WAfmicaAIZi/cjIVcI/Ty6QfgPr/aqGyLQDnme3NNp7yviD
L4gQvMlFpRscwh9PsEnej8MQbgKMPr9jUYFiJBjzUZfQgbkpCr/d5hat0u1hYLVKcYZVdbX1Ut2O
5sykAZQd4nvW1tsSZRBGkXfFGAjpaOqvMdncP+Iv/lA3JRMjItEDPiVpJENuNoRf05qgdUDKvqVS
U8pADMlde9ANeoXl7Utc2exZlQfYIZwQox0hoBz3DUtqLV3Jk+nSFvNYTAHcG+1sizQjTu+wJ7Mz
CUZByA9GK3hIR3obJFumRErGtPSo7ctuUJyZnbGyYhxkqwAJRlsydq6b+4qUQHEbC60yixiQeWao
1HCZ2lQhRIZ76jVja2S1M0o0nHLj+1O85fqZAmJMz93iiwEpYy+7HgE55lkaph3IAQO7vYRiH0rX
/wD5DcGxA/c+LcP9uCH1P0ZlFi+2SztxUd3eOqbMSbxK1RrE+Eghu7EY0+ODB4+OCNWHABS+nM3j
uioJoCKi6Goatz6YkNwPuW5GTADalQZ6odncP880j8zcwhHDmg/0/hL+CrsFuEgq7Mh3hGG/9PLc
hQ6ZMRRSOxA7UT8MJexUH2IuGlKTdyJiK7m4IjkFuTAZyYx5R+yQvqd04Bgf6iy2oG21Bz3DUpzP
3p+z7MY5kBbcHfTGI8B9g7f4oyI5xAkv/O2G74D/AOVbE/5tJ/tPuP2H3gASWErF8Kp3EzhIivzR
Yp4AgCtDqHvRMRrEaOL+COqJF3B8UIuGDGQzCcVBqP2o+p3JS0tOhwR8sylEYnJadIAxCMYihqOC
JOOC1y7k+yR1UMTZHc3ZvMlySVDQXAFQtEvgnQ80YSDiVihOBphkRxR+o2abn39p7cRwUt7ccQFA
13PuUtsF9j6mLV/m/it3bA6xtHUf7o0HZ3D/ADoylQBHa2GjH8YPV/BGRqTUlexBe1cUGoboQmXe
x+wf/wAcJHvl0hbQP3IS3DzNltxxMJTP9x7K9s5H7sSVGOO9uAd0f9V9Tu8NI/uLeoKGcurxJ+wz
gKI1xjV3JpSqgdUdRAdjj9jZexnKJ7wAtkn/AOluGEuR/cowN9vdB9Y9n2JRmNUYmMiORU5/S784
whuA+XI6omG4HAqidxtQIFE8oAlFiz4EOEdzagdEfvRNvFGO6GAqPH9p5bpqbAcVpfTEl2wriUIw
BnKrmzujOO3pEjQBMQXTyFAumJbwR1S0AB+lOQZHORdA6BxWvatiMkNmR64ekIidmvkVqgSDhIYh
eXI6BGJ1wvqP4ggXrtF4n+VS3tstPyCN2JwkJQr2dw/zmpc8KhO5azBGTkHPBUkX5UQMg4zFlYMr
dyJzqqeCeqEZUlhx7ZgX3JiA5R/1W8Y/eMdqPcpjCENI7mHY5sqzHiqzClGDyMqOy2WhIjbBpT4i
6lt6WMpCRJOSEDXSGHABBy3cvi7kBqPcmqTzRFHiHK2yTeNE53DE2lGVuaA+JDV0v6+9RMSdUZ6r
YMy3yKCWnciSGriPSiRUbsBLvofsaDbc6fEFkc57I+baPuROZ9nYdyZoEYA9BLgexFo6hbxQ1QIq
x4J2PoTgs+f7RObDFGMT5sxhG3ivzC0MIiwTZo7/ANTMCMC2k4lA/SbZZyTKQ0gvZsVE7hA3JVMH
c9lU9nThmCIZMQ4OCE4hgC44oTj3jijE9xyWoUnFSnEdUKTGIei3gf8A+vL/AJ9vs7h/m/XICWEc
Sg3RHGNC6cl8jRO7ZoxuDihc+K0yBIyWrbLx/CvYj6UCR2UcNzTbp0kXNwfBAa78ComBB0EyFMSg
BK0tWF0Z3mbyXxMqyJ73Vy6cpgnJssvBN71/D3rPvRkQ5sLrVpfuKFGcuzYKG5ch3FLAcF1BpcaP
yTOaWetFKEjV8D711fFxBYjnGifbkQ9TEFwozMIzMbSHTJ8mWmXRLIpozBOVj6eyEnrGcSO5ytP3
Y7so/wBu6KIDEFjzC4IQiT5YBAuLIGF6GrYc0ZRDtEGQpd+C1kMSXauFXTgEAUutESA2ZFuC07lc
pcE4t+z/AJm9IRGAxPIIxiTDZwjnzRwxTfFyTOIDxKbehqkfvmpC86Ffwxy4qEmeAPVLCmCmNLB6
NSiZ2WpqZoAy4MoiZ6pDURk9lRGEI6pgOOCO3uUhIseBXTV7NivMMdM41AJ6iOSG4A8PvRwIU/qN
ttG5sStZ9e2ezuH+bGUQJ/Rys0aDhNqqmwZNfQfegDsbowbStMtrdDGvQUIyjuADAwL+pVkYj+aJ
CruAEYMWVd2NMX/igRuwIFjqC1wnES4SDFGEpDuITYdye/cqfv4q1Vb1qtu5X7lWua9Vl+9+5W50
95VPYrOe8pvGnvVb8wmxxJclBr8m9aNebkAUyTuBGPwxckk9y6pAAVIOo25LVoiIgFgIkhAAAgyL
0ZqYFCIk7EUdmyoVq0uQGmLU5hCe2CN2Ba7iQ/mexQkYMcCKdzihQEnBuCQJepEAsPwv7JLVGTSH
3bW8QtRAkMxTvo4QjGRAydx7fUiN2GvA6WNBnEogS8uUmd+n4bZhSEJCWomThseS8uFZSrLGiluz
AgTTSAWA8UK0xHUur4iKktndlKJwrEMPemNAca40KBBzD/6hMWYY0cOhGdYjk6eJfl+yuqREQLk0
CrvajlAGS6Ybku4D2qmzuHvCETtzgScWI9CBFjbseRAAuStO3+bPgWj4o7u4ZSJwwHAOqkQGbOvi
1cR2P2ASJEDQkXA4Iy2pPswh0AW6seajMU1ivd2H6beI8rcsTaMsCm+oaEIF5SJaPIErXKW3ONmh
N5N3JzveXDASIMm7l5Q1T/FJviQ3vpNuUPxg2R+mMtG4PhmAHIycrTCUpSlwKjKQfb3Q4IsJYhb3
0sqgbRnE5dUQR6ezuH+bGMgDE0INQvN2BpjumsMARktsG2uKmRn7USRihSolKqk4BthwUwduNZUo
MlqO3GsQXZVCIdFiQFSZbJUJ7y6zT3KrdUt3plz4r2VVq8qKvsVnRp4BU8aJzXgjRu5Mxc4M1M1p
HfUBGDucQDR1GAYnEuT3I7cWhISByfCqA+odm0E35OhDUC7gGzqURe7I6CRGXxQBo+YBQnEVvZwV
Hd2iBStfQQUJgNI4WY42TCoPwtg+C0yDEUL+i6rSQ/eiIIL4GyEtqbImdCPiNaIGI1CQIcg5Igmp
FL14oaq4McgumkRYZOpAMbvzd0BiLhEC1u4pplgzB1QuPWqUkLg/slq3ZiAzJRh9NHWfxyt3Ine3
DLg7Dw7LqluwQ+oidzTSJjfvR8uI2gaD7xX5u7KQOBNPAIsmPeE4+E4cU3h2Es/AISZv5Th2HZNt
wMDkQtuIrKpIVSAmBQE5EbYpqK8z6SfmMHMRSS8n62LgUc3WvbhEvVwLAIQjENucLQQlH4H1QlmF
t/UfTxDTiHOIOIU9iTAmsJZSFlv7W9ExnHYkC/8AXt9ncP8AN9vmVA5SUq/u6fDBEA3lJS7gfBEk
/fN+AAQ2oVIDE5KlU5uqVTlUDpohzjwTd5Ty7kDguHZS5TswTOjVMPUhkmFMqqpcnBGMQ5NyyIap
v0rGuQRnMExdhX7zexb0Q9BEir4pzXgfatMKxItyyRrjQlPIuCL3rkhKJ6DYYcQpAFxKoxBCeNJC
4s6jKbmJoSK0Q0S1Dw7kDcckX+KKJHw4hSIuhHZjE7kHpNDb3PpY1JLjinl9MaPUHuVfppCoBYr/
ANLKJq7ZJpbG4K04smlDcGVF1SkGsCE3mEcwgY7t7UPcvL+KWYFP2PfdmxwiKlafp/yoZ/eK1TkZ
HMl0/ZwHY5qDggZF3sESUYnmEEHxutUUYlNZDNVXFdQbio7m2ag9JCM96RM8SVmc1GLOxcra+n26
GA1Sb8RQkfjjSQRmSIbwHSR7Vpk4AuMCox1CO4aAKW23XGu2eKl9F9QW2t0tX7s+zzGGtiNWLEgt
2dw/zfa5lRb8SeIjKTtJ8nRjtjpiMMFFo6hInWRhVbpkQ33QK4Ih9MHJYcU5oM0wxTNRWqtR8E7M
F/ojmnPqNEGqM2XvZZ+Kz8VY0WR7k1yeKaNTwBuq+oJxTi4Re+HUmDPm5dMfWSnNByKjKUWjD4QI
3zKcg6pggBhWoRBP74XXLPNRMqiWL25oiNSbxb0heXOJoc6H+KOI4hVFJYAoAVBe4TgC9QC1UDhx
V3i1EZCWl8MCnjY1a7FCQ+IUaxWjdDgOHR+9BjXJ80J49BH/AHVXETHqKDignL1RKBIFIOH4EqAM
RWMXJAwCcQi4iQ7B/iKEZQBiTUEUstMQIgYAN+xplI6YipJR2vozpiKHcxPJapkykbk9nEK/YGqb
IizXXMIIss2uuKjyWSeJ6gtRviFTwXmbu/HaAwNSpQec4AdMgwcotQIbMgNMXMS3UH4qt7HslvTH
TGpPAKe5KuoupbZ+8KIA2cE8WRjJhIDokMD7kdubxlE0OfEIfS/Uy/MtCZx4FD//ACGwGjL9RsJZ
96G3uSff2WE8yMD293+b7TZlQANSSD3BQImdM/u8QpA7hEnIkKXWnaJxNbOUZSOomy6mJyTmQAHN
Ucr/AEC6SwORr6lfuqU9T3e9cuSOktlVMTqzFSVb0Kx9Cx4uQnlIDgZKu5AcL1VJgnEgepUMjyCc
AnvT+WDzKpCIHJHTGPgqM/JX9C+MsFSZqvP29yMgCQISBuGURvfSCYJoYGtfFEb+zu7WEiYm/c60
y3RHJ6etPtbok1Ysake9NIvPAkX/AIoiTCY+EWBbA8UIxLg1ja4wTTAEjiXHetQLuKHhxdOYgtct
66rRKl2EsvBMzaSxzHggYSBre4QmY6oGmTHmEQCHwchz6lLamOrGBevijLYs/wAPe6jzkPEFSGAl
64hROcH9JUBiREegKTYAjwKi2Em9B/Y2W5uHTCIcko7e28diJpHGXErgqhUsjTsoO9XeRRK7lQLg
VTxKqLoVYDFMLIE0yCc4pz8RVS3BZp7qysmZk0ATPdaIAumlfEZLbkaB2PegYh3xVRQXIWg0mA8J
8UduY07kCpf/AOP+tbzJRaJ/EPeEX+CJaQwltlR3NuQlHcjqicwezu/zfb5lQORNuShKZrF3kVOY
BYkt3lUTEiuLqsh4r4xycq/oVD4BfebKyYQfmUw24jxJVIxHIK48E+rgnMj4ovI+KcyVSjWgQ4ov
guDOexmfsd7YIcEUE0qAyNeDrbk1YmJbiEdsgEUccn96GragQQx6RgFGe3E7RIL6CRih5P1k4nCM
qhGRO1vCNyzGnJkfM+k1An/6ZdjwQO5Dc2pcYlj4LQN2JjjE+x1IQ3YufhINH48CtQl1QJF8FoYF
8wi8DJ6ljbiyaohO5b3LpYRkGkLg+KEhVmZ624otQGrISjTciHHFS1i83HyqL2jt4YsZLb0k1Ee6
gUjqJMYl+NVD+r1/sYZEsBclHY2abETU/jPu7LdjlMPBPMNwWmNAqYILguKi2K5rPs1GpwCfE2WZ
FBksuSoECORTEprqvgg9IuKBQ2oRLxFeCmNqLye+APBAipe6i46SAx7uxhZHdkRt7u2OmRo/AoTi
WnGoIzUJ7wiZ7Y064hjLmt3YnImMNiRi5t17duzuH+b7fElAR6iHWqRd8EQCml3K6Mbt2DsKPJAW
LKqHaUUEUOSKPJckeX2Cghzl/wAxUTxHrXoVrP7VHIEinitnQSIkASAt8SkeBXeCrWJHimntRl0N
UDBEDZiGIsGp3JgZwrL4ZnKiJ2/qZAFviGpbYluQ3NR0gyBjUjgpSlsQmC5kIyyLWIXX9Nubek6T
p6ouKmiBaUALiUTYoGG8IyyqFGUJh+BF1p3K5SGChtxnLSBQtQxIRBlelgpAS6YggAhQ/q/Yz/ab
RvXcI9XbxHaZE2sE0fFAk95TxsuKHJcU2VlZVC1Gp7KWsqYKnj2UumF101lngE5vmgOK3Dq62bTw
zW5ORaGo9XHgjoHTG5wbmtudwIV7lDb3Pv8Aw87stER5m7+EG3NPuFoi0BYJlpZ5HBbxwP00v+fb
7O7/ADfZ0ltUiD4J0M+xwmyuiewP9of4BVrpmVlUN9mP9x9JQODj1p0cWuq0c+xfdIFnZS5FAhHg
UBzHtRPAHwR4FR3NktITAoHoxX00zckE89JKIwqPUg+MifmCPKJ+UuiZbcSZarjigDtsHHwki6fb
nOPw01FsitqEpEipqcnWwZgkbkTKXMFfVR2otCM5iIGAotsn8Xv/AM3srdl/+BnuyvaIzKlOZeUi
57HVkTbJU8UWN6dle3gPsgJhzVTXJNZUNVRPgERG2a49kcnVLCNQo6Q/8oxJUPpdmggPzCMZLRc7
ZI9q1E/mH4Bi+alOZeRqSextmOnaBaW6bBD6j6Z9zSPzXvT7wW6MRsS/59vs7h/m+x/VL1dlapwG
fNVDokUR7B2D7Z+yftnsgOfg5QBrUdhwqq2LepWHwlOYhmp4JhZsVI8fchzKP9KlzR1B6iihEBgJ
Fh/bJUz9iick39QRzJ9iyrFDB/etiWJ1U4MV9E8j8Mw1mqF9ZEG25MVqcFFh99j/AJPdVKqVSLqk
W+zx7afZp2VVP+A29iJfT1SHFce11VUsqXQCzRfsou9DinXuTJgCyr6e1iHQAub8u2qEjQOttvvB
j4J7uKFT801BNFvnekIQixGHgjKczKIpF8AtW3EmP4mp4ob31r7e1cQ+9L3Ibe1EQhEMAEQcbr6n
akGbakQcwZwI7O4f5vsH+Y+r7Pej/hntP2D9vbwNfWUHpUWRliC3iFJ8iz5gIZkAn3J8g6i1KVPc
muG/1Uswa+hDmu4o4GlVq0yIs4YgqLRN9RoaBigePsQP73R/uT93oR5xUa/u62i7RgHLUdyyIMqh
+/StyYFJEgjF2BJUTnP/ACe6v2XZUqsu5XV/8O//AAMpm0QT4Lc3ZGsiT2Uum7eCdVv23fkmAc5o
syuPBfEE+ruVjz/0WPiup+9fhPCyozZuqd5Tty5LI9jyD5DFaia4LZLsHihsSP5W58L4S4IfUi9j
zzU4x2/OnOJjGLO3FHd+omNyeEIHpjzW04BjtAhgGi4x5/Y/3TNu6Dtk5xJEvZ2dw/zfZ/rPqQ+y
eyn+CUftHtHbwW13esrTd6+pHiQn5+paQbI8lzC77qfNlTBahSivYOgDgVSo0uhgyD0OJ713yB8F
HNx6ke5DmPWVGv3R/wAy+dvQp1pql6orb/qH+T1oFRXV6q4V1RVV/wDi9yOoa5jSBjVM7nsrTmgc
eCKDFinjVZKpc5BUDBVPa/ZWpwT3OSrQKmOKZPE9ypRaTQm/FMfFOtRHIJzUns2dzkobv3gAY5BP
GpMRIc1tE5t40Wv6ctGQI3GwLrTEczn9ruH+b7X9Z9SHayuiuXZf7NirKysVY1VlZWVlQK324tXL
xVQVUFViWoG7k5tVNjZAPYMix5I1qSixQr90+tHkF3of0EepADB39a9PpRBxMvUyjk49SPcoMLsf
Wh/SD6VEt+NTz1H1RUD/ADj1/wCQWVFUKwVu2/8AgUP/ABctyIbdgHBzAwTJ1VV7GTyvgAq2y7eS
sqW7KVKomWna25TJ/CHXnb200eNSOa49he+a4ZpjUJ3ouVAO0D8NV9ORkBXkvp/LLEARbPgp70ui
Op4NcLTAcScyrfYI4dncP832twOTqIIcszZfaLOysqj0qrBXCqV8Su/ZisVYqx7LKyoHVlbs4rDt
HnMIMSXQ29sgCNq2VatkWTlx3gr4vFl8YPcnEg3GiLCJOFVYHkQvh8CPevhkrSTSkRzoviVZFfEU
wlRXTuKIuAVGlmHgtDfuS6EjFmenNGLfESfEALbjGLkyDB71/YpjYrd22o7jke2ib0po1lmtR9PY
5v2MfHs4KnRsj49z2Bfkb0o8JB1+rADNihLfffnxpHwCEdqEYRGEQyMJ1jIMQcQjubQMtg44x5pu
14+CcUVL5dnFR6demVY36SowaxBbIIGVdJeIy7L/AGTDUDIRJbG47O4f5u0gCMivzPp4Sz6W9ScQ
lt/0Sb1ur7vzj/wq278//wAq+HcPOf8ABfpyP95X6JP9xX6J+Yrc3NuMtqcImQlGT2FiCrlXKuVc
qpKx8Vj4rHxWPisfFWPirHxVl8K+EL4QqRCsrJmW/wD7mJI2xHTpOmpJ9yod0H+of+FdO7ugd3uX
T9TuDuBVPq5/L/FdP1h74n/xKn1viD710/WxPPUmH1W36f8Awr9fZPN/cn8zZPj7lSWwPFPp+nmc
5L9D6c8iyr9HslrdQ96r9BtnlIf+JapfSz2Y4yB1R72davqd6W4fwwOmPjUpuv5yqS3R/f8AwTGe
78w/8Kpu7g+U/wDwqu9uH5fcqz3D3j/wqO5DWZQLh5Bn8P2EuGzXxDxCYyD5OPsXW39RHLTL2LJY
rVUBaRQZdjdjKqbwTmyG79TIbeyKxg4EpfwQhCcYxFAAVXcC+J+QKufBUjKXgFTbPijGW04OD/wR
n9PHRnAlx3IjQXCaQI5hOnFCMVkU0qFHNRG3c54jFRlCkTc8V8abU/Oq+L0KpHgqgHuXSIx7nKJn
ukPg63I/dGzI8fjh2dw/YDf/AKCj9miqQBzVCD3q32K5/wCB9Uf6P/i/4CqkOJ9f+Q3V1dXV1ftu
rq6uqH/gLq6urq6xVirKrDmUx3IvkKpoiUuNAETHbJIwJHuRqIcgPaurdlLFiSrkjnRUNEJEu1K1
K6ZyHIlOZy8StXmSHeVXclJqXUoyc09S5pz4dtFTsqVZMAwQ3JjUfuxNuZTlZK6oSyoU71TGLp7L
2r2ppB8ivjPgngTI4gsmIYhMaLTLuK2t3ASAPIqW3iQ8fYjGVDEsR9ilZGycyJKpElbk5Y7Mg390
Oz+0fsBvf0/Zb4pZCyu3AIExJHFOIsFQkFNMVzCcVCftf7X1P9n/AMX2ZbratLU5lltx8v8AUsXs
5b2KMjcgHx7ckDqL+hDFU8PsTBwkR6e2ysrKyt9mgWCoQsFh9q6urq6v2XV1dX/wLq/2Lq6urqqs
rK3ZUKiurlXV1cq57KkeKrIeKczHii87L4j4L8qJMszZVmYjIUXVInmX7a+K6Sg/eQqGhzTCirXs
NHCeo4FYOifxUAVB3qpqgsWTAOc8FkqnsZa5WFh7Vd+x/sUVQ6dqK3bUJncIyIockxVbZpjZRcud
ujjEIbotO/MdrRGqS1bsu4Kke81TksFuQiLbMi/90Oz+0Jn/AMW48VXcj4hfqRXRMHvVSB3qhf8A
wGlOIORK6ZA8iEwkCeB7KkBdU4jvX6gPIFUJPcqRJVIE96+D0rqiRyVSR3L4/QV8foKpuBP5kW5r
435Ar4/QV+oF+oKqu4F8Y71SYPetOsPk4Tag/MKkh4r44+IT64tzTGYX6gXx+tfqBfGO518RPIFN
XwWPgpwgXlJgzcfsaYd5URIs9s00R34qW3JySQ3K6DUeTdyaQcJ4G+BTG2MVqjUH/B+olnKI8AT7
fszEjpjSt8VsdZJADUv1FQ/pj6lS6pQH7x9iLVlmaqLY0QJDSzFE9xmuPbuxynL1q6p2UH2alXV0
xsowheREQOJojEliCx5hMahUKutT/YuqKgVAsFVlgsFgsO2yqKKkX5J6MnamaZrr3r+CenfRW9Cs
uCcFVXLggJBizimCsfBWVz4JwSmZ0wDL3p3ZfErlUKYzLeCqSs1kjub0SIH4TxQ3fpgdwH4o5JkM
HVHp2UTr2J8kxxxXtWmyJIqrl8kCRSMQNQFKLpDyxKeZfh2vKybD7DYrURT1qocIMOayV1dWTgEq
oI59miLNeodVN8u2siW4qp7MpZqoeOazCG5tGn3o4ELbO0agdcDcH7OqRYBfyiwW7V/yJf8APt9g
AkQPLjY8ZKsi+br4z4qk5DvVZy8VTcl4pvMlwqv1D4r9STc0xnI96ruS8V+pLxXxyPeqyKu6vUKh
VCnMin1FN5hA5r4z4ptZpxKpuS8V+oX5r4zxqvjJHNO7rpTuXXxnxVSmWf2Kqh8VQLirqo7/ALFK
oNU5KgPgqhOzBMAXsmkCCqutJBonw7OPYwo0TKuQQGkutUdstZz0+tTnKLCDE1BupRYljditUgQC
aOO3QLm/AIbko0wyHEqG7GumVTmnGKHEVUB/MeyO3EVNSgxeYxzTH4TcfbohwW8f/wAn/wAI+zPb
hWUmYd62TpHQz1ykSoA0kAA3FlX4ceKEhy7BwKCEQhlh7k47N8MWG5J+8oyMv6eKYkAM+o19SOok
NZmqoyGrcgC5BYOBh3oNtzMsSKi6kfL3IsLgPdr9yMoichm3oKeQ3IjMhR/MYGxJtxLIREzX7zUR
EZGWnEWPAIMTIGtaISEhqjIaXbA4MVuT3AIw1SJlqizauaEYywda5kgcGHrQhET1km5DZoy/3G3K
zREqg3Zapbm3EaRIGTEF8ApQA2jEuC8YuhEGJGJpRBjEcaI6/qNrb5hz6E3m7ZYs4Lo+TuRkIfFO
3oQjukEysY/6Ia/qYQEqgUJPuXlzaUi5iQxpmpDz4wnGoBoPFHbJidNp0EZcQ6HlSE5CsohhQLUN
0GTsxAF192UcDT2IgShUWLDuUYgxqHaJfxyR8vU9aReoR0yBbHUjGB1SFTGNfUEdYIAuahCRhGYF
CCTVap/TR6siaI6dvxJRidqLZi/rWqMYRliDX1qER5UJCTgkgVIxeiMd7yumIk0ZxBIOLIb0B0ux
AkDIFOAwIaqEBOOm3UwbvRhrgNJrMEsWyUI6Tri+qQD6vEKWjdnEhtIIiA/cEJSJMyaAW50TNqNi
UCaP+KlO9eZACQBqzktjR1qMY9RaLA0UIynHqLHD0LSD58a/CbNmy0nbMThVMxBF6i6IkKYvdQ8s
ym7kg0MVqIjKJLMLjmowG5F4n4QasiDuxsVCe3vRlqrIRuEZA7ZjUAVei1kQHe1EWIFbuw8US0WG
DuT3YqNAcxZlqg0ibgrSYwIzqEQIRbNvYmaIfgi8QQBVRMIaQ12d1Ibx0ECkgRQ8QvK25PEUJzZP
jn2UVOo54Jz2cOyoRIBIFSyY6aYZBaelhkE8i0eCqTQIuSfwlMRXAsjCWXxNVRiAWjbio6vyzG2L
oz1AyAqD61GX009cj8QBeijO9tQJQP05jKWIBTgU8UwWCJ3BTABX08CWWqBeLs7qUoAYjST0l1qM
el/BCUCxCaQbdF4n2IyNhxKJ09OJRlGAMBRyVHb2gAQ+pk+3AkOzmgUzfcOyRI4fFC3YJG3lx9cl
0nxVSxGCofSq1QIcPzRMCCRcC/guOKJINL1VGKM4DUBdjbuVxTiuGaoU0QZHKNSiN0SewAYV4urg
EWcq9ESag5EOnFAMyAmqWu1kCxD8FrO3IRlaTFj6E7EcwV0Alr5LVIGIFHLXQMZmRxDWRnu7zTdh
tiLoEyNMgH9aExuTMqdBhT5lMS2xEEjTGUaketGWzGUds2GkqBkTHXCvmCmo5J/ptmJJD6nYP3lA
TAjA3kJCRA5BEQ+pMt2JpHQWJTyZzeov3J2DcVUDkyakYi8m9yB1a5sxFapo7Z14PQLo2iDYkn/V
PEHTxZ08hQ43QZ9WJYMyDyYYUTCenM0RidxgcHQA3Isa3dNqGk8UTCTln1ATIPfpRG6N0AAkmMTh
zRcboetWDDM0UhGczPbiZSi7BuelAQhuTArrM9IJ5aStY2iIkHSSXfxCeO1KRiKRDuSctIR25fTG
W4DY6gYi7/DbvR2obW23w6yQ9f62Q3vpdO+RQ+YQdL5aZBdPkQIDSjqdj4qIhLYkTmQD4OFPa3/q
PJcGQDanBOBwUPzZbm0SCQYxiBSlQK9hkcFqlYVPEogihUtg/DKsTxwTG8aIyiCTqowwC29USGJq
RmiTYVUt0i1uzWMb881pxj6vthbv/mf/AAj7bR5A5cUIiyMc7Jz3oEClCewy7gm8DxTW/evZ9Xof
UZFxWvgvzNvbJY/EHIcXLIy2YGch+H3IMWkDoMCD096LF4xoZRduJCbaiYxFiS55oSjNwzygxcsK
hgtctoyjU9QIAzdkdyG0BEj9KRMg4HHxQkPgYAvVUv8Ahw4IM4N1WQqdL5MoyMh0yiWAOBdbvk7g
kJTJ1B2qdSJ3iXowg3eXT7e1Pcp9wOe9nWjcjKEwPhJMZDmChHS4+8to/wAoi1qAlDbMBpHUXvTB
S29qAMQwjdgcVDd3NkRhGsgTI+g0TdUBLbEyIMACKZLej9SDKMXEWOAPxHSpw/8A8fuad6JEZu51
DFtXsXmz+oEpydtv4p1o5yUpDTMgWkHHNgQpfdk1Gb2pwSAGBlFgRzGKhKZOgULFqBHREMcdQcji
HWiE9JJc4+hadR0vUswQ1RDjqMs1uT2x5c4bUJdMjpkZ8DqQvWgIdwe5fmxJAqAGwUhs7ktiGMYM
HD/iAdDd3fqZk6mMZlzJ7sUAdmc5AAfFpvjgo7mxuSIlXyyD0v8AzcEQJS7w/uR0tHVSQPruqTJP
GKI0uJXBiSGWkbA3NywrISyDclpmdMo/FFqvxKLSckvpJjUeKjPcpGVATQOeKAM6UqOpR3PL3N0S
LBpkd9kd8b5AAJjtmL2wd0CC0dohhe/NQ3AYnckLSAAOVYAMoxLPEsQLUQawJqt4bMtw7MQNWmTC
Ljmox3STEjolIAk95qhKIpjIUqpCcS8cSpfUz0be3G2ojWe5bmoNprqF/wDmR0kxb8VFqEtMh3q9
SbUTREicgH9S8ubgxu7xPgaqUSQNdNUhFhzM7BS2fiL1MWn8qnBztjb+CMzGPjqZQ1z8sxoSzju0
ptuZlpoSIsCM0dz4hFqPnwujt7m0YykHc34XU4tSTYP/AKIAR5lq+lCQF8WYqJNA1VOMNzVrNDHC
KYJ5eATkEcM1kMlxVFU1yCoFcoE05rzImPlgYSESe7FE7ktP4XFEJbUo7xFZCEDTm6kN3bBJ+EAK
EYbcZAjriACO9f7r6rTtbZbRt7Zr4ABCW1v6nNds/GBwookCU5MxDELVtbUfJsYyDnxXmRMdgRi7
Eu57yi0tUDRsPWgYSjH+UAk+KEg8Qbi7+KEnGj8Soa/y1HoQJFSWdz6kaahyoqRNcg6eQMnLMxTR
iKZmVD3RWvyTDbH3iCQjtxjrBopb23EnbBaXAhCUCRIVBCbSPNF6lj4JjIgZCi1TkRizlebujlE5
cVERiQRbTZTkSNHllo4/FHsBDfpxoRxkhHd2BuEfzMjOO3oc/ACaI22zYVWrc3YkkUBmAEdG7tkH
DU6aUoDOWrTRHyqVuTRkYGbtiLLXPcaYsIsX8UDHWTmWA8E5lOMzjTSjHWZy4Fh6l0xYA0BKeBlG
WcU8iScSSgZgkHAIk7cjLAagylGezqBtUEr8nZ22OMgtUYREv6aIbUIwkZWAgBVHUBtglyHYehCE
tyBHCJLKTbcJwJB1ANXkhq0wiPu6QyeOmMcREBk8XmHchg3qQMQInKLe5dMjCJwBUZeZpMbU96cz
JbL3BCUN3qiHYmrKI+vG5GB+GZcBwpbsImcgKkkgo7kt4xjKrHDghL6Mz3pk1iyMJjSYmoNwqmhU
gWriUxrI5Kpqhq3BF8ANRdX1B3IIYFEbQbEPT1ppHSCWc2UZ6jOJoz48FrG0TGQ6RY80CIa5EOQB
Zs0Jb20TuF9UGcoHcBi5o8bN3LbO3WF4tQJwCRKjEuPBTImfM6WlLjgwC8kb8IE9Lm6G7KcukNqb
pyRjubhmM6LVAmMXbVKz5OvKO6BIsCLivFGMNuRYuJCN/FQlP6ee3qAEpaQGzQnuxB23pIwJqpy2
xtVBEekA24qU92Y+l24/CZO0u9T2oT8yMKCQsR2CIww4lMMDXn2EYsCDyXRR7yxK1XGZKcuOIK0k
t/Nh3qIIY48+yuNGQ9PL7Y5Fbh//ACf/AAj7Ra5ojLuHd2tEsDiLnkn08nNU1eR9icdj518Oz6gi
XUZuYgYMFqhKR6iJjEZLZjEOBIsZRAl8WK3pbY0wo5bIcFD6m0ZsAKP1WojKcCIyOkS4i6lvbUOm
LBiWJ0irL6rbjuxlutIDbesiWYJ9zbYlyXOCnCW1OR+4Yyb1hfBuCIFyQS/ghOcJwlOInGzGL38F
5X0s5SE5aTtyjWVHDHuTEscggdzpP4gKOiY3s4xqpeRH8zcDSLd6O7uxM5EMTKr9xRMj0i4AZRO7
PTH7obBTjt7knLaTJmfG2C3Ogb0pBovTv5KQ1anADWq9i6JjETBjKBa7EPRaTIxkWBIsBkn+OX4m
9q6No7m2zVi55URA2JCRsWJAUTtkmW4R5jCkaP8Auy3tuVd2UJAUar0Q3I7M9JuTQMvzSINcGTOF
LzNuMwC8TE3DZoA/SbkSJPMxLx05KR+l25bUGYRkSXkjAxY3N/UpkQJj3qMBAggWfNAzDEEgj0p3
0/iC0BpE2AuF5cX1YRo/rRgZS1C8W/ihMPONpRJDd9VGZ2IhqmoqPFapxjGDkxiBXqzLqTMxdlCr
OEZEgSnLVCxJUT9RvSM30wArzUIQLgl+SnX7y3R/WpbchpAZolbEZSAJjjwKmWOjUSJNQ1UduYkJ
uXohv7cNcJNQMTTMIjf2JQ3JQLao1LoSjsGQEut3HTzCiNqMtmT9UQ5B5klOISnEGoLkMhq2DLcN
zqYIHagdiMhUE6nOalCL6syKMozEHMS72UxODTBpEIQ3QDIxuCJNqClAw17kjqD4Ba4QgQzSBaJH
EOjvHQdsCpJBqjH6n6ja24yJMKeYeVLIT2N8T03iIEKBBcHEUqtZk74LTCOncjcu4PcqDUMSRZAO
67iiGtinPijuTDR+6DinAotMASTgF+YXkfujDmmw7M0x6YRrOWQQ0BoGkeShPbgBTqIDkqctzZlM
HNwy0mOk4MdJ7yEQAARj8RZPGA1VB00K8vcae3qJ0amqiIxIMTUAqpfhinj4E0QlKMog3kzhNGTD
MoylJyKAAO6jLe25VNFCOxq1n4omzKtDwohEPLGt1tR3IuJBzHhZERBeRfVwQYuBgahGO+Dt7uAB
OmXJSMZkwdhEe1EbUIxfEuUY+ZpEnJAAF1qF8QnFCFp3aTwOBT44J1qlZ6ZrcgS4GzIgf3w7BufU
bsoPACMYhyalflTM44OCFojQnErzQCI4kV9F15cSZRxmImncpNuw0xN6gox84Hca2CYRfxqhUgk2
Y0RlE64xuSNI9Klu/VQ1QZowwfmm+l2jtAmo1OFokXIOC1CLjwQlIeWDQFnBRjuzIkKuBRNtETh+
KXSmjEMKmT9KIhFphSiCDKBcizhbMZbQ2tsggiN3R3dRhLaLxjgfFGcjX8JuhLbgZyJ06BQuowlC
W3uEuY8OYXl+Vqn/ADR9pUNo7I1k9bRov9tsDy5xkxPwoPv7MJbkX6mkWXT9VFrmWks6E5zEogCI
MbFluziRIFqjkpkgAGy2P6ifQt6G7uCMyzA4qBiQQWrggp/UTmYTqSXoSm09P3eSP5MmlZRlLfEJ
kPoIsgNg+cGrKy876gARkWFXLpodUsE8okLpcsgdJYFxzRlL6mcZj7tz4oz1y1GjuXI4qUNuUJTJ
6YkEzHeh5snYuNVWdGP1UxJiNAAZgoPWqlEu3S7UPJa9GPxEklCAnf8AFQKY1RqHkXpVR2YzE4A6
YxejYqUoxGuMekihBTbx3HiKaZFuN1JpSlABow3DqYcHQi7shMl4iskAC8SX0nBSIDRFqJ0ZM5v3
oaotRjxVi+SYWHpTof3IDORR2pRaWBwR2n6bx9ypUrXuVOAQPchnY9329z/zD/yx+1TkOZQbCncr
rSK58XwT4m57JNerOtn6XdJ3IfUl570j1aidIpkEQ9AtRwP8EFvTi2t8cHiFPc25iU5hycya1Q25
bWuYJlKQNKl1PeYQJoY8AF/tSNIjo0zFfgDekryZ/mC9CX1NWS8ptxi9HJi+KG5PZMQa6hLSKCjR
GK1fTbAltRGmO58Unln4IbZfbiRWlS2JUusAaQIvzWzPflLe0x0Si9dAsAVsQ+hhKE4Ehz8KiZ7c
Izh1PQOTyRl9VEzjJ5QgZPVGW3txgJPQ9VDUCuS2/rNmQjMEHd2wa6s1GMCZ7hAMp0aJUxvkjdIa
LAV5IjcqBUBERBINmOCnDciJFgYg3v7kdzyyIB6yBZnQ1/EDQ8UdwR6nc8RmvK39okSL6sarehCB
mJl9qUjQVKMzOY3ZAnSJFqojc3NWbpo7sdAHwkAsjDc+pdnIBRA+omACXixACGzr0QlICRBanNH6
cbs50eTGiPkEmB+Nz956stf04k7PIZlaYx3NBvEPhmtYEolquDRSBmNyU3qTV2ujCUmBNNJqUDty
0mDsSTqfihvmcZTD3JJyUtMD13kMHxZND6iTO7A4r6PbhukmUA5DCrt1Mh9NuRD7Ug843lSiAO7p
m7GL4IEmeiMmBBemdFHc3ZTEwHAOfBHdO08SaFm9SE9uJeOIBK3AZka6nBTJ3CxDXoiTJ5Zpogyi
O9kOsxmKGByUJxDtQlCX0k5QJHUDK/JkDuGsAzvU+K0vqiLalp39rUf5SyH+3iNqJuTKqidcZPV4
8VHVISFnJsFohKO5DGQoEYx22m76waeCA3dImLyYajzKAlXlROAxtdEbJ06ixYupQc6ZAuHo6jMz
jYSoc8FpJiDAVwRhtGGnaaReV3QiAHNtAHrUdIADPImQBdEjdh5YHwmTn0LTK7oKRNnRhECZbuBQ
MpEtQBAfDEltRsjtbAaQpKZ+J0Sak9nDioxFZEsAFD6GMhCIaX1EyWf+Va9rdgPLIeAL+C6dXmkt
EAeleTtwl5ppIyIbitIi5F3UohgCFIb406h0yIT7UmlV9JYFS2puxJWjRKlzkgdMtBxTOWyqmjAG
RsNN1pI0EYAMtcDKZwKENB1i8iiZdMRUkqP1W8Xqb2DLXENCI0w5KlCuOSO6Q+ER7VJ6ICNWF1Va
src1riOoXCOBCA3i4FpYozPwgOya0XoFN7y+nkTj9/b7Dub0dUoxYVsjGu3GJ6R8RI70I7G4wxe6
1Hc3DLN2p4rWNYld3/8AmR17bSJqaP6XRls7RO61JGQiObBEnd2wwJYSq3Jlp1GTUpdOfpZ+WzyM
gWKj+RphHBi3pUN2UdBlgGRAnAyFySCURCcBJvxBeTt6TEsNdSUZz3JzMhXUaDkyLhwA+KOqLhga
qe5tbZcBxOtFKW1MxMrqEN2TjbL2qtucxHrAAdgj1x8FLVONMg6l5ZYjCQAUpbWmQs4a6G4JAx1O
y3fqjuRALyaroS1CgZzUqOxsmI3P/q7rAlEeaJagXoFKO0dZ3GGjB0ZbMdO6Ri1FHa35DVFzF2F+
SlsbrSIuRZQ+nG5tzjBmIFhkVp8gnctrHwOpCUjod9HNQ6ZnqA9KDnTQVQN+k9m3H6ieiGp3OalD
6eMdzbjUGOSHnbXSLgZ4XdNu7AYGhOHNmRMI6SA1LAnFap75Ycl+Vqntj4iGdkdzb32MhaTA+CnI
z1SGFnOS1mWgn7pUZay0TUsiY7kWkBV2U9icB5QfTJzVkB5EGqaEn0ph9NcPUonb+lh5gNDcujDQ
Ilmq6IhuDUKkUBoj9XtyMRzrLkh/uTqJIDO1022SBKwuE8okFnZiC3JG7SoH4KTckZZn1dhAFY/C
eaGoPHHkiIVD05JzN5DAG3ctJkZCOBWt9O3t/wDekgcWQ0xcyrHgtW51FFhaqI7/ALe5/wCYfUPt
Dh1e5MyL8/BPL981phjjkFH/AG5kwruTdiKsEQd2uz0ykWrIAOtycQJ7sJRkZYQj8O3tx4l3KgDc
RAPMXRHDsnKIbcGnX/MR/BQmNsmH4iGAfB0RtgmLvYA8lCBl5XnVhIxoTeinHfMZeUZCW9KgkXDB
swowEoT3DBnqHkhq+mO5LpJIicqqU9rYltQAAjHRizHBEedOA1CUYyGh69RIPoQnu77zHTG1a4oR
l90AE3REDWXwgU9SM5sWoDIPokMUAAHIYiMWuFqlKRA+EFjjZF5B40i6O8ZbcifuRkNWTstEtttV
uKOpxM2AwT7hIb7wR2/iAD1HAqRMQ+iZJA0lwWvgoQNYiO0wIe9+a3DCQhrciJcimTBES+nmRUOA
faoa4TG0/UGJZRiZ7kdybgbTFoyNuoqW5HfweIMXpzUd6chuQ3DpcAuDxRiBU3bNCUdxgcw4Wq8Z
VEo2TAtxTxmdeYRcs5uarzBJmdlt7W68B+MmlsVvbcPqIwkbEEGjWUhB9IcF70UZR2hLdPwkhy3B
f7vci25I6jDHwCPmDpn8JZEajAsagnBMTuGQLTYtXgjo1GD9Oq4RkJkyNXkQn3RqbJR2vp+mekiL
MGIs6I+oiJEFq27lECO3KcQ0gTcIz2tsaXqLsDgjOIA08Me9AeWCTfL0Ld+nh9LGUpisiCWZQG39
KY7zEb0zqA1YaQpT3iTOVycVEmEtMrMCoxhty1BzGlVPc+q+mmRK5Ix7kBHYlqN6SoujbJJsLH0o
7e5sjS4Lln8QtcZw2wPiEpsByXT9TGTYRJd0+uTHIKGxt6vP1Ez3CC2nJBovM4oHy47tKh6ojcgN
uQtEjU6IJIBrS4Kn5hnJg8dEceLhaYxLE0DIy2oTmI/ERF/FP5UmlfpQjOEvLN4l419CcSlAG4qW
5OgZfUSESKgl107nmAYoKfU8XNE6bcbQMMzkjskNKJoRSiJJcm5WQzVL59n+5mHES0Oeajub1Jbs
pTkXbkmjB3+8JJo7k4RFgDRajuylKVgR6UxmQhBhN7ycu/Ar8veIGUq0WmTaBVyA7Iz+KdwWF/BA
yiCWayETEHUH0rVuRjENiAF0zgZgUZnTzMKi5Z15f00zHbEfzGACjomXIpG5PN1tR24CJ34vI5NS
ihsykdELR59j44KMBWUiypQwFs1oAp94oASAbBdOCiJFo5laYdRGOCMzAOb4KlAUwk8ezd4bEh/3
9vseQkTpHwxMk0t4wJ+6REFeYTMQvqoERAynIW6mfvXm7ko7T1Y7kSjt/Rkb1aEWfwqiPqNsEgNo
dvUpR/2sSZBtWov6YpowEYAvpo3qWmGzAgBqlGMvp4nZNDJvapCGyZfdZzJycqKMxsS2pElyTc8A
hKcHf4XCaPSY1ZwFTeOmzBnQ/MkXDUdm4sFL6eEtwxFJgA19Do7WvdMJCsNEj62RH1f0pYUjMhn5
h0BtbUY7pNbGnih+bCYZoPXubBQjtS8wSFSAzFGM4luIXTEhA704wJkfiIHrKaW9En+Xq9SHl78B
CdCT8Xhdeb5R3dokjXqifQENG3LcjeT9IXmHfhCcvuXIJQEaGJ6NygBbIB0AfqBT+ZvYtcd3b3DG
ghKTGXiid6IhLcIZqx9Chq+qGqNQRH3Ia/qiQMNNEdW/Jzbpr60NzVqIsCGf0qLkgEUAg6AeTGjm
IHYDubR3gLAYFRifp5RE3MY7khEMP5g6ltkQBgaaeoLXq68RpAHoR/8A5EhE3jEFlI7m9IAB3IPp
Z1Lyd8mf3WcP4rUJ6nFSXd8kCBKpqDEt42XlnAYj2qG3FpCgZmAC3CKAMW4dyiYikwTSp8FLalAT
cgicSGYZFeaxhtwNJ6S55IxkJao1Mi2qvMoz1MPug8U8YT0yHVJrP4IQ29qE9o//AHJiJbLFGO/t
x86cPgBE4xz6qLb2diPwREzpxetckdyXTkZTryzQEp6mtf2hAZlR8ewRyHr7HxPqWgEiIi55k0Q2
4fEayP4YqJbpFxxWo3KJN425HsIzBXMfbkf/AMkvUPtSOQA7Ofqv7E2K1Yyr3YIQ/HOEe7UEIadf
kMTAff3tzpgO65Tbh1zMtc5HGdn7lVZ9m5MaCZMQZYMBwR2zpGuIi8I6tBDnUA4QPnmJb/7YbCQJ
6lD6rb3hu7uwRpjo0u9KMUfNgQHMtBa5F75qw25QkGAxGJfNflEyDMNUAfSmkDF3ewHC61b8eoUJ
NUBubcb6dTOGJWuB6X0swo1fUpSNCYkgjmhKcW25DSJGYBaP8pPFEGcjvRmGnHqjKBqX/pWrd2TE
O+t+qvICi0iO4SB00HiaqM5/VDZrp0NGJDVdED6mRZ2kQW/7tEBufUxmGGkTBDekMpfTt0iRoKin
sKiw6QD0u1+aOmAB0bgB1Pj+91EdIptP3I7ktzbG2HJfcaXystwT3ZERPSJSNS6gZBjOIJMWHqQ3
WI3AXE9WKYfVQlCNSKP39ScX82Nc3FxdCUwNRLBEwGsk0XlyieTIFtEJ9VcPBCYmJiVABd0ASwGC
5WQAvdzghuauqJcEI7hc6jIseK2t+MdUtouIUc0stseTGO5Ih+ql3ayMt8CEXJAHwh8AV+XAgSqN
1nPoQ3NyAMIl5k3kUDtEiIIfTESDnONCUesj/wDQRGuRbHyT71q29yQkMfJZTG9M6vvA7TGqlGMT
KL5esoxhuREZViQ57lNwdI+IO1eC1hjOAdjKiG7pAiLw1E+tAONuBPU5f0IygfMlE3kMU/1O7GES
aRECSj5B8yP3ZF4qO5MxIlUAuSoz3vpRKM7bmq4GSEtr6UTkB8UyIkcrrq2tsgXxPgtflU/lECfA
yBUhsRj9OBXbeInKR4tQBeTu+U4tqEW8V17exEyDxlp1eiLob8ZQ1TOrQAYk8nACrAAkfCww5KMh
thiHBAdHUACcdIBVNt5Ec1GRg48VOe1ty2xINLTEgKUjEykcSCgdzbJ8suwix9ICrHdgTkYkV4J/
NlGtNUf/AJkYiWoEu7Mu9TBAbUa5JjRrp8BggD8QF80ZGkRcphQZdkduIcyLBQ2x8O2L+tQ2oEEb
dy9C66SypuelQjul4DG9k0JtCYEmayjDaJjGIrxKhCLsBWQepTGcnAYFyvKnIlxQ4omW9Kcj/wDT
HtUZQ29BiCNTu/dZPuyMjxRnEggXzHd2SiNqMxK5Lg+ITbn01cCJn/wqAENMYDTEO6rdcM1wwCO4
bQFOZR/FKiYCieyu44oYoFzE4rqkfFflvqRjIY+CcWW6BfyJf8+32DYlKUIGAk8ATIkv3KUusuXB
ldsyo7OqW7twzDB+5AHaOm7v7ygDGWomjMIgcaouZMBQgXOSEYAypZmUoyBiYxdddDgCbptWkZoz
+rnKW0LBzfgE0YyOwayJ1X5IGGyNyRqzH1koz2PoduO3gZsPS1UfytnbiLR0e1N+VXHSf/EunRpB
caR61KYkBuToWDI70N+ZLPOO0QW5lkDubk9xrCdRzZawJ6ncgDFadqEyL1DJ9/bG3AVJDAnvQ/8A
48CRmHKMd3bjB/u6L8mRntmUQQ8QIgR/7xRhIuRQtY+CfW0sIj3qO3CEzOQA1Fj6kDGUdQvGQp6V
p3dO1J+lvhlyOCEB0gh2cH1KhcrTIgUd5UDqO3CUSDIU78ExJpayerYOUDMUDuBJkYgnTgC00ASS
ABRyPQoA1yNaLe29sREdst8Lkow3pREcDpBWiLTOyCXakR/bihrMREfEWkSTxcqMzOtzSg7i6uST
kLoHZeERQiO2TXitE5bmoV/TLt4oyhsS34gNGG40YpztSlD722JEMe5CMIyhFnERVojmpAmQ0sKB
mJs6GuUhTqINu5S2oQJmKSnJnr3qMN/dMTK8IhyAp7cTOPV0yMy+nJhRGQaRiBHVI1AWozjDdify
wQCD3lQ8v6ncII6zFn1HKlQhIfVb0xEg6GpIZUUdze25x3ZFomTsMc1t72ucZRaHS1jWqhHai0og
Rk5erXRM8LMgcA6u4P3cUY7cW9JWiRYAVPcnE9VQGbNMtzcI69yWmETwoiZF5SLylmUeFewyHB/F
fB6f4JhFnN7ocz9t89yXs+yVLiW8AOyIwJ9wRL2DsnNABXgy29jagSfMpuSIEKOHfJypfUylq6RG
H8xqZbh4kksjVqlEXYqwCLDSLelSO3uEQ0RaGpnPJUnEC7aBLScSHdDRKMwR8flxjIH4WB5BS+n3
9uMpSvMk25RYOEBpEiaCIDU7kXiYzvQuP6WZO0jmcPUmiCJNi981AjVIj7tGdlEh9sxAEvvGRxNa
VUdzZ3xOc5tLbESDEMay9SMYgykzBvchD6ba1zMWlRzHkgDsTLYtmjHy9x4sCK9wXTsziwYNduab
yZOcETPbMGHxGiBG0WOLiy83ytJDmVmOaIJbSC7qcNsjclESrLM/y4rVKQMmgSzYX5KQO7ASZm8s
Ev8A1FTJkzkNbNbW3OZMgAJMKBRbecgv8EvQjLb3TI1YeXAOp7EydRmNwyamNGwQluSDiwILKP1H
/wDjd0zjfc2zYEB+km9VvRm8N3yjpcYjqt3IiUyeaiJ/dqDi6EtsdUbnN11eCOZTZp3qU4wxUTMa
TEg3ey0EuDKgIceIXSelqVcLdEi7FweScsTqodRgaZFF4zJN/wA4e5M+5DJt6hPcE7bpBt+cG8FE
bwmInSSZbjkj1qe79NIGEyR5AFAOalHZgZtcByzoiTxmDV3DFGW4PzbAhyDzUnAJwoEOmONJAOvz
NuBb4S1R8oClGO3HULS/ggIwjGOAAuurbDRzqK962tuchBno1CtQnIGxaoohGBjERHVIip7ygd2Y
3C1ASGC8z6fdMf5H9SB69yUaEXYeCeQkA3Tj3IbW1M7enqDlqoS3N2c5/ek/8Fq2vqTEEfvYry4b
0ZRNDrGoeBCMt6YO4zPtwEAPWjuHdG5CMtIYGJEhWqO3tR1mQs/ootUoO9gZWQhGEyZVIuHTkaMj
EWKG79Rvk7US3lvcjigdmTiX3XdkcKqdr3xUZWkRbNV6RcJ9xwICssSAgANMRaKpdVojvbvxANAc
cSpbm1XUGHeiTqdN1A8ionZiTGwJookS0SjQAxBDoTkfN3ANMWaLNyTaH4UUIbhiJAVVZRdHe2dy
MCHBlkpa9Ey56gTq8VRUCJjEkC6eUSPsMA5yVe9kyJ2SI6rhgVq3SGwADN9kQBAcsCVq3JGZGG2P
am2diO3/ADbh1HwUpHch5pDjAFsKKe1vDTKNCFu6f/sS/wCfb7Iz3NRGgUgA9zmhL6ckCQrGfxeh
SJfSPvCNPFNMyPK617msQsKh1L/b+bJ6VIYFDcjNsjip7m4dWAUJbmyJeXTRhL0KQ24eWCSdIwQO
g6o1JNl5VZPQtZCUIasagn0IhxDTYacE27uAQAqZDBGG1tvufco7jMoSOmD2eEVKcxHb33eUiWBH
JR2tne0iOEGb0p9RMXrKTexflzlEZAlimG6Y/vxQGuUmswCFZl+fsC6oy1YP/FfBNuJLIiREgC0t
JqD3oS23ni+SiA5kMScuSb63ZMpRrGcaOf5lGRjt6ID8uJBLKIAEWFZNV0eoEmh5LV5gP8rLb3NX
SJAlMJQcc18e3xogI7kHY2jRONwGocfDfkh5ZM9yQDCjISEA4vXFSM9sR1E1F3K+BxT4hqstyP08
fIO4dUpCLEoSh+YZ9UiBQHvQ1NAsS2k+xQltbrbsS0zhIo/TbUPNHxE1xwQn5JiCwo9gUBHb0mVt
bqMIxjOYAJJsSUTs7eyDGkpH+KMoaDXTICMSSR3IvGcqNKOo/FnRbu4N6GyzavM1TkW/CFub31kJ
70zYgiDgUsFPc+mPQC8YSLlua83c3dvbBPwSk1s03+52iR+FyfUofVS3In6eEx1VBJGQUdOuMZgm
O479JyChIbst156pCzKHkPqgXYmhDVUtokA0Ot+m2alDcu/NAAsKpxHUqwbiHCJJ0hroNMSYijMg
F5kZEzFCSosT1cSv7Qg8H4sFIi2Hiv1AVUghxkoxF3L/AGm4KHGUj6W+ySbYqWijk35BOZOeCgeP
tipBxb1IQ2iYncPUQWDCpBPFfSy3466HbAH4t2T6uTBNEMCTThgicyUTg/YVtvIQB23BZ3MSaKG1
uQno3i0SwiJAnPJS2dzbb6b6aZO5Im9fhC3JCYkY/AIyHUJVxyR3tttokxhLVp+9iMWCjsx+pbUR
1xYCvc6O7sS8/bAk0ZkiMiOkxX+6lDajokYS2ov0g0eQKlD6PejOM5vtQNS0jpOGCMd6Mdeq49VE
YbkXBNWAQ3D0bsX0sKtg5R3/AKecoS+9KNDX4mUoR35/DQAvI6eD4JxuzckEuTU4I7pluShEHUBI
uA9XCf8AMMnu5dDa3BMzlIsDJgBi6O3uk7kadMuqmDcEXA2pESl1UuMOKk4pN2I4KUrEqiYB3wUZ
ygZEholriI4KcjDS9tNADyUTGMjrfTxZajGRq3fyWqMZF3xrRGOmQMeFGQO1IwArQlebuyMtwOAa
WOZZCexACQ+6LkkuVCEjWGsS4HBFtLBg6IAiRgdIqnGgSZ9JBdEHaAIyUYGBr3o7Yo13V0dJAIQh
OTys+KMKSJwRiQzlyCNUU+nb1NfRJUiLsNIIHpRlpg4FNQJNeSj5sYeYIisoknBP0kcFq+nIE5D7
ufFCc9I3iTqBHpQfS1qLVtSB0h60LBEuHsxAdRl5jCfwkAOpylPUxaVLFB50HBRiNxnNOKkNwiUY
kglqgoiJ1QJduB5LpkICXxRLue9R3PMDsCYAVPJCMtsbYgGcA+lbkoAGTV03HijHSJHH3pzGMi1g
bKMYxeMi4Zyow3KG8QAAO8p9wMxBovI24SMjRywCI8gbj1MZW50Rf6aMD/KSF07YrVpElNLYhKRx
iWRG7t6YhjHBT2ph2mWux7wyJ+nIEcQjIVi9Cp2v3oHp5mpWpweS9fZTxWkXNltnbNNJ1c2cry4D
VGUnYFrLUROP938VXelyBJVNyZORk3tTDckBzddUyeZV18SfVVaDPo/Dbs9yuVr2pyiQpbW9t6d0
/fgKSbMKl1WivRUuq9lUxo6p20XFaRuyERg6/Mm753RjtRDxF5cVLe+oA8yIDSGWS3mf/wBPL/n2
+waATHy417yv/wCSDqiXrYryTubMIAsIMGZA9B3pB2Bw4Bbm2N2O3tbTaQC97qJG50SLPZEeYXAc
KL9Uph+FVE6Nc5VGYQJ2XrV8UNf0wEWY1opb30sBGBLt7EDHongwQmxlOVHFT6E0nHA0QnFxlJ0Z
bjS4GVfBGU2gMHKIFXo607EiIxqcgMyob+7I7kzWo6R3KH0w2dkCfwzbDOqd9uUQbRiGPuR3JAQm
1IG7oyd5SlSqH0+5vRhI0lKB1SbKikNZM5GgYoT+tBhtk9IkPi8UD9FLZhuE9VXk3BThM/lyDB8S
tU4y3NFY7cceZUtuMNrajGmmI6h3oahpfE0cKoEAbaJOEwPS96IwYEyIY4qcfrYmJjQCOa1wnHZ2
hgJOTzR8v6k+ZHiJP3JyZFrl8kTIFxgC9FtbOxtuRWcjmvJ3dmMjKolkyMdiG3OcXozsAhvGIDxf
pAAQ3CY+YQBcA14LbO1PbPUNTXGZW2dvcholSRJDd5RPmw1yucGyCM4/UCcgKQFWZT+qP0QlARbq
q3FHciTASrKIoB3BGEDKQDExi7DipEbZIfqLV4J5vtwm8SumQRiN1om4ButLk6iBpzKH1RkNmVCI
EsTyChDdmZaKB7f6oSnMk0A1ZYKMYsRMg0DsLdS1DdExIlgMKKJYRnKUhISfpZ6nmojzPMkYgyNm
JwQ71EgXHYfLDhnOQCjtyo5oVPb+nOpo3ZGMruKrbEcTVRjmAqKT2oqEhCQLsa8k+QJ+3scQT4yP
2SiMAX8R2BuPq/gh9Nq1b0o6tIwHFfSfRxcRkTLelgNuLOSe5bkIl5+aRGV2gD0HgBFTEZ6wAG5l
ckMrp+zZnAtPSQO4v7VA7h84aWMZ1ELWW9tbcjubm9EkiQI0s4p4qe5OT/UV0EjUIAH1lDdnFjsk
aSA0SQXwQ+qDnfcGJAYAMpgbk465GekUGrP0Ind3JSm4IL071Keo6n1RkPxKEdyLkdUpikzLmh0a
ut6n980ACIg3WzGUmhGQYA3JlqJKG9syOjfjOr1icQO4qJhP8/d3JBm1EQ2wG8ZSVd8hviDAhuKG
7tbsZxkQ+3GLAAXrkvNP1UNvcjqPklzI1oFAEEABpyNXUZ7QBoDoJBPNEmAew4FFoEzNyhuCMYzg
A+2fvYOBijt/SA7upjAGLEHEIbk92J3o2jCoiZGyEtskx0jWD+LEobu6NPUNJejFGW3IGWEQUYTA
AESXRjt7w2ZagdRyYoE/WAB3BfDK6r9Zi9x4KBH1bsXZ78Fu9J+OWH8xTCEnxLKEsDXTYqQNCyIq
QLZoGblqUCG7B4k/FIWdeaJ6yLwAr3J2YxNQaVRI6TktciwFWNzyQnEicgQwBYsgDPdBkHFRTmhO
MzqNyc15e4W1HTzRlt6vP2gQdsXMXw5LTpOrVfF8isiLnJPIEDDiqueYTxemSfy2kPvImbu9AEDK
ZgTiVsiLblSNyUXMvAobfkHQT8UhUDkFI7I1SP8AKwKbd2pbbEcQc1OcNsy2HeOnIqMNzantsBpB
DFkGiRHEJoPCRLkkImJqQzsgZzESTjZASj+WaCQqBxQMS4/Fgju7fUBSUjSNckGbXEO7IkAGZsWR
ImCTV7XT0LZFCX1APlwLzEak5BMfppgx6RQeC1RkdowBPlEAAclJwJSlY2MSPeo7UdoDa2xSoU94
y0yuBdTltNp2vidnWmcgM3REmpliiB2CX4autvb3ZDW5jIHFwpbUC4h6zVdQfinZnzTtRWVuaoFZ
Git2UTKybSeac3KoCUXcEYMqoHNV7HbvVVTwVUGxRYAjNGRiQBdUNkdz4nueC0mJAxW83/8AXl/z
7fZX8I9ZWibacJYhTntloxk7xcsvMjvTEmrKfxdzKYhET1YyumEIjizppSEYm/JQbcJMIigGKjLb
caQ3UjKUy5yQfckRlRaapmJAsXQiAxfDFVg74yR3Nl6/dwX50NErD93RjUgGmSAOaA2duO1AAAiI
bUeKH0sYHUAwlEl1ufXfUzlIig2nBkeDSW9v/STj9OAf05dTAobP1e7GcY1DRZx3KPkmT/fMmvwC
factjevctvckDpL+YwMdIHFCJ2p723AtrFQOCM9v6aW3sUADW4o/kzL/AHpHpHGi1kx2t4homAcE
cXR2pQhHaMiJSEYh2uc0BOE5QiNMQSGp3KO1Hbjph8AYUQ8wGAFaBbP1IlHb2ZVLz0kjNSnH6mGh
3eU3kyf/AHe2Wo7k18EZ7n1zOaCOpkIncjqs/WZEDvWmO/Ijc+LTHDi6lH8yTmhAiCvO2tgzkAQN
RzzYKc5htb2wdCGrTCIZjitRBEzXqpTvRkYFs6IyEtvbAo8piK3QdyPmRLM+oHiCpRjOMdA1F6Eo
whuicpUEAT1BnopGBhEAsQXBR+nMoxECxlFg+nihMgjXUSOK8z6nQfpou0p34sjPZMNwgjoBJRIg
wD48FsylAVIMSSpgzEox6YNkFIyLVoea0guAbhTluDqBEWz5qWodIFALqf0+1Mwjt0Baq1EuTUlP
kVESuPYqnTHLEqUhEkGnFlLe0gSFYPQVUtMhtkREjIcUGNAGc4rqZ8mWpqCh7AQHA+PvTxPdipHh
61OXBvtFfT/0+37JUhmB6FVHPAcl9eQNemA0+IYKezImEpggmFL/AHe9fU6xplCTRibuLoEf6nMo
DksskaMff2bE9yegAyi7PcA+xRj5pkN0OGGHitvZO75U9iIYkfHp6q14KX1MdwE7gIYReLSD58Fu
bYnLTPTOQ01fggJbxAoBQPWyI3PqKhnAY35Iad8u7MS3rCnLccRg9JkNPkFCH030gjuyI1SmHjHg
M0Y7mwBvCjgAxf8A0XnzgNzUQemTaYtcwCA+k2zpifiD1P4qoy39uREQS54iq6vppmIyBfqR1QlC
ziQIQ0gkCj4I54pmccFHeAkxkYlhYjkunzJP8LB3PJQ87bO3IgPqDFrISj3gj2oDaH5rdZxUdZAe
piZYm1kSJ7Y7yV/t5mMw4IazEqUIwAiBQtcpyGGg96O59THVAEMOKI8hwDp8UBDYkIgmPfdbcfLm
Glpi5txU50hHzCBqo4JuFqG5AE6ga5e9NKcCOnHO/gjOMyZ6mp8JGaNR/FMbEoxErjpCLhuAQk+l
qlsXU9z6fc1MHEWqSpbsi23AVBuWQGnHvRl8JNyUA7gV8FHhIBRlnfko/V7fTIuJAWlJqFEsIyJ+
GS63i3wtZDVuGc8Y4BSlS1kIxEdTnUdIqCokyAlkgJbgIQ6njGg0gBThs752zGBOgDJEne1SNbkJ
hud+outkyJ0wkRUu5UN2W2J7hEY6jhEI7I0giWoHFslDcAhoAYxe5W4GiTL4a/CtsRgPMieuTiqj
Hy32xeI9i2Cenbk4MLO2YRlEgVDjJD6icTHbJpgZLTsvDScHcjiidrcIYXlV1I/UblMIL8vbjuEm
oIVPpjGTvc3R3I7QBNzqJUob/wBOHNTMy9CltbH0grQSP8FM7e0YxkGIAdTjESGuknUY/UabO4oX
RjEgk1BNwmkGIsR7U7I8UJWYOpSnWUqp4muSqGbNZ8lSICag7kXJQAV+yy4K1c1ZML3WmOFVW62x
iav2VTpkSAd2MakhMaJo1ODXQ1U05rrxyVBqbNACLNQMqjrNmVZNwKfDit2YDCWxJj/ft9gYAjQL
lsSqmA5lb0JQM5yJMRBiPElfnRAh+Ehz6V0gwP8AL7im1answZDSOZU4EgdJI1OK9yOsilKEH1IX
LqO3LdAMqAl2fiuv6zZi2AkD7UTD6uE5NaIK6JxMRXUTX1Ld3Pqd4dIGmOpj4FlubcJtAyIiScEY
SkJwgemcEZRjJhcu4TbhZ7UBrydMKtiowhAbUYhiIu0jmXUpT2PO3BXbJkQInkFPb3Gjuy6tzcmR
EHIQZCQhHf3DEfETpA/tIR1fSbBBwIPrd0I/S/Tw24vaJlp7w68vc+njLcNGBLeCJnsR2pCohCxI
xLMpPtkXZiWqjHa+ngIENUkyPghGH0g22AHmdRDd6Mt3YGncBAIGk14xR1fS6nA6Xk3NGe39BAjB
waclEy2BtaQ3Sbji5UNsSjo2/gEiJN3FaNe2dNDHbi0u/QE/1nmwkTQ6C3cZMn2tUotXUwK1Aypg
1gmiSJZp9R8USNRYPIjJEQc0c9IIRJ3BEAXl6g6E5bznDWfW6jCU4GEheJEy3OJUDuafqbhjq225
spGMWjg1R4qRY1jYqG9ItCLnTGuDLTubRkBU1AQ+sG1HSPh25VD50ZHa3SPLP3YxA9blCDy0/hen
gtW3qjLMSL+hDy9QkC85A3GDhadZK1H4buVQ1xUZF2lWJW5/V7FOWGllvMHeTsaAhr8kIxDAYIjg
qXBevFdPVLGWAQgB13Mwauo/Tw231sYlsTmp7MNREaSJs+QTkkFyKHJFlKIZpXWo/DGp9y/Duegp
4nRuDC3gmkXc+pP+Iv8AaK2B/JG3L7Qww8U9zmVvb8anbiZcyLKX1LjzvqJylvSNzVm7kxJloc6R
ckWCiDtgSEzOX9chU+Ka+Q4rUa+/guGIQA5plEyd47sWapqCFCEdojc223I6o1MfiBIyUpzkIb5r
KVgUNqEyWDsJaWfChTzgZgYmZwHNTjH6EapPAzfURTBwi8WOUiB7VU7cRwJJV4taQYl/UtJJEcgA
zqst2RZww0g8qLXt7G4Y4vKrG1lr2z5QIAEBHHi5d1OJ3TMRYMS6A3iSDhrMY05Mi25u/TwqaykY
9N2+IqMvqPrJyAZojbFXrkpR2/pZgm0hLQCGxHVVeftbQluOYiO40geZp6lujY247Y3SZFyJdX9o
FFP6nTt69mJkfLGkycvcgonclohhqnI0yshLUOMQBT0KMPqNmWyfvbkOp+5nQj/uJRZh1RMfSy17
O75kfxRk49CMvKhub1hKZLtzCiNn6SMo/eL+qqfb+lA2gHkXD/8AMp+Xta92JA0Frc3XTsRwu1sf
vKm1AVa4+HO6jP6na2tMZfFTVpwY5oRlESAkWGkyp3oja2oiQBqY0d6GyYwh8Qawo1VNxp6JuRng
RyRmZHU9CGbwQkK1YnALlhwQrUhBnp4KgAJ4X8EYzAaTOLWW5uB4gMYl8zZkALBCQfgyfiDZbY6t
Th2F6qG3KBjH4i4uWZEj4jVsFKBJYB2Bv3IwN20gOKhOXy7gtcTq1XCjOhf0KAkAxIBQAMQCWAiT
7CpT3JCMvLlpJmzluarOAAGJqmJh3H3qLbkXG4cQcFDZiSQDUw4c2RkIykbCRYeKjE7bgH4QWKnE
xEHD6ZFz6Fp3YaZRoMmRALxd2sXW1Kw8bogMwzsEdxzuEfzE6uYsF/ufp5HZnacASoz8wGUrxDjS
2eChuVEdwPA0IIR+8Bdw61TA0u1AybQfFOLdjRcPcgsqmmauiXZAyj5kcXoy0AUOOS6K8rLQRU39
3Zki90RKhwTelPj9probcWc52QiNyO5KriFW5qUjc4lAgmD45oE1OJUjG0KAdmkxHNAYBNGPUjHz
/IgfiMRU8FTcG7I3MqIn6fb17xDasB3oz3S5NgKAd3Z1RJAyon2zIE3f3qJJMiBU8e5bUNiR1n4x
aq87cfdkA4BrEAL6iW7SR+nkWZm69rs17sBKTNU4J/Kiy6NuPgFGfmeWAG0xC6t2cvBbkuokRLEl
GMIvXMJ5RbvRnuUBDAIdR6sCtLVRjZ8qImdQK3RHlvV9VWTkCJGIUQA2IkDjxRMviKIaycp8Qq4Z
UQjIsMcCobcvpvO3I/FKR6fBahsRoNMduJoG5JjsxgTk+pDTEgGrOjLSxOLoRkAYiwZMIhsmVA3I
L7x4VXwlVHiqgd5CrKI7wq7kR3rdlDaju6wzvpWjcMdmMaxEXNV+bNxi1T6VufTCJnHcFZEsX7lO
e2TMxqwwjiSmRbFBDb2Y69wmkRiow2/oNvZYAE6QZE85LWIxjPmPUjI70RMXaVvBT2hvxkYfEa37
15R3KjFiyO1tEziPAoyEwCbRetsk9BkEAZityqzoPWiYyctkn3qEjUwyIRkBIA4A0QHlmTWeSpsx
9JQBiI4BgzIfTb0wN3dlIh7tFDb+nl5ldMiHIClKIMN0RMozLdQGYQO4SZn4ibv2EYewoDE2iPWU
JNUcaOpREi75sCqiuOaAkWLl6ZrNNEfwQg1seK0blvuyWiQc/dKEI2sEwwoPtFfT/wDlx9X2udO9
AnHDip7MCBrGkk4A39ChsQ6hEmTm7mpK1B/6hfvCYSNchVPKg9JTZW7CRb2dm8ReJgfCShHeOpmE
SbgWuq/EasOKlubglGUSBpa4lVQbblPbkRqwID1Kidl/9sJvME0YFr8lLfiXjKQiIxk5r4lefIPE
yMIgkuZxw9K0xhHUD1Amoce9R0xgNQkB/WA607gG5tAAGJFjiQVMfTy1W6PvBk2OK6oA9yH5cTlQ
YhkTuQYxeUQCQMMENW0JNZ+C+n3NnbjtkkgmIZ1qOEpW7kJyOgSoNVLrdhuFoSgXRlKYDFsSuk6s
LI6SwBsi2d+SE9meiQL6gVtT+oMdZi8p2jzR257we4IBI9AXT9dImUzKIBxI+E8FvbezvygXB8yb
ve1UdX1oFR/pdND6p6uSC7cLraMvqdTbgOkm/BSBmx812/mayYyPwSFvuvUos56o+LUW6A5IhuAP
bihEUGSHU4QOKcFiX4BDUaDxRIcvkgwq9QU8CQWLDPvVmKhCVYycHPuQkJU1C/BQBMXlQEYKJkNU
tcRqFGcFPIsRgpao0P3nZGRkbuGZk2mprReYDQ4KsAeOSEgaAuFrkWL3ZUOoipIR0kTHqRhuAxkt
QIIsqv3IMXY2KP5cYzP3o0Y8kSCTI4uQU0hq5l0Do0sKsboRj/aAvLmBACmbozMyAcGJHijBomBx
bAqEtI1Sd/3BWkHp/CQD60J7ZJkR1iWaspNtw1RDtiU3lgcXKE8JVoXWra1GjnFAaCQMWTHbiDmW
CmIx0aa4kHwTeXKl6J/JMo58Fq0ExOGSm1nVbriFcOFe/rQsWuFXFN4og9jm3Y6l5fxSGnk666Eq
yiANUh4BaRLSDfTkqVdMexzYIkdwWQzTb8YnV8M5fdK8zaAkC7xw5hDSdIN0N4TjubRuRgeKYUCY
nkhWmCP1MtJBoIktJs4rT1VzW9LP6eX/AD7fZKcwDpDtimkBtNjIhl1b0H5r9aPcj1mnBbm1skDb
IZyK1QnGTaS+oXQGuUiK4VREnMsHwK1GcnzRkJSk1HdGcA5OdVKEYa6EljZHYIEYxNYla9lxO7Cx
C6q8CnPoVcOxyOSYd6u5NkZSDiz8UWg8jihIw8yRpGJpVDzxCMnrVwiZb22DcGpYIiO6JDhH3rVE
ExFA4FVRw2ITuaFxUomWNfBRiZUOCuFdVmmclCIBJJZYjipS3JDULGyO5qpGh08UN+O0RtGTGZdl
LajERBtPktM9wAjgo/Uw6t2BcZL8kxiD/KDVa57ggSGsLIvvajLICPqRlIuTiVq1MVrlLqzFEJTI
dwPG6GkuAbL8twQhtwifMqbtYOVpmXe3BDe2dwDc24db5AKMJB4mMzlUBwhuCkZExrmE8pADNGN5
CoC13YuhCPVM0GAC/wBx9dvEuWePwxI9amY/CSW5dgkMKFarkInHBPIUiHPNAzHVniiASGu4ddRf
hZQMKRdiE4KaQ5KTl2cR96O4cKDn9uS2B/8Ajj6vtEYqtpW4S7D4BFHl2OOD8k0byVcez6nDpBfl
IIa/qYbe4CLghgHK3Nj6vdM9Jg29th7B2pzqj5UtoT+4KelCO3uiJO1p3BG51HBQG2CxmNQZ+kmv
oRaA1ESoGdyxCJG2NqMNwbgYhw9AKIScmRBJOLkoRjKTAkgPSykJFxIChq+k0B8Ste3IwkKgjBDb
nXcb47WU47h6m6Pa6hMVjJ+YahUBuUjvRJ23sa5/2rVGy+nOOs+pa9yYiNZfE2FgiIS07IlqjE3p
ip7O4ZTnpeAHBTJhUu7lAACMXFcnQ0zAEQzijqQ+oMt2BFACRVGUYEA4aio7JMmFNuJJI5IyntAj
EtUcUJQ0l6jiiWpI9e3Y05J5bM5EtibiyMY7PlgF5FzUrY3obZIMhqL2L3UxoqN1n45p/LFYz9du
9HSIgGUb0/dloIEdQm3LgeKDViMOxyHGSGIFhkiJhpEU4MtMDq/eyyOaBMyGRnty1YkFbOHV7EHG
Kj9MW8nSJCLC6BH/ANyPqK4p02C6gHFnVe5fyphR1ADAXCMrOC4Tsx4LWGNGY2TTiYxzjbwTxJkO
S1yHIKzHNaZFjnRPqPoVJrU+o4PggSKguugUuY4KsAB+GqMp7OqT0rRPLaMJ/eMEwMhmnDsbJxPS
eGS1y+oeR+6BV1GG9MmQFxRyidqZj/SXpxXXuAiVw3uRO1KIICPlmMjIBiQSEZmQY/FpDKJ3ZHy5
WESUfJMtIoxOKfMdjmqJGKfDNc8V1VOQVIAckDF3VyuKoE7WWq+BWrO6I7HZwboy+EhCMgw/Enem
aMIB3DOtze3Y69IoOKLBh6kNyMhKMgD4rSSdGMbhGZumlI6fwvTwTIHcpDHNARiA1lUvktRut3/2
8v8An2+wQgf/AKcaPxOC0To2JUS/QhqlGRkH6S/j27gAJk1GXmVMi4McFxTYYo7UOmJL1zRYuOCn
LZ3pbYnSeTKBJLxvL8XNGEZNOQoCj5peSqne6YOFVVDclQNxTE0yTGpyXmiIGodNU86HJ0zvI0EX
XdZbBAZ9sErWfgNpYIbsgRtyqJEUK1kUIdsWWx9TuRaM7Bw/eqQL8SiNAFLEra2ZQEfMLHckaeCG
xGPmQIcysVCO0QDE1AUxuw/NmencNwF9P5OwIQas42kUPpt7p2j1GMaOY4IbE/LhABtDhgFIQ0x2
IAkR6XkM3RIVqIvFyLFF7o6qnMLSYPKrEptMYszl040Gtg5RnYkriuKiJAOIzobfCVKUQND42C0O
RCQaQFHGSfZmYyylUeIUobtCN4ygQcDFM9MlB5PUCqAlTMhagHj93lmgBKMBGUiTKoRF2N+wg2K/
eyH4RXmgMzVALc4kIHgu8IwNxZOTZc7BCIsPtyWx/wCXD/lH23wPoKMZfEPSM1++PZ3IJzaxTy7+
36uP/wCKR8KqQiWEr+5NUEP4lSG8ZOWETG4rVTn9QG0aYbZhSRLdR4gFQ247gIkDI7YI1BhXUpyr
1DSYmjUuiJReJAFbdNkx2TIAFxCprZHZhAx3Kny7etS29yJ1xuLoaIFbe99RvyhvmR17UQ5F1OMY
TMmLTsdRdu5TO9HzxKLQBLCJzUBLdmRAaYh2DIQjudAAAibMFtmIJMXcO47k0nBGB7BEULuSR93E
KR2onUJvq/lUTI9cqyGFe2tZGwzQaMnvyITHb17hxqzI6IyAPw4N4rXuMZWD+1bcYmJ3HIkBeTlb
m3v7LT0SlGlQRUUWqO4YYtghHbGshzIkLa3Q+ie2JSbNRILwxoVHaMjKAEgBpL1sqhOQtO4JeYX6
gRpGVEP9rKW7uhng1GxwQlBzAMzYZhHdDmBFzcc0ThkreCLx6nrVbe5AdUJBvcrG68+W1+ZEAGZu
icBKBdUKY/6IAd60yscUzsRitMfiOKaRc4oiUixDBCT0INcOxwhrC+EMeCGqMX73XwOe9S3NDwAZ
jmhH6fajFjUmqi+zF/vFlGUYwOrBqhUhDSL0wUf9tBvx66+CEpgCRtpDU7CRJiQHB5J9wEjDSmjA
iPqTwgZEXTfDwVShKO4A4fELUZRJNi6aVJIGMmayJmBJw38VGJgALEDFGO3AyGGa0kMQKg9jj3Ji
QQiBXmgLA4DtfBMmeuPBMtMbDFcFQ1yTFaQuJVSUy0bgamK6IiI9C0E/H1FexDaNxSKJkWGJK0xp
CNuKbwWZXHsEYuTwWmNzRb0JRMSPp5O/9e32edOMp/U6AIxbpZypS3IxiTkL+CHlQichrg/hqdDa
M9qMz907gf0OjtiUNzcF4QkSR/3UJfV7sdqBDhgZtwNlI/7ncnEfF5e0bcyiPpo7hGGrSUZzE9p/
hBjU+hBzMjM0HqQsWvRkQ77QoBkgY/BFnAeqBmNMYU5nJGcYCQFXkbL/AHInGWyT1REhr7o3X5ER
quQSXHitJqRQhX5LqwTuDkyrhgqhBmDZqJ3IvGFgj0gA2zQnKWkvWWXFfkSG5EDq3S4Ej3rZIlGM
BARJLryd/wCogYwDQ2oh5UX+3FYhhEO7NSiJnJzK4NghGY1kWdDpDjvRuAbgIDbcCPUDiOK6pncl
/LVflfT8iQvzCYjJ2Q29iEtzbFpSpCPeUAd6Mt4hyzxiOAKf6zZlKIvKJYHvDoDZE40YjcOruWmO
2z4gLa+n2vpDGW3fcasvQpebuQ+nEb+YdJ8Fo3vrY8JRiWf+oqYhMShEtGQBaSptFx964ZS1hoYk
3dOBT7ssCht6WJBdFMtnW2ROYZT3IhozB0gAXW2Nrdid/dJfSxAwuvzJHckbyw7lDzQT14HTgvy4
MOMjL3J4xAK+KMfLiJS1UerME4DgWIt4obcSa/dBo5P2P5hZGMgoC4ep7JDifQhwR5IbgOkNU8UB
3AJz8R9H+AVsj/8AHH1D7bLlYrrp/MKjvyVCDyQ4IgdRuw9pTm+WATdv1EfxbUx/3SjJgTgETK5W
SeLuPQpESPWGkXqQbhbm7EiUogFjQCMWBYeC3Nvfj5czMGHS+DEFnU57RA3piRkSKMGdvBHfB/MP
UZxyONE8iZGX3sUQ1RcLzPuAs/HJHSHllioyiTMn4oMRpKAhsS/mBHqKcbUQMiw7wiZbgAP3Q5bw
XXqmcxFvWvNEzEyLRhKr8A3tR4RKOL2xUokCEYsROVAQtP1P1cIFnYAy9Scb096X4QNPe5C0jbEY
vg8i3MrDwTQGkUamAwRnuSlPEAC3IKe7AnTAtIG6EJsYAjUXIICMtndl5T0IJ9qJlISN3kbnFSH0
+jTKk4AsT3ra25TOrbgIkOG8Sm+niI7YDvGbOfBfDLv3B/4UNMSA9dUgadwClpmYl6A5HgiANUYl
nFU8JGJ4URjIjmzFCMyJxGErIygfLEj8LdA5XRMTtmLM8Zve2CeUoxP3ep3dDb3ImMnq9EO5bP04
LbU4gyDYrc4aT2SnCBIh8TYI53tZZOhEG9HVcEYjvQo5KMSSAaACqGr4Mf8ARPGXuWlxIjAX8EwL
cF8THEL46dycAyhwN/BMwCMowIiMSVnyR0vHUGPJSE4CUjaViPBGRLAUZSjo8yMS2pyF+ZtmLgC7
oaZAHEGTetPpfvd+9dFHun0CY5VTh4thxQuC7OCojaOqbkAAV9Cj5u0RDEZgeCgN+B29davGmCbb
MjS4IKjuzlHyyTpL4jNEw0kEUMZA3yrdNuxIfEqiqq+KMs1XBUUduNyhtbVx8Uuwk4po0GOfZSoT
DGy6z3YpohuJqUSTUoA0GaAAWs8h/BRi7AnHJDanHTG0ZDLiEKLzZloxqSz+C0QcRBfn2NEHXk3v
TTjofMj2JzIDkHTOTMYFaDBmtJtMl5hiCJVAiXMRgt6cnO4diTk3+OHYIDZjPd0Amcog0crVGEQB
hoA9SaENuJxIhVUnEcoheadwjcP3qBDbl9RKtySBRHbEJbzUMzIV9C8/a2zs7hNhIEHuW3+e29Co
Mi5UJynLUKSiW09zMjvjdMIG8bv3uFARLRGAN+KM5Q1AHoEo6onnVDzgxleIfSOQwR2tqgwAcLRK
Wog44JmEY5knV33WuwdhptRRlAX4p2qc0NwSv3shIYYG6rZOBRPLwVU5NcAEBGLRxJDeleXt7sgB
aOot4KppihKDahjdfdPMIR2iDPBgETuDScyEIbZlKcrCIDnwQl9QNzb121Eh1SZbI1R17u39MAOm
U5EGXJkNz636qP1O6Pu10D2lCEN2MYi0Ygt6Fr806Wb4Sm1kg/ylCEIbkd2RYHbi1TmpfR/TyGmB
rLSCS+bupeb9UREkERDsBkwIQ24EAiJOo0Qj1zlItGOMio7k9o7UdsMXsUIGcNo/zv6wo7O2BMzI
GofDVEfUaZR4dQ9SB2dqJOZFkY7m2CT+EAI6dp4i4K8uBG07uIR6jw1Mn35kxlaAFY/20QoYgmpo
wGd1Lb+kJJYNIEXerIR34ebt6tIdhXjRDc2oiMofEAAQQnkaPZgPUo6YgEsAAGuh9BvRjCO10kCI
1UzKgIgkOKn7L2lmmZimkGOYR6hj6U0i+TJgL5phVOay9X+CVtf0R9Q/wGNl0+BXw94p6k5BPiUH
pwTD7E45xI8QqgH10RqzZ1VqcKojPNdRfIKlMMkY/UDzAbmjnmh9TDbPlzjKMQZColY5hSlu7rSG
uMYUIaQZ3WqMzGQqGwU57zGcZR0Szi3U/epkjTI9RrpBAvRMNkFh8WJ7iVrG3LSDhpHsQgdgycO5
n/BGW39NAEFg5J9TIaduAc1obeKf6jSNs00gV7ls+XvQn1EmL1FMVKREG0G7Mo7UJx1GjRs/NHZ2
m3PqWbhHjL3LzZkznOUjInEpinCBi1Q6sCMHTMpxiQIzrOgqyG4Cxm7+KEAXIob+5GnF1Xscqqiw
uQzc0drf2jGYLVCJ2Nxg76ZB4qP+42wwNTCr0aylPbm0oh9Bx8aqvgrclkVGOsx24l2iA/pXm7QG
6dtjCM4iEzHnDFAn6eY2zV8fBQ3ztzG9BgJmMnA4BT+l24TJlENIhg/rRhKRg2D1BU4xkGAaX8yl
DQwMWcZ4OtJDmqFaOiOKMhVYqiD1o5QDCDnUTGx5qR29xhcgl8UHIl+/FRjMF1qAlTFyhB2kHq+D
ouBLInPmjGNInCVQVOMQC+XNSYESegIo3JAkucQAyECL1k7gF+SOmRhiR8SE9sx3BEMweK6oS1P3
MhHamYk5EhFyJiNyR7QyEp7XqkPAqMvLG2Y2EWiHQ03arZomAMYguKMfELqnKcjRpVHqWremZkBo
g2AyC9q0mTwFRE1r3o7bAwJBZmstM4SjLgxj708TqjgbKyqsua4qjo/UHq3JHTAckZG5qjKFBGpJ
RIna61QLxPr7BKZbldNHpjwuq1WKf04oyOC1RqR4IapUFAAGTkl1dRkNwyMbaq2RO4TW7UHZGc5i
YN4wNQh5RmJx+GRL8nBKBmSJj70STHwKad87IEXCcvKYvB2ccF0/l6S2i8j7FvNFm2JXv8e32CIr
+XGhsKyQhCpldDZmzt8Zu6O3qEsiEwA0nEonV1YcURM6pWZEkyE8BggYyMgLgoTcdNUJ6dYakcUb
9JpRDbmDGUsSmh1xAGDoaGgDeTKW5uREwA5JLOUTEPtijGoZSpc1rQdy1GIMAWYlAbcS0sRZCAGq
emh4pt0jmBQeCBcy1fesFp23KIII5phVDraR+JlQ1NAv5s0E4iTxTC6G/tnTIGnFR2RDzIQ6pf1Y
obm1HRKJpKJqFGO91iFI66kKkIvy+zIjAOSvMEYy3N2pMg7DBlLf3B1yLliQEem5crVpGrNRYA7k
S4pkhHdmdI+6KBNER5lyn88RazDJHb3T5k5MdaeMBMWMQA6fQWRYO6hvFiIEEg2U90S6ZF6BlWZ8
UwJJKjtbhYA6hje6kdjVokbkYIndj54IpGzFEbf0cRNumTuQyO8Ys9yMVtgBjYvWuf2mIddJbgUz
Bs1VvFdRfgEwDD/CK2xlGPq/wHJTg/4O5GJ+GUojxZScOKWzRxLlgmmNPBPcpzRR24/eNTwQ8qGu
MPu2LDJQm/xwjIjmAVrPwhiUCD+5Wk4e1S3ImIjEAGUiI2ritEImVbixrmmlBpDOvqRDPE4gIy2x
p0EPKRYEoz3JCcpWIKDkEZhCW3PVIltAfUyYTlEg5m6J3SS7l3qTzUIxgWmaOXIJomApmiCLKgJ7
kHiayADg4lHUeqLgjlyUgIkai1VHUxBFB3o1Z045J5sSVWhQFa4pzclgsjgeIRM5ajYuEACwRllV
aSAX9qrH0oxEAxYsa2TiDDmiAK5rzBAEHpvgVCV3C2PpTAk75IEnsyPI+pTBL9RfxVHqGTD0oSdl
QlgckS5rVM905qnURK2KJjhUuvheRutBepuFnLAujFgxvgVHQNJd8zyU/NlolggNydjTT7VqizEY
elUYuKA1QeLBxUYoaIE8aMnepizcFIRkJHU7jJG4ia8E+5txNWspCJMC1QKhAbc4yAs9CtUtsiGO
mqkawMbYOgNcmmdIqi4f9+CaQZX8VmrdjM8TdA4GoPZx7GjV7lQA+6G9KYHpFSV5G2GgKSIxZXDZ
LS74pojqzKc1e/2K2xTCgFuxlTtYrJPEstJYjHNAiOmQ8EZjTttal0YbhEmsQnjQiyA3jpGJC3xC
WqB2JM4Y/Ht9hkY0O1ECXeUfLkATcsFr3d0k86IzJfTW9fBZHAJwWMbVqmiHOJKeRDvUIv0xJoBQ
souSYYgLTUAGj3HNPEx1Gz2RnuPKZLRazqOuOhy+ZZfmHrBpFlpdhkFKMoGQZ4tWq17LQBvqQhE6
Y+LrrBEgFKMosLNKiiYmIwYXRjH4jd05DytQWTn0LTd7pgr0QlgMFDzGhAkArydnbG+TFpTJsVrE
neul2UYCFLMMl5f0sJT3JgicpHSA/BDck0m+61FLcFNRdkWLYFfEPFNGvJOSwVTzWjahLSfikzA9
6dgGDCIrZQ2YCsyAjtyqDWMs08ivM1YMgCDpkemTUTRPIKO1uRAjI1zRjg7BQ8qxgCeZRMi4ydE7
gPBBj1YjBCFAaOApCv8AKUGHUPvLXK7uSck5YQwCMwFLzY1kOlESAbBskN6tyQDl/wAKVD+ker7b
CpTGyGjwTToUQ9k4+19TEWG7uN3ydF131TEao5J9uv8AKbppUzCMyCZENFsEfLn5YZxE1qFMF9W2
GbS3w4BaDIASA9KNBKM9IJP3dKMpSJjIk05qW29z8WNEdfVtxHw2RaIMZfE+YUhV1GEtIhAGwqWX
5480zI0yBIAifiWqUg5qBE2yCBm8iCCDEtZHc2iJCYcRfqByKMZS0mRAz5qBJ0v8JF9SidzCMYk8
qOnjt7e9tYSMRqjzKaG1CBAZwAomUjLQQREWdHdlCYkaykyh9VvS82Gky8s0QgRWppVqp8O1nug4
raJFkxtZNZsUIk1N3RkGYWQ44J8rldQZ8ewPQrgXBTkHTLHNbMxL7jhbZ3JgfURk23wliFLbjuAy
Ar3re6aCcm8U64qoJGAC+Er9OfcFXY3WGLFR3TPTGWBuEBsfnn72k2TfU7c4xwRMTpi9Ik1R3dsn
WJAabljijvXhFtRGDp5elEioVbguEdLPIuWWkl8lKAjqeqOr8uTMHOHBO9Azc0dvdlr2gHI5KO5E
1Zu5SlE3k7hRjeEkZHCTIi5b0KGQN1OtAKKJ3ICWq7hGe3HTOBEmFlJ3i9Q6jQF7ptLB7omErZ0V
K8BVdcWTMRyUYSLEClLFGEhUdnDLs1YaiF5P08rjrkMSqns1Gye/FcVWyuqLjj2+vtrTtZZplpch
ux1SwW7H742JPxGvb7NGlxLbjU2FZLREAbYuVRg1wHqiSJNiBRlLRLVLGJoe5a4ghgwR1Uxrj4po
x0mX35XPggRDWZC5PsRiRpkOoB6lF4EyOdQnex9K1So1VECWoxrUWT7hBBuBitQfS9QbspCHSMz8
SOnNo4+KjLSDVsnQ6tBFXxJ5J5S1E1JsnIBItQ1R3WJapqwRMrGjBUWoXOKcrJGtVmVbsHI3Rjts
A9f3KrgiNRJyCJcnmmWptIGKd+jGRNE7Cc/xH2dsvqN2sYjpAZ3KG5EAaOmId0xYZUdRO5F4vWIy
UdryxCAfy4kuYviETL4cEN7SdUTRHciRIm0Rmtf1c9G5IDyoNYC2pHa3oaJxObhMCC9WaiGocmQM
Q8nxXWG00IHFPckVCmwLWBWqUmIwZEgAiNS6BFDIMRihuyqGa3oReGh6x4j/AIXmof0j1faLXUjO
pKbAIg4L8tAE3KYV+19Xl5sj41Wb14r1pxX12WRz8E24HykL3TatViBwIVCxHihGRebPInEm6ByV
LHA8VGGAe/EupEYVpwTSqCHBHCqOg6RKuk4vlyRlL4Z/C/BS8satuTnk6bVS7O6lDdlTVfinHxCk
giB+4R5LSDWHUO5EEmbs4OCMN0ddhIVBL2Zbe7tdMmPmRtUHBCDMRWuKGlgSIjTi7IbUiRAuCB8O
aiAX1El+C07TuSGDOjsbk2njanAokboi1tVNXJCEpPEk9QshF6DwRE6h3CP8ueSjqBf2pyHY9UcU
4yzRj8RuMkBOrd6Y14MmiCMimemKj9NEF4lxJs1H6eYiPp9oESI+I4hRE93TPa6gGYcVOQc+YAIx
+I6luxlhMv4ohn9S1RkAcQmMXemajOfxUpkhDYGqZA1SJcBAmTRay0EGQFmoidskLqkTzQTIw+7K
4wKqG4hMajKxUjtyI0xMiDWg5IQ2+oyoBxTSDEXdVLFA3LomXTQcVSsagjgVoDGOAyRiJERNwg2C
MXeJLmJRJoNLLbhGXVqfuU2rHS4UJHEspxdzp9CnpPEKBIBJubH0Jq9JxqpGJEtRcgFvQVIbj1JI
fJAi3GqiDEdVio62JbNHc+nru7dmLuMlVWWC0g9IVexlpFh2+xckwt2Oi32K9rdnBVTxsUNVSnwA
oFvkt/6adv8AzNvsiCQNvy40ObnJDy2i13JFO5fEC2Ab/VFi4NhkEWGkCtv3dNEucmLqJiCJeJdD
XJmzAqjuF9UQ0aFvSh5sb11lx6ijARAcM4cH0r8zVMGm2A9O6y1z6pG0Cx8AEJSqTcUp4IyiBEM5
CjAAEZsj5gDjFkRoOn7pwHoQmAZt+GnqCMp7YbElyfWtcSIRIoboxhuatwChNn5LyjMmIrQBvQiQ
85YUVqYqixXTRvvXRnuzlPc40WkXxITrinYVFAQhGRiGL6pUPJgy1AjSKA2HijIkFw7Ag+pNFeaI
EwdgRWqn/ugGLNA0tmhtgEwAboDsVQESIeMTS+ayLXC8vb1GIxqpO/mGjm6MpSY4BCIxRMTqIo61
am4odTjCtERci4FkPrfrdyMdNdrbNf7pD1Ke4Nze3Jmp3IORE4M63JHbkTM9MpGo4lERkDJmcoCc
tQxAVR0gPpNSow3BIQmT8OAjEtVSEYEmJOoYqO3DaIxqQLI7kdyD7fxQ1h372Rg5JdpWb2oRjAAs
7kAutAgDKP3mMZd7LTuljCghW+P/AA0P6R6vtVWmIXmvXJEnHsDZoN9r6iOMjE+MYomQImKROBig
HuTfxQlgQPUjyPsdExDvqYNjgiSXPHNltzJAMNLt9+jl+9Fi4RwTm4sUBqAlKg4pges1iDiy3toR
Jcuxsz1WmROkOwFGyZbUSTLiVJosSWDlgyMohtWOC5IAEg5XXJPqYi5QZh7VKYzpEZnFa6F/iiRf
gotEARDDVWhLshuSAEgwGVEdUIgyq4zRo7FzkgzkRDNgFriTwyfNDd3QJEEE5FlLdg8WxwibUdaN
3q2zVsX4FPRjcIT+7kExGDceaZqxLP70ZycxJrFrh8FEiIAiGFXUTEm3UcEdNRcPfxTSqc1XBXtc
B6o0eFjkgISFbUWoy0GV5AVAK8vZ3IykZgCconSzWjzW/q6T5hNEw71THseMiEIz6hyWiYY4EYKl
eSqnTr2puwsqkhCRiC2JuhrGuIvA1HpUpQaQLsHb0JpAxL1FnRLMDmnZkHZXcG66R4LkmapQiVok
QenAW5lAuwErXUn/AAtdTDvk6jqBvhVEEsaUsgCO9lMQePIoSYbhzsfQovqGX3gCnJ50TQJa9Qj9
XsvKBrOLGhzCsmA5r29rA9R9SY9pJNFwy+wWLUuqkk8FZEF+QTl1Qt3JouU5o6qQE90+mmCoKYKt
Smke4LfMAQP9tO//AJm32R1mnlxIri8sFEs9MAtReJNSLAqQMNUjiQ7BULR/DYlatuZjuZGyLgab
Etda46tV3zXlHa+ItGNH5ogNEg0iLIS3ICW4AwDAkk5qMJy/piahl5lRMZUATiNBitUY9QxNgjMz
1E1BOaqA8jcICEX6bC60xi4/fEppUjk7utNYvc2YDIIyg5L0J4KUxJzKhcW5KWoyqa0KI2/hFhih
tk6XxK0uCB944oTi7mxBonmQ1nKcSGg3BuhKJBlk/UfQte1HywzABO/etQoQccF5c9wmNxEuzrRj
iaMoygdW4+ITCBnE2EU8NnQGvIqWxOerb3mlJwKEckx3t6fOTBDZjJiQ7E1Q1FbkfqZGJ2w8AGBK
MYvdhyQEY3uURtwI27mRRDgtlVR1y0QFSVo23McQAi0ZAYRsE+2wkTWI96Jk5bEqJB1vlRCc4hjQ
DliiJ9MiHjICq2dmBpUSObRKn5gMi9C3FbcwDR3i/qRntmjvxCjORYH1rTGmcsuSjPabUBWT1Rlu
fFKju/8AwoGJwUQC7RHq+0AE5vdaLDsBNigYXTTNVdMD9ic3qYwLf2piHCeDgjBDk3sRa5BtmYv7
E2ZkB3o4SwJzMUZDg3B6KkmzZUl4ppl4oSixlE0Mq1X5pcAuAACHOK3N+J8qe4A4iGjZiGQlKUpE
UFE0hq2mI0k1dSjGEpRatHUQHO3EvIY0RMS/rVLgJ4gMP3qq0xQ9BzTAlsQqkcVxUYxqZEBuJXmT
idJNJisURG8sOKlDcBjuC8TkewH4sxzTNTJEAY0KAuRfkiCOnLNCWOAPBWd/WVLzQ5HTFxX0rzIA
ygWBez8clKUC04msce5MHQEqSsckdNxYWWoVEqMc0SaJ5m9QLhPjZaRItcMaISNTig9sVrkwBPS9
zy7HHZknkLpoktiDVdUe8J4ybgU9xwTtVME5VahVsqBVQ2NuoPxE2AzQl9FTe2x1D8f8UYboMZAs
YnBCq+J8kKIhBSncC4N15cOkXdNq1RBzRkb6SMlKRrlgqgxOKM4ydwzIGJPILcF2s6eQIrzUKjMP
ROSyLzFS4TzIMTQg1Ut76KYH4tr3JiGNmWKpTl2AZDtMssEPEoHtc2VLdrhdRbkqBgMSnBVUHPcq
lwrOcE8jpBsEBCgNziuou91v/wDtpf8AU2+yMZEt5cSQOcl5jaIjBz4lO40g0IKeNzR085gH8LVT
4Z0TVAxFXV+gCkTgvMEdJAYN7EN3cgZB6uiROg+EGzLXuSvbNaJzBB+47U4oiEul20hAmej+V3Ut
qI1GP3+KEZ0JRhGWqYFHt4Iw3HcnDNawRS74BCe5KUgS3cpbhkCBatk8ACfxICZBeoAWtjqOCMnj
E54oDSx/FVdQlKYDRLvVESo1h2ag5iU5NcAgMBYKUNTwLGQOLIymNJNRHgtW4dMM0IyqCRU4BGGz
AkPUgMCj5e23EqEPpzLanH4jnyUY73mT3jQybFb2mIG5MR8uc6tndGc90zlkAoR0SBlASLyu6jPa
hEYEkOfSjv8A1LAxkQDZfkyl5YwNn4ImAJGKEz1SJZnstMyBE3R6qH4QhEgSJcCQo3NR8s1l8QwB
WlxORj4ISlJohgI4KQlXVQtgF9OQHc3ycFGETqeR6cb5obW1GWoB+lQ3PqYgbRrpcOeAC3Ig6WPS
Dkojh3I/8MfMhq05vipbUKQhMiPK/wBh0wsLoE2injcrmhKVyojuREsF5cMUSZdMVU2+wRJ67UCG
711A0vqK6YRIyZPubGl6AgonalKAY8eClDbnqhCRESaGmKGrqFCX8Lo6qBg8QnLObqorknbFdNC3
oRlK1snQJeJzwWr4o35owjLSJtrBAduBwQhsPCLEatTuFKO7uR2tQNdOKGndmYiQE52aOJZDa+ll
uT3WJ6mjEtVT2gIw3SKbhmSBwYIz2ZS3ZgtpjEob+/tGG2TpBOeSaIJN6JhuATasZURiYib43iyG
9EeXIRdgaxk1PFHZNduQaQPrQDhmfWMEXP5kItGWYCjM9D2HqR1BjkqEgioKY0ZEiVBjZEfeFQoO
zF6nAouQxo4pZRkXPXrzW9ACUo77SrHTF6vZa2As4lUFxR1E7rR3TIkiAERENRuaEgHiRVs0I/eF
jdwqm1ii9H9CbS7fey4hSkD1QDtIsa5KJgH3a+bQARODFEAgi5T5eC1AGTYiwCfQYjjRPvEAB6Cp
PBZBcE4tZNiuPYwX/wDJLbG2NW4+IGClL6GWrZNgaNwVQnUREaadWLoAd5yC0bUpSm7EEMj9P9NK
Md013NwivJATI3Rx94WqW1LY+qFpxGoS5o6tuTD7wCYXxVac7pnoVREGxuCnMjkUYRYg1CiHuC4R
EqtcFOw0mxBTxLkF+5GBBicCpAFziqi6g/gg9qIxNJOwohCPwfeYrTtyYyNWTbpc4IjE5K7K4ZOQ
rImVWRiAODJ8lwQewT+hPM0wATAEp2KcllQd6uqyACu5VT3BWKc0RG0WGa0zk+LcVj39m9/7af8A
1NrsDByduIAHOS0UIxiUdqEAA3xN6lGJk8zUxjdCEIFz9539SE47gnV2WrcLyJqP9EYRhqmaWsgN
NLEBDY0aQPvErREu2IXl7QJA+9i6fekWs8ioyBGgmwTbAJ8xhrxC17gJq7lOWDGwv3rzCWyQOomd
3OaIJ1ENU2oowgJCIsRmiNRlLxCDBjgbI5RAJMkNJJOIC1YIGRAEbI+U8pHKy6hXNWRAk3BVVE8Z
JxU5p3qM1GPwk1fABE6pTGMhZ0BGDlnJkXqjHbA14aQnmeToSBds1r3JPN2ER7kdw9ZsAcETI0OG
Cj9NE6du+5LIKW1tS8zZBaMytV5kO2DYLSYgxkdTPVAkiJ/C9hxTTsQwWklicchmn2pa4x+81+7B
Q1FjOrZd6MYUBFzWgyQAixlbJfTCVBroO4o7kjTURSjVRlCQeh05tmjqAAqRIBrrrmSTg1EIPpYU
dOLIAZj/AIWUQQHlEk1+7VMDUy1k5yN05TDsLlNEOmxKugoRioNmETYkLRH4h1EnIry5GwBIwL4L
Vt3auTlEb17huCpBoPXNkNwyAhKxzdD6uN9rbGoZgPgh5lMwGY+KBh0kCku9CO/M7gj8IC3NraaE
JCQFKsQuqYPHkmHUMwo6osTjgaIZqtFTC6fDEprjBGLUUeRWoOSylpkYsTYpt2EZ8bFNrlsyaxs/
NADcMwRRlKe3ENINKRYtjipS+pO5KMog7eloCvcjt730Z3Ix6o65GQEjiUN3a3IgyPTtbYIiAwxW
owIOcTR+RX5UhH+UyWjfFBmWPoTRkYyP4qhER68aHBaMziLJ5HpCJBwZzwXS9U+PZW+LoOXicOKM
X8VGcAJNgUZgkCTFgVqnLWZ0ujIldIQjtksfiMRgiAWgKEzGlxmBVR3NsvCTARYmXEoaJGQkK8xg
pRntz10Z4mQc8aXwU4DbEIggTiRiFTpGS0EEnBqrTIGMQXYWdfEZSGSZhGPiUNJoTVaJyEYxGqR4
BeXtDTswpEZ8SmFygAHJRgW1C7Jhc2UfpoH83cGrdOIGSY9mBXUC2LLSZTi93b2J4HVqx1J47IL4
ldG1AcwvhAwVQuoCMsJChRJ6tvCY9qtQWQjpAbEJyWT3CpEj+YIHamCG9amNyI+G4uuibYgFbU5l
wPiQO2QYEqsQeIujEAjngqkGXNkG3GZPEgnNdRpknF08qnFNEcyqL4m5KkimkxCYSZMB3ohmIujF
sbJ5UiMEw8VWqpQLEqoA5rqmmhKnJDqVZhX1FaY9P8zUQJLl7Kg0jIJzXj2Mt4C3+2l/z7fYNEtM
fKiWpnJeYTQ/ECfcjv7W95cpUMSQKDIKUNswMmoHIJ9C1Cj3IPoeS0zHXgDZACh/EQUYSnKT06Yt
61qhJpYBGcmFaobk4kQXSBCORu/ctIjT8T+pM5fDJCEZE/yobcgzYiqrFnq7YoaBXGRsE0JiIsZl
MZgn1oi3JUDAeK14cVGLadOOa0u5xKxGRNl0moo1bKo1cEWiHz7K3VTZV9C6Qe9ZD0rqk3IOUQHk
TR7IiJpk66g571WI0jAI05BMgYllomXimHeVuEBzOOkHEeCOqscAM1VzAWcjBGIkISjVj708iBHF
2B5oHbm5NXxCBmWkbEZJ9dw7nFsgowBb8RyTbZAYNEcFECJ1C5LMtv6nbYzhJ4AjNEyIjqLlkNRY
4PUroi3EqLklz3IQelKoCNmZBxiFan/BjWHi9RwX5IJcuxJKlKUy0i4iSOkZBSl9WSNuQIAvHBjw
QltkESyWqEgQhAGiMpXKJHaGqVDIFygHZ80WidOgdQtQ5oEt+YOiXHL0o7chp00D4gC/bGJkSIho
jIcFt/mGWvb6g9mkVbvRYsUT6M3W4GdwdKJkzvQG9UJRJET8JvEnEcGUNRBiW8WTG6du5MMcESPD
l2wyY87phN47gwUuZHp7ZD7ugmlcUISYAkFyWt3rcIIiBYxq4C0ylqiKMapvqIHUPvCyEvpNwGAH
ULkKMX1E2Z3Tm4Tbm3F/xDpPoXxDbbEv6wogaJg2kSCPFde9txGIhEy9i+KcgOEYj2lOY6pCrkkt
6kcIyLjvUn/iCiRkz4p3s60yFbhdVCDXNl0F2LHiDZEkjpqyrdB6GxCGl3JYEXbJS1vKWqMIxJ0s
46TIIiMDtz2IgT8s6yCaPWhovKG0J7Yeu598H7z4dylHZ2pQ3G+AAzhuAEfCcGRDuJFyHconTSJs
BdGQmKXAoyaPU9wzrVZ7g0Kd3BwPvsho2ycjYeKaW6ImQYxHUUCNuUv5p9I8EBuaYAj7j+tTjtPL
eNIGVdPFOak1R+r3fhhSEfxSRnIvKRcqqbtoibZINMtkm3YCXEUTtKJN10bgJydvWtUfiHpCqHBw
K8z6ciBxgbdyDbZnE1fb6gj+RuHgyH5ExkGWo7MgbWutJ2Jh8gpHRMAjFdcS9rI6oE0wCMfKcnEA
hAbkzI4ChbvTEr2qzprDgmHe6qK4DNPZWorF0zUT6SV8BVYkHkqE8ihpcHMWTgAE3Ku/ZZ+a9y8z
4Imz48kPqPMkXrpKMNodX3jdlHdBDSsBIavBdRrkuqAAlUPWnBPJ3wAsVHcLRhIkDGyGqLPYlEvZ
CEQ1alaZio9qbFbz/wD9aX/Pt9g3IaS23EHU7isrUZaPO24EWMrcrL/aw29uchaUeH8zBR1bMJbs
/h3BORIHL4UJTdohyI3K8zqhEWdrIiBMtxukD2rXvDpyCn5LRjhQOO9a5F5PYVKEZRLYaqFdTB8g
CV0ivG6oeboSoHT7bGRxTyKGm2KoTyWfErIJxdMfBAC6aVCLLRE0bNAysbhPtRrnKwRJp6l8Ac51
CfFMRU9nHsoS6chZL+K1ACmBQPxE2YU5Ly9IGknmmLdkphiLAvVNIsSKNU8roOWOCoNJN5C7KoEu
dLJhY1a9AosDx1UcrXLqH4UDMNRmFfQiYHSGcE3DZshUdJ+K1Frlfwj4IiPUeFlQ6RwTlUUAasXQ
lZyDzT5qI49tf+AYY4oGxDOBmC6iNqVd4xjEnAH4lKItgOSfH1KMdomM5FgRSpUo+ZLclJnlLhky
dGL0CJxRGR7AJGi1ZrTt1nEgx7jZS+n3AfNnA7gfIUqoQLVc5hx1OuioEfS/aAL4L6ee6ANcZMHq
GINfFXouefYRiQVIFwQ9GfBRMpRJtordrstqB0zhIhjE4EE1RlFwQHARqhhyUiBVu2Fa9WFWdUdl
ICgBLuixyRGSdnGkuLKc9zphCOpiBLhmvq9jV0iMTAPStaAp+yZAqwUfMGoHciCwNQcKLdkNmMZw
EjEgaZBltEGu6TFuTe9GWnwqMfcncg4EUWkiJc01elMzCzKlSKKqlg5oqHgU1wUG7ziykABoouoX
wBQYnSRUtRCUauWZlQ6XvSjqJEiJRLxDVDKO75R29y25IS+OJDaSPEqe+Dubp3K+U42xKIrpnpk5
Upbf05AgwiK0fDl3Ko0SFiHA5oaZasQ1f9F1yDmjEMUakyFkxkIvi9e5gmhGW53MPFEy0bURfEo/
7nf1yAdn9gRj9JsGRa5AjX0lRlDaMZaWJk0B4yUZ7u9b/wCntxMu4zmR6ApSh9JAzmGM9x5FuD0C
lube7pkK+WOoeKjCMfL2dukNVO9NORmcoBDy9kQB+9O/pdA/UNJyxERp9Kjt7ENEdNRWvj2VT4Jl
1CiYV4LpTxnIMmLF8WqtsRiJ6w+SMZUkMAn0gx/E5QrGn8yOmcIzFdJNxzRAESRlJx6F8MSOBWmU
Ij+5WBOYssS6fAIlUCqQO9MG5unMxzugzn0JoxA7nKf4RmaKpMjxssFYKyaPqXUxPJNAMnJVFUps
kY7pBMR0xzKMJfAJdPASz715L6jGRbKvuRfxxVCQurqAzuiLBAwLnE3Wqp3JGpZi3NS2TGUpSNJG
RoMtNlUNwCcXyZCRqMGT/EFvA2/28j/39vsIEDPcO0GAwDmqlu/U/TGcQCwavOqh9ZGMdqEAX29w
6pl+ABU9gbMYQJ6HjcDFA6r3ADIaQ74kunNBirscHCPmkyAwFEZQ6Ini6EX1xwcLVN+QQILrBk9y
EwFcUyZ2CcE96Z3RMU4uq3RLNxK1ZZYrVIOea+JjkFU9yYO5TAuE5FckGoSqJjdOfhyThW5qyBNW
VS+S5K6DWGKpUr2K7A3XF6Hgr9URTCibUCMrlCQkWIrwXSHibk3LIshqAfFVkxK6B3ldRfhgvYmV
rJxRGZ6lEToHoGQAKhz/AODIlUepGuF7LbMbQLgcUTKol7UCDgygQaxct3dplgiSicy/Y6Z6tQIz
ma3QlGoGzKHzEqU9z4hGIjI5x6SPQjIhnjbvQjEPI2Cnv7paYZojicUJRLEVBWxKcnn5m45N6xh2
cEW8EBpAMQykZ1kTV+IVYg1GHBCUYtIaWTanGKIHcmCIbCiqEKXUQXBBkD4qoc8MVIxDVNk3g6Lh
uIyQBq4KlGW35gnFvS63dDmJ24xOq9B2z5BRMg4Eo+ksCt9vwy9S2DH4huFvAKWrAaa5kTCDgM1F
0+Kd1qBobhMcUcPYUS9RgmNGV6FS4ICYbS7SyRLhskQCQmNJEUe1cUXl8NX45LWQ0aOXdA4xLkcF
+SdMyC54lCZJqe5HT0meSaxzNU4mATwWv6ra80ltJAq6EPo9gAxrqNf4I/7je0CRch/ZBOdW7L5R
7SmhGO2OF/G6eZJ/fiqMPSUPKgScCvzJaRkT7lE7pMya1oEDtwAY1YIAUq9E5L1W2f5SqXXVXitP
Z0gk4NVDzYGLsa0oi2dndOE1uSB25kabYrriJuXJsVDdnt6RMfCarzdnbMZAuYsdJUjHahCcbcUZ
7s2c2iy6CAnfVL0DsaSYB+JWZzV1fs0xc8E+4a5BNGICqXVVQqieQYJhRcFkqW7HWiA6jihMVYg1
4Kcoy8vf0t5csXUgPggWCJKaAcoOXzTXTeCaQWlhS2CLz0HAFM7vdPbI4IUcSoMFvGWP00/+pt9h
O3tGW5KA66NiwZGYbTiB6lKcQdtqSL1XUX5p9wmTilKJoAFxcYJo0bFadT5oHsESxGSZmCzyVWoq
otbFCuOCL0btaxVEaWTDBMKK5HZXsyQzCrYLgnFwjicUa1yRJujRlyuFXmyJBVLKl+z3o4NVUotc
h0s44ojTU5JzSjB02p2xKYVOQX4Y4Mn9K4q3endzksnVn7K1QJDsbIHN1Hh/wbmmRTh2wOJK0XJF
0XsA5HJF8L+8IEdX4T7EJHu7PLTKl+18kzsE+Gj2ok0BNO9WtFRmK0cniVuvl7R2bOeuXqHbxxWn
CSqPuxPrCsRZdJL0p3oAGiJxwQyXxDVkqhO603aqhUSAcmLeh1JxU2Aw5oRNBL7yhwsXugOZQYst
wkuREJpBjkicBdEWoovIB5QZ8xJfUxJqBIcyQ7BbXDcf0BEiTuX4XkmXqTpjijI4GyMTTNXdasEC
LMjxKrfEJwHemnJNeL+rBBuPNdReXFdJobg2RaY8zgiC8iC/Bk0ou3oR1sDKxyVDr9y1Ehgq4JtV
zV7IuK5JgPBNFh61GW9PSJWAqidOuUT96qkIRYNRAk4MhmgMEQTXIVKcQ0RJpOdPQoTjuR1xe4YV
TeWJDMFDztyG0+Zc+hPLcluFiaBrLo2wTmapo9I4IeZIyagcp88VZCAuVZ+SO7vhtraYkH7xwC+F
4H0Kbn7pY9ylUliQuorgqYJpF2wC6IsMytILnE9jnu7GiOZTRviVUq3ZZ0DOgyxTAKxKyC0mvYwW
rSTHgFCf3ZegjBR2/rADszBjImjEihRH0W95u36k4pxCaXURjmnnQZKiYYpgSF5kH1m0Gv3o7W4N
M44FVCz5oycvkoR0HSMWoCVUWemC3gbD6adP/wBTb7BLcMhEbcWY0qZJtsS1YkIkB3+8cECKgGgX
XNhi2CIJ1jBUZ8HT53QMixwGKaNGQGlj+JCZiWwdO9EwqM08qZBECLg8FqeuSY3K5LiupUFFojfH
giPEqiqF0lObKhVbdnBBcrr2Lhiq9yrir9yrbNcVQhGlFdAelc7hdIYDOyEZUiMAsvWmgKoOe5OS
xKAysmIZkwxVS/BUDdmSonQey5KLC3/BBRBoDVE9wQnAXaITEdEwYkjA4JjeNjwyUokcVEBALViU
IC6r9nbjL4ZRYnvKYBxngaUI5ok3I9ih/SPUt5yzxYc37NotTzDX+1Ph2MaAI6fhejq7gx7qHs4l
ZBECTEpwRI8EUQCGAeq8KFTYdQui8WxrdE3IopapfmSsMWTSqBbBQ01/igSOQREBcMUYzn5cYgGU
yCW8F5G2TLa3Wj5hDRl3Ke1909EtxjWr6g9skfP2wCC0IyGGfNHcG2AdwO4lcAM5ituRhIbUpHTI
gsSMlpjQKMd2RG29aavQpQhswjIUjONMXdloxAcIADguosYnxZE5pkRgUW71pZgbc1ICoFe9By5O
KJGZdCdC+NinuTlZGBatiME5oB+9VUMLpxchiV02Zwi4bNlYAYupDS44piGzdR0SJqzFObqERiQg
DaJZFdybVqlhGNT6F+Xt6I/in7gvz9wz/lFI+hao7Y1G1LoGRoDQYBFiy+InnVEzhGTImWqh0mAH
tUpbY6Ho9SEYm4Rpa6OxuAEXi6cwZ8YqO9tyJjkU1gpgcETawa6lE2MSpFiYkligFqmC12TOIxyB
dHrfNgVpgaC4sezJexA2jiTRaQQB4lZlPoHNfCAqALhGqZNcr2KgROJTlCMIS3/qZFhAWCOzvQG1
tbjDy3cRemoFaJ/mbUI6t6ZDyfAqZgeh+nkqmijtg6QSxkUJSGralWO5GsSg61bY1x9KOBXwuTco
MXGDqE5QENwXnHLihtXngTim3ImPFBaInRLDJ0RvQ8yTuC7DmFvEY/TTI/8A3Nvsj55Aj5cakA4y
zUxsxG5qNGj0lESAAjaMbeCqGCY2wXBMyOS1aQStQaEchVCBBORIq6pK1nwRJkXxZHUH729CaAvk
z+lNGrZF0aFgqBVLDJOLY5qj0RiO8qifFGirfmqwrmshgOy1cuxz4qgYYl0wDK3esuKI9Kf0Kgf0
plUOuGS6fBARi5yAX5pERkalWc/ilVUqi1Ii7LMq7HJUo2P+qY1WT44qod+zNVVl7FdX7ADdYlZf
8CBmVDVWAyVne3JBouX1DnGwRAEWizUahDs3BT27TD94B9yJ3Aw92KiI2AZA44LSZATaylIWdh9o
EjvK0BtRFB2bZziPUo/TxL6eqfPAdkQfiG498NKz9foXT6UBAXFWQEgRzVBa3ZRBMXHIs6d5gZO3
qTC3GvrTlgSqVATAEupyJckmvqREnHOqe8TjgpHeD7UATQ3yXmxJERURKDghw4pfkmm+kgORUs+C
jITlHdMSPuiUXxkjOEzICxIYimSI3ZmQoYsBcUuobj6jGTzgS1MAGU9gw24wk7S3I6pRi3wuFvbE
xtDb2Y0iXbqFJR4qMN/dGzD782eyifohrkf0xJpFiGqEZfWbQOqJMduQIBNnAxTGhiaBvQoS3YGM
JxeJwlyzRIIqUcCMFzTRrxCYWCEYm1SUWxVA5NSVJxU1ABavF0QWPA/wQmYtE5VQM5FzZqcqoEkm
MiSeYzTFwMOSclgbYIMbokkZpxIADElO5Z8vag2JZgFr3B/SC9PHsBwgCUz6pfhiNR9Cbb2htwP3
9wuflj71q+q3zIfhfTHwCbY2wTmzKWpgBYBeXEgyuWUBzV0eSY0RxW7EUiSSEIwgJZ6rN3Ied9Po
e84mi2t76Xd8+Mi8oioZnqpEnSwJgA4aWFF1vGYIwxF04OogtRfmS0jgHX5W7KHcD7EI7c47pP3T
CQP/AHUR9V9V5Bo/0+0TMn+2jKW1s/SDQ1d3eLzp6kdIjED8IARck80XtxWmIvUlHVJ3FQKqxPey
fSSciU23CMe6vimJ7ivYjRNgmAPJPKgWnSeaYFpHPsp49nVf8K2/MifLMgBEfheqMYRjGOgsbMmh
PVuQLOA4Pep7v6eqAhEyvMsyYVlknlU5YKW4aOWiPaiJ9cLHbnWJHIozAaEj8IwXSXBXn7UeofGB
60xJjLjZeohGM9uO4DYyuEPrfpJiUSOuApKB5ZL/AG/1Q6/uTPtTWITAnmq3wW/t7ov9NIxOBHmb
fZp2toy3RtxMty0YxBkptB8DMUPcUfLA2o/y0J5lZBVwTvXspZMKlNKnMpo9XEIiIIOZoPFGORqb
oTlU/hBqjtiGmrk4lGlZU7CbRxQYqiphcpxjcoNROa8UwN8EyLlkHPcsiqOr9645rinIXRGmJNlo
kzqqpRYHiqrMqkWGZQM+s+ATBojILoDcSuqTq1M7ItV75LhwR43VVl22dZdjdlU7U4LJUTgrA81W
Pgrsr/4zhS2tw9UGIJyWm1FMEEzaT6a1dn8EJwJIg9eLaS/itcviDuowGC4px8XsRiKOQR9tsXWo
ns2/6R6luyf7x9fYzUBdWurNxXBDI3otJoRYii+KXCqZyRxQXuRIFsUMCVmvej61Ul81Qu+a6nEc
h/BaIwkd3cl0NiAjtyl+aPjJLxi17KENk9O28RKWAOIXl/SNLRF9zcJbUXwCn50jqBEY1cF8XUtu
RcwJBIWmfTG4WgSaL3kiNwagSNEwWC2t/bJ29UmjMnp6cVD6gbw3fO6tyz6nrZeTsbcRuiRlHSGl
b8S0zHAA1ZeV9VGUzOJltAVBOBL4I7FRsbEjpgKxgZZJ+KMqBcUNNHuvWg9kSzZDghLx5ImMQGwN
V0Gt6YKMXM4nAIAkkYg4IQMiI4Ih6UYAVWiDhrki6BDGRsRkUdTUwVOlu9UwwwUCCRMWjHJaZbkp
EFjE3CdShuloEOe5adnbBbgwVDpHBOS549lKOpSMg8RWIqaqI2mgALy9yec9XewRjGZAOD+9XMxk
arr2DI4mKc7coSIasceaGpyB90hl1bNOAf1LVtPADJwgYm2JYkIylOUial02GSyCEtyJ3I/hdlo2
gNiJwjTxK6BqlLK5W7u78hsx0lgblVLDLFdIc5lEmNFV2yTSPcFQd5XBUsezh2XU5HFgHVQnKPrQ
A8VRHZ24nV+JCW6XmbobrdW4QXxYLd3YdLiUAAcChIlitv6jdEdv6Xa6INQk5sqY4pyg5oLBa9fU
fusmqeNAtJNMEQcaEFSAF6xK0moyNlo34eXP8QNEJfR7hhkTV/4Ixn+V9QKuB0y5LyfqBp3Y0jPC
XemqCr0U3FRsyY8NcOz/AGkvjlCMrYElARqMqpsCnK17sQRhWqMtWlqtitMCc3lRag2nMWVWBOdf
QtO5ICDVkQtzyyTJ6Rb0oCDR24gBmuVqABf4qJqBNqelSiBFgUXsmuU8lWnJcLpoiifFEEsn9Kqb
pomqz+xyVaq7sowIAnIXxR34y1D8OLIg9gEIkp90twinEa5lMKlMCwKrXNOQ/BOAAez3dlvskyLK
6oCSnbu7GwVLLFcV7FQgpyqpwr9lR2X/AMLXHkUJggkBaAf1HtezFGQmJRdiOOIUtNzQlOomSLcq
ISZ2wKfOv2bqleSYBhxVSyEBMlrBE6IniQqxCfT3K1slUM9guAxTfuE4NfX2M9VdMCoRhIQMCDP+
YHNbvkDo1y05KrrmuXY4qq44qe4T1AEbcjeMpUohOYkIzEupx1FkNqcRqlJ9b2dHa2d4bURpEiS5
lIB31ZIy3vqGkBr2g/TpfBGTa5AuZVJvimnkRS+a8x9ZJFJUtyW3GDQkbgHpD4B15BnHe1xEhMEn
QckYiIkCGdrZIwgdM+JZHUWlEt4KJEjriGBeo4Kf04LQ3CDIZsmfvXT2n09gONk7r4Q6IZjiAqyM
QKiqiRJ6MeKtUWPJEuATgvikAF0umfqKtZNGTYoAR10q+a806olwC1uyUu7sqnnMDhcptmH90kdU
tQOFh6ENwQbMPQqBjAwMcTYqlDwdUJHemMiAqF+aqxTmIPiqDwsmL1uETEMqS9q6og+hVBHpTiQN
bMxVaIbm0fzI1BwRnuzMjkbDkE5qmxQBZNCqcjspRlU3+w3ihGJoE5sqJ2onlbFG3TQ5lSnsbJ61
5v18oiLUi9QUfp/pYhxSMhgicDcpzUr6f6TQIR2cRi+a4HBaQHdOT3JiiAQnEgWwdaJfFFMamNQc
URKhwOKINGQEi4FjigNXJ1La3WBGPtW2SR52kkN94CiY9JyW6D/9iX/PDsiP/wAMPXJNGOooGYqa
tdWJTnwQGkc7obm5LTtg11Uco/TfTAQjbUaEtkjKUtRxIqUWBAdGRpwdnVbZJiOnKycu+XZkM00a
5lA3RTiqbxTvdUXNXTAOBiq1yVU/YMfWoiJBLVKuoxzKjFvhiAoPeQtwU7RgS4Kc9Z4+5MCA2ARA
DcUSa9lbprBZBPfn2VXtKqe5YclQhlQ+KcnuQc0FgFmqRrmq4Y9nqTelAYcE69qos04rxTGqoqFZ
q3ZSiurA+hVBHKqv4qhB+zqgWkLLW1WHctRfVNjIcUGsQFx7H7KllmqRVSArk8lbx7Kp1dMey6om
aq5I8VWwTp7q17FOayJYDNec5ibMDRRhL7xZ+Kcm6zCFTVHHmrI4d60uBQkk8FKUoedERI0mwB+9
3KO3I6BK3sCjsfVkShNjCYGINYhR2IS1x24gAG4BqxW5IxaUgInVYiVmTtpMWwuVOP8AtxtmWnRI
F2Mbo3IFmuhQgWqGUya6yO4jFRnsxcxLyIoh9YDqjMa5k/drpRjD86RA6oA3OHcjEhiCzYriPsOK
iyoWQBq1lmTdOXC6blVJdB0S9LqmGOaYVKeVeC+FjmgA7+KMrUa6DFMQX4VWqIMgLmzc1oltyrUk
VX5I1SyJZkfM3CI/gjQIevsovamOKy7M1SibUTEYMwValUoOwHDtfJMLqifEpzTmuqSoe9B3KrTV
4rSbYFfFRaSakOnNl0Vg3enwVKkoae9MQqLXuuAKoeWa2C1WjiAo728SQRqY0CPXEziKQhdEzkdD
9MBYL1pl5k/hij+EnpRGIKEiHIuCiWYE2VUwTk1yWraHVGpKY0OITj4ZWRjuR1A+IT7ZcZGhTVDY
FNuDxQlGT6bDEclp3Il8Jhbr1idiTS/v2+yG1pFNqJ1Y1MkKkcQhujceJuMSm2AQP56PyQ84w6qA
RmJHwFkBGWmtXqvKjpAdyUDq1NgxQjAEPUlmW3uhjEkauD5uoRgARAfELE9yJkKmzo6S/BDVcoiI
Ms2Dq9Rgj2MzBcOxya5JwO9ObBMb4ss3VmZOvYrVV7Kp8UIQGqRNAFpIaUTiqATnmQomMpGUbcFG
U4tuAVVSyNG5rqoOPuTC/wC+CqH4KoY+PZVUp2Z9jJmTXVQ/FMVkRZUKcyYjiqSJOSpIDmukge1W
FF8JD5LJO/gr+wpg3From4CdmV3PqWeacEsPu0RmC+aoDXA0VL4o34ApmrkrXVac6KipIhYFVHgr
tzVCokihDqoA5oxBDBvUs+SssAqyVvFcDitMixyKYM6pVCvgr3VS2WSoVZO3emGCLHm6BwV+9WLI
UNUQbLJVN04sLFPEjTsweXNHHAqE9QgIziCTxxRiSHcghPhmh6kU71wRkbAOVKcm0zPQ9xpv60DL
8wSjV3ArwQ3QCYxILh1CUJGjvA5nJSEwZH28XUDWezEDVGLsAtzb29o6YDTtiQcDVd5KMJbggCSJ
Biw78UREvF28FWTEVWi9yStzcgdJ2hqL0ccF5es+WzGL9NeCcFqvHmEZyLylU5umumQz7Mk+HZRd
UBRVpwXTLudUPJMA5TaSEI3zTmvBClGqE9smTPzeyZnCeJdrd68uRBAvn3lHQSC7gXB7025EdIoW
r6E0DInK4rzWmUYyoGvCR9YXVr2+Mo6o/NB0+0Y7o/kkJHwuuoEHjT1p78Vl6EZBzEXKrTgrfv3K
y4+lcfFUB7lq3SwOBX5cWjgZKtCgKnFlUCIzKZnbFdIsr9tbCpOSpQWHJUREhqGI9yjuijtVcD4K
netMhWVu5PIgLp8UYzEpHGUGLdxWqG8BwlCQI53R2djcjuPkD7kOqPnEv7wpT3wTtyowzWiB8vbA
YQGQzRrUphcpsewRFI5IvM6mcBj602BTutWIWaZ25LUPhBYlaY0HFCbdMrsgB8It2N4Om3IajhII
6HPDFMV1B+BW7GBL+TI6Th1Q7IyA6BswBPfJdGwdzdxMwwHEBHzRY1Y+1atmRAnhO7IyDgD4twin
cgNcpE4gUVDRDVbFl5hEaMQweXghs7cSNuJJBNH7lOJDi78ew9hjCRiJXAxRI8VW6DYdj+hOaK3e
sygXY5IlNZMbql1ZBqdktuMYnUXJlEH1raIjEGAaPlwjtgDuUpzkSXvQjxROKDRYLNMCa4Rv4p4h
h6fFOaLBUVB9jirriqJ8QnWfY7VTFO3irpnRqulnwGCd+rNkOnusnMADxTsxxTB44fvRUJB/fivi
Y8lQge1ORW7u6cR74pyO9k4DgYYrURb7py5p3L+vkmtKzH9ymcA+j2IijgePrTtbJOAf6XQYBzgU
WiKXb3BVix4hVA7lEx3ZMKCJqEZT3PYEREvL98V0mJDPnRcOCcjuZN/orP6AhRkzIhu9MnFeCrcY
4I5elVYBA1fJUqc08vhagQAqTYNZM3Mq4bl2ME+lCo9KbBGX3Y+xbknOvdi5pgi/TE4tq9DoiG5r
g+pyNL6TlVbkoNplJxTghUCI9iMu50GNSq2FkYg3uMxko7YDRjZ7qO7Ko27DlgtyMNsShuHqMq6c
Q1kTIOY2CntzhGcdwXlSUWU/p90RjtEERkQ4A/mGK+nn9PL8zSY70QCxI+El0JSALM6aNeKCZmUQ
HBlR+aMNuIOmLSOZ702KYG+OPY91Qpk/2MQeLJh6FU1wKpLwQqK5/wAF1CnBOJMeKpPwZPGQLZql
QLB0TuAiWEaoycGV2WoGmS1xvJaHY5oispGhwZPYAv3qvVm9kIxhpYYMxKrCJOGEvFDR9RKINhMe
ZH0ok7e3u/zQOk9wlRCR2/LNuuL15hwmhtQOe5E6pKo7KdjGp/DFNTajxrJdAM5nE1KE/qCPpto/
en8R5RFU2ztefuY7u6H+WNkN2EWhLKwKf7FuzRYyrL2BVCyQzR2Z/FgpbUuYTIGIBEbOtRq+a4n1
oCAJmaBl5v1u4Ij8IqRzKifpdPlB+oVc+tDTBjENKf3eICMIy/KhUc0TcnFMKk5Iy3ISicCQQOxs
CmBTd6BVPBMbKtjZXUNuJD4vQuUJE9cvhiMVGA2hCIubk95URJpbcw8JJ4UKMSGR3IxBg9SnMAQb
sjIyAPJiETtSE4jxW/E0/wD40/8AqbXZAbcT+nHqwvJGcpMRUixKMNMQ1AALd+aEKyOAwD5owAND
1UoAEIeWx+7LBlV2yQIXNOEcESO5OR3JmZUQN05NVQ8z2DMqhpgi5V3TmnFNGpzVbBMPFOKuvh0j
M0TDq5KkUDM9ychyLE+xepOfEq2o5m3grISmKDBaQBEeKcmvindXr2OLK3Zbsr21WfYybt9iyV1V
cVUU7M1dZgWCo7ogMWCJeoqBgjR3Ns1HpbTQgH1sjKTEACi1kUowFECIs+ZQMT1msjki06C5wVJd
ILBseKEgQTZnsumripQcFhZxdfDewIQjIB+9ARofQs83V6eB7kzsRQOnd5E3TGuZDBXLck58K3VS
54VTPzKY2xOafF6KibV6UwQo8V0xZHjimsFyVxyRNGvVO6NSSmrS61AWzKdqlaGcG4UoxFBE+hTk
KtgiZf8A1KDkiIs0ccbKhLGqLyocAqnxQ6geaIwxQFipDMZKe2duPVV5WtwRkR4VqtdA7MAtQPNV
PcudlwCbBDNZrqJc96eLuM18JVaFWfkrXsq0VSOSuFe11dyEwNEBEHVdWdhZPp0kWexCo39tqqrS
kcChCgf1pgA2JJTGVDxTDqjclEGqckh7ogUyN0Iwl3oyPWcFqEaYjggNBbkiSC/oTehDcBBiM0Nu
UY6RXUBVBqxywTlyHrE2TiB1s7ih8YogbknakZhw/ejAgOPvRLB+9SArKOGfen3ZsPwx9607YAJ8
T3obn1e6NqBrpHVM9wsm+k2hE47suqf8E8iSTiV0QOn8RoEYfU/mahUCjKW0QTC+3LOK59rJz8MU
S71+wCCy29771kwwqVSpKhEh5nFdB0bYoZH2ISMDuF2O4Kkdy07k4gEGj1L8FPZMpS290uD8OnOp
Xl/TbImzgEmjKW5KmrDADsG6A8olw4dHZ3gDA8EVrnsjdGRstO99H4CviE/0sJQibiTFOEMxROna
g7KFPuOSnwQANsMlolcWWqNTkjsTGd8FoIOoFwcCFVivgDrdMQ0/9vLw17fZChby41vjJGcJUdgD
iuphAXJxUmIcYvmiBuCMMeaAMozjH4THFWTYoAquFk4wVOwvj2umVTVZAYrisuylezpiTyXWdIyF
10x7zdU8E5xTXQMfAJnD5CqAAv4pz4qlfUmwVu2iu6oqsh2VCsrK1VkqKh7yrpmTt28U6r2UVbJ1
dgi1uziqd6BdODU+ARYu+KpbFAjDBPjZENTNVsMFxFAEz1N0ASzVbMqgrgE5FbJmD4lOQwAROTWQ
YoPUle1ZjNWfJBZLvTY9rDxVSwFlevsReXPPgnsBmnIr90K1ThwTRAbEpwWiFatgicTYJnriUwpE
URmLNbktRLmSEXdy3ip7DsCWB4Lyw+mIAiRwVBQe5CIuyY1LutOVsk88RQI5Zr2JjjgV3MiNTZK/
JdJpiE5XLBVDsmEXGPBOExqyJF1UcFWuSpgumuIZPxYLSE0pVGa1EMDiqu3BUDvUFORp5qsgSRV0
IhyRYsm+6BWOSIFhgyeRIya6AlUmqET08U4D5JwfioQSqlokYKNbogF5E0RMTZMeoNUrqHgr9yAo
smREZMLpm7zVVoEKuJIsSOIOCoasqy580BqccUwwuoylZ6JidJGa6ZOFHcn+ZIgGtqpgGGQ7JyA/
N2hqgeGIRHY/h2R2xe8uf2Mky0YAujPMsgcAgT4IwLT3G+GNvFNH8uGEIoFzqzVX4v29VsU4Tikk
BiEDZOBpj+KVAml+fLIUj4roiIRP3Qmz7CDiGTEEl0wHcml8S9hWqJ9ycG1lxx7IzGFCmGGK5rf/
APay/wCptdg2xETl5cSYnJ5IASZ6gXATyapYnNGYid2QsDTwCO1KAiAbMxCAtnmjpd8OKL3Qcc02
f2LUVAiTZP4hOKLiUQn7GcvkEKaI5yuq9Z4rABUqqqmOarbiqgkeAWmrCwwTm/FUDninmX5pwPFV
7KBxmnYNwXAKg7bdjX7MyskwBPawHb7EWonPpVQzJyEyoXVb9lwr2TqyrgiLq1FdlQrI8Faqs69q
DoNbNAp7p04VmKp35IU70KHuTMuKonNAq4hMExs902SpbEniuAQLscBxVKy4o19KqekLU9MDZGUr
n1YKkalCLnnZAPQVqiTQCxdHVco3zJZCpa+C9HijLIOeQWrE1UWbTYcyiTdOc6BXrgAnGFkSO9HI
r0ugRQom4HcgG710myGrG5VKjHNlxOCf4QTRHjZPpAMavcIEDqduDIFuo0Zaa96ADg4EqJMeuLly
jIRAJoTzXTERAqBxKFBThmpAQ0yZymAYE4h0QQA1iLOqkMDcc05JJexQJAAlYtWiAA1Y6haqAl07
hqeIT6iWNQgQXfEU7lpi+vEqmKqaNULTIMMmTvXLiiZRcIl2/CusagRUoESbuRZiHoSU5qXQwOSZ
ymLFVDK/cndwiYlUhgqxPghRNEc2QYuMXshOJJTzLc8u5R2N2OvRFo7gJcnB1GO7Mbe5MWeg5lbu
1PdEdEiImRAdskdmE2LsXxfJDe2z07pPTkVZVWo/DAOUZHHtf0Zq3JcUcrIRxUXuaAKG2KPUoG6y
VUUyYJhZODRcVUsPSvy41/FKqectXYTcKmHaTtgykKkC9E8g8nZAAUzVKZJ3HJ+x06INQVKORZDB
b5Nj9NP/AKm12GEiHltRcWLPLGylME6ZUAFUJbY1NeTWHAKI2wKF3mLImJkd41kzaO7Fe+6wbhin
JbuT3As2Kd+7sr2VuqHu7HCq6wB7Py4E8cE+/P8Atj7023ER4/xK6Y9P4jQIknVIZWVwBzWmEnOJ
ZVoTlUpgGOdynmWHG6z4CvpTR6VU1yFVSnHFU8VenFZL2Mml4f6JgAI8U5N+CJemTKlTwVKnNqel
V9KsOCzWQ7aK9SqqioXKLOCqnszCuqHuT+vtuulXTLNVCr61Qss1ZPRX7a4qnT+/FMXPJAZr2qtM
yWTE0OXZSpV+zJk+HBPbirqhCpUpmJliQ606bc0QQxj++a1OAy1WNrPyTu0zg1HHF0wpI1JoyAiX
iL3I5UWLDG7+pGWDU/d0SWJ4HDkmAcYkf6IBxKDVLVp3rU7m4DintQiDV3NXb0IZC5D09ARke4f6
rcwLNf1IyqJO4DLTKJIuwLELpk0rsaFPpPNUqccLoh1W5VlVcfQmQZ3IqshmnlUt04J3viFiRmMF
jRVrmqnCv7lPwqv6k2SOkauHBdVImhzrgpaaAmhwpZHVUvU5BPCJkch610s/70VXi1SOCaXMc0BQ
xliaVRLFzQmvrTEuxeNHHeyjEyAkLAexExYyewu6BjGQI+IsmMcXEcSO5GcoHRi+RXTEsfxexNAt
LB8UHB1cf4ISmNQ4r8vpc1BxWlwzu59iYEg2ICoQSDknANbsF0gE2IutReJNkHoTSuPgq+my1eLJ
wgbLqkOTglOQDE2K6o+1VArnRNC2SfS4yDFEyBEcAFpFeacMHwKeIrYkqlQg+ksa6vejs7peQLwk
SGCY9gz3D6AqU7a4JkBgKlMFquhwRe2CZMSm7GxWmNyup+KqrrPJHhftIa+KqGXJVQnA1iXR3Nht
r6kVMfuyR292JhMXBRJrknJIOTIgAkZlZBVsqFS9KK3xl9NP/qbXZESuNqJ9MkRq04p9uQj3XTmT
cqI1qMbp5YrpxXUHZHLJVDZdj/aYAkp5dEeN/BAyGuQ/FbwQjAcgFq3DoHpTiOqQ+9KqJnKmAVZP
E1pQKg6VRXotT1zVS/ZkqKp7Lq6x7Muz2JnTvVVqcFUOqd6uslkqlNfIpyPFOEW71bs4Z/YuqeKY
+Cv3JwCnZOex1n2UWJ7QfFO6a/FNmv4J/wCCrVP4K9VShwTNXBAZ3QALgO/BM9BcWTNTvKrTJVx5
K98aI1fAkepO7jPHkgcDnddJYG0eKxDhhEigQ1Cpo1GVKEi4s6OD4h0WsauFqLOLWTkgnMZJrnE4
pvGiBoNJxYepRhA9TOeD0Q3H6j0kMSHHFMGBZ6sgCQBiHute3IDbArG5U4guYChIqQaIeZWNqNfN
NJuNVQseDqvURiyZiFSvNdQLBU7nVUE4qyc1OCOKf1oMW5IgpnRLORguuuTrTdrlNgPFA4FVFkZM
5OJqtIIjHJXL5goxiGBqidyJAw5oECpHNkKEAYYqRbpqziroB2hGkmog/UTUOKqJETExqzkErUA+
kIExFGY80ITgIyNa4p9tw2GDlCRjqNwEdZIDs1gtJqc0Dp+KyIMHAK6osDgmdoioBKdq+tHFygSe
ToCdWKcUGSoaYpynNmxUt7bBlsxYGWRWl+gXCqGPFDpJkSxL0C5YrVGWrgzoT0aokPRNODFUDFAx
JB8VSfsTAP3+9B4kAJpRJjmQ6EY0kbqMMIAD7Tm8lSh7DLwTksmjEJwE9lSi1TqUJRoRVHVV7ppP
E5iypOuFGQPpTeKp2GOdlokKYHJNh2cU8ZMRktH1ENe7EdExQhcFZxkUAzPZNEcz2MboH8WKqWK3
pEgg/TTH/wDs2+yMDY7MH8ZJx3KtAmBDrqvwTyVLJj2Xt2vc9vRElPuy7guiLccexsFkeFSmPXLK
yA0iMI5Jz1Vx9ye5yKL96oOzNW7KlN2WVlZZLNO6r9uye3ZX0J05KurXVU+CorJqAqpdUPZ7lZwr
A8DZFrq3eqW9PYQD1BEZJgX5Kzc0MECbEpzElWPJZHsumaivzCYOOKrj2cFQsXTmhfxWmXc1LJvi
BPghEBzJXESarSeZZOBS75p7C4zTmpFRmnBL3AKYFzg6pQnHIq7YqtBgg4AyZOS7FMZA1umB1HGi
+GisxA1F8AMUdzkAOARiJVdyFiVZEzDixHBSnEGOth4ckNVxkH9a1BqWal0QRyKL2enJF68FnwKD
Aj1K1OC6Se9OCCmMSvWqKqDDxRJKddNuPY+BQe2QRBo+CZ3QD1yQrUJ3DFNcYlBw4FUeonIFG7tg
mFSLBdNAMEXOoZ5LS2LOMUCbUIQLshuEuLDuRhcgOPUog9OaIYXcOMlIyDHuFskBMdOYK0hxI2Wl
qi5RJADUpjzTT+96kDtmhxOCdxJNIVGAyVLKrhEBdRAjjqOa3Njb3Py5tqibdy1D7uXtTku+dimi
GBqUxpzUvLP9tlHa3XhONDkto7ZBejhXBWSofFVAK6gmJvmgw71Muwsml3EfY0jvWQCbAJkwLHBN
PBagVxVT3Jz2P2cVwTPTJYFM3gqV9aerhWCfsqiezJA7c3niGogJQtkvKc6TViua1ITu1FRlujAf
Ty/59vsj/wCRD/mmtOATHvCd+ScoN3/bYCuSeXQON08uo8Uwos1ktLGRxZaWLnLLmiZkRGQ96oen
hjzKfWBHEow2hqJvIqzFV+xVUHZXtoqVVuzP7HrTJln2UTJlWyoqnu7M1dPiexynHZmey3cn7L9l
7q7hUIB9KBJNMEIiDiNQCV8IDWYLqLZEpzJVKoTyusiU+CdOFUWWbW7DShstQa1UauWsqARLVQ1k
O1SECHLVexT6qcTVanr60+qosE5uqkE5umJBzLpr9yYBxZkxFOaqUXkreJXwqgZNKQiRn/BT0y1C
ZEX/AJYD3nsGgOTRlq0tHNAyk4Ja6INBSncs1r3A5FHwom8WKoXNy3rWm5xoyb1rMeKfDJEiyMs8
0SKcE4BlwK6ttuIVKc04YoagXVDdUKfBUwV+5AqiFa4omxGKPDG6McFeyJzNAnTBgEMWxQIPMKtj
RF/vKUXc4JyREO10dXhzRHpKOrFmK4NTkiJMQLIkggSstJIk1ig4ZrhGIoCb5IRPUyZny5JkBGJ1
HOiOrciJfhB1H0IHdgdwZPpqht7OxHaY/FGp73QLO3pTENPiiDiE7WsqJzRS1xeMssFuaJDU1jdD
Fqqi9iaiurv7F8QyQJetKFDQW1Mary9wRkDiDiniXj2MA5WcjdVsMEVxURkF1VXSSEaOc+xjcX7X
7LKgr2uCmnUZrgmxwTY4KviiydqIuskCLhPK/YyO0Sz/AAnkmyW//wC2n/1Nrsj/AORD/mmnunHe
qK9cQqHu+y4iWzsPFAbngPemhEDjiqB1Uq7ALU1DiaJ7gY2CFBzsENUnPCyJJcHOgTPrlgBZMZAD
Idlu9XdOqlUFTiVUqgXFUVPFVVQqBU7cu9Mm9KzVLp1ZOrsmVap7pxR1Uo5Kh7lxVVQojFW7KUz7
KBVV+y1OKrZdJ71U1VA6aIHcFUsOYRckgK9O0UtUntYDvTYZFUCAwwRkyMompwCN34oxADkIAk1p
ZAVOF08anJkdOGZARBAHG6LViMbetHcgH2qahkni55Kob+ot60ail2Lt4L4vAe9UiSfBBoMO8rpi
QCaYJ5uAc1pnXSxa470ZSnqkalyoygSYyJIBwHZt7sKR1fE9mzWkkVu3uTsS2IGKMojqBDn0Jl3q
tGswZE3axdOebFHB88E2sB+9PKTHwVjJaRFuaaJpiAFfxonceLpgARmBZVHsKcEAYg1X6b8RRYxO
V10tLgtJiQU1gvUqU7MgnvwTN2NZOE+KARwa5dXcDAoRmaCxX4gsgckHkNQuJFjT0INoJBuZRB9K
6gM2cEegoEbeocx71qAMJCpj/oukdMuIHrK+GvMe9CWqEc3lXwC+J62AJTtuS4aW9aaMTyktMIRg
P5QPWmwTYInUqGnFUDqLhiQ60kiNMVXcfkmjqf8AfNM7gWe6cxY+K6TXFAiyoCFUOmqOazOQqtU5
x248TX5RVdMjNsWb1powjE2cByfFCfcUYgrSnAbMIyOFuzSO9PgEEyy7PWtML4lE37W7adjpmpiF
+WX4FNKJAwV04C02dPKLplSib1J4s+WKeVAtUI645jDuTkGLZp8uzeb/APrT/wCptdkSzjyIf801
xR9XbpAvldeZPalHbNiQhpiS+SYxGzDEm/gnkDuSzlbwTDwCBEdMTiaJ59UvQmYgYUTPp53TyDNY
yRHxyTEO1hYJzTimDyOaq5T2OQWSuq24rhmsufbT2Ktln6EzUVWVTVfu6oOS9iYp3onTWJssSs1y
wCqCTxKsn7MlRfwWaZu9dI9nbirME2ePbZs1fvV+a96PrQrXkmAJL3YLSSzetXTu/es3wTe1MGVI
1zVVUgFUtmUAPFEkqqJzVDVVNVQODdaiGD3Ro+QeqcHqazIGQOo0d/cupicAmqMMkSznGqZ3ldmQ
jWzkOAFpkQXqS59iEQBN6ScPRSH056CaglmKOqYDYhy/JaAficeCJ3ACRQxkSPUnjCMRHIOCF/Lc
MGRd5A4EqtRYiRdGTX8ETiWAoy24RFQ5LcKLiiNyOkkvCWNl+YTkGoj5RIFczahK6p6pSHSGYfxV
VAyBq5i3hVUj3mqy7l8RPZfuVAeac0GZKPWO4EqhkW7kXAAa5qnLv/KE7PkSapmD5EP6U1QcqMqg
cS6ZhXIJw5AzstQIHAl08oAjOLhO0oZHBdBEhzT6e8LHwWTIR9SPBcexk4WKNAArNxRAxzQBqMEz
sQXDpjJvSqnwVXL3dUiujTE8QuqXJlUOeKcAR5BVKG0ZjUSzkkAd6Evppx3JEPqk5B/pYrRuCGgm
sokgjuqjKUhLaJYbge5wkME8iSOS/L2uRLBWERwqUXk+mlqqJkHBoVosclWqeA5g2VA/FVqqFuSE
JM2ZLBU3RKXAH10R6YyOGsOAuqTgUADRA8AmPjimwVQpRa1QmIrmhY5K9EwTnBUuU1ggcMVUq9VU
0Ga0xBEfWrIDs4/Zd7JwQU0QSTclmTSrAWBshEsdsWDZrVEMcQg9sV01CuyaIJkcI1TEkAUJa3cp
SEoziM3HoT/eFslGYLvfgU4NVGRAFKkUdNpr+Ls3v/bS/wCptdkf/Ih65oensDRIB+8aDxKYGJ4g
o7v1e5AbpoBJqDg6jtfT7hmYH4QCx7ygNvbEZY/6qkCTgwQO6dIOFynjGv4jVBjzOC/JhrkfvXRl
OnBZcqldXSPSjpo9yrd6p2V7aXVb9luyz9l+2nZVNfsz7KeJTm6onwyXLBMUMXuVRPc9jDBOVgmw
VFkuCcF+yqrZdNE7plVO6ZnV+xx3hVisVb0p/QqBllxK6incyH4cEwFMlWisiRVVovaqVXDBMU5s
n9qFbWZMaEZpj1A43TwJqeQRDVzJTEABqyqgIEnuwREeljVymIAGd0BAnSO6iHN/FaaME0idLdyG
k0F+S6Qy+E8UKAN3Jn8E7FxivhPgmapTOR3r4yxNuaiCdemz8U52w4FCPWqjxXU3imJHgT7k5MiO
4JoR8ap/Yqk0zKaRYAvS61Hq4OnAj3hUBHJMAG5oRJB7l95CniV8Q7gqEvxCpBjxKqYxTHcJzZNp
MuaYQETxXUW5KrGPFfARxwVJCuBorPyqqjscVPY2GS4qoTqz8MQs3Ks5T5JkxNAqFyqRktRDehdU
xyVjLuU92I6XJDZHNaIzOn8OCfbqeAdMROQJBk0SAWzVU8XB9C0NU4oyiQHu6BcBjgnkHKYE8rpp
ABEy3I8o1PoQkSC2HvWgGMInCICeAMuBDJ5RbMJqhN8QxZF6HB1VtQKoCeSFKGhRgR6KKhrlgnty
T5U7HXsRGQ7MgtMfh9fY5NPsMSz5oPIVyV10X4qpp9m49qc17GTgsc00w5zTxAaN44oTHTE/dupe
VMiYwtRPOjB60TAO2Xbvf+2n/wBTa7BuwbT5MBU5GSE/rtwzN9EBQcyjP6fZ8vbFIRNe9Ab0ZCR+
8TipRJeAD1QgBQYoQ2w+JOAQlvS1n8NgoxgBEWogSbXKM/uveVAoxlURNTYIDYD5Niid2dcYhOAn
JcnHsz7a0VezhwVe2/Zf7FOx+xnVLq6ZZdtE6qU2CsgPQE+CfFUumXBP2VT2T9l+x03ZS6ZlRZFV
7LOn0smBVSm7KKp7KKqpUZpz4LVbgnq2Cdl1YWVO90PWqScDJANQXBQiwD1pwQDvmmCNLpwmsRdy
g5HcmBrkmtIGoTAP3JhQK6uqAlMzKpAVZrpclVi5TEAIRiaNYIaizp3fkukuRgtJJDigQdo8U2rV
nRUs2KIIc0ZOGOYCdy2SAETwVIgKsg60uSeC06PFMwiF1SpwTuZZhPEMUDEJ9V1iqgeKZwc6KhKY
xA4lMaHMLSOv+oJpQZ8lSRB4pwxXVEq1U/YeC0i3DNMAqkRXVN+C6YGS6dsR5qstPJdY1ZydMGfs
nSrIHio7k30sQWqj1TnwZkNwUEqqlSmlLTIVC+Gjt1XRf0IAd6YSEGyFVWZrVPElOY18VYOLGxXU
dQwe6rEsm+GOQVPSusgHgqDUU23AAJ7Mo7gHxAFajTknNrIuKZ9rnBEplpiaY8VwVMVwCqWXxcbJ
oh3sVYk59nFU7WFUBOTBAwbiStUOoYsrJxTgqX7dWAUTEaTHEXdfzeK0zJAauK06gMzgVqj3t2b3
/tpf9Ta7IxB/+jAt3yV1d2USJS3JWEBb0I729zMQaobW3F5FdDfzzpUomQ1SBoLo08sYEVKYVxlO
dfQpbm7YUiDj3JojTAWQafV6k8i57XPYGdV8E6ssu2pfkqUWat2e1Uc9lOxl7lZVurqiYqnY2KdW
7R20tcqiZ01wrPwXFV7aqg7KqifPspRUqqiqe3DsfBY07OadUoVUqjlUomuEwQFuSYVWnFlRlROr
0RIeqMifFFPGoxRhSLC6DSeOLZKgNFGIuaOc1VwUCBhijvbHTIV0gojccSGWKpElACID4lHQKYME
5eI8E8zzqtRkjFnFKr9N8iiQAGuEKvTBB34MnlHUR3oyBAewWokg2KdtWROCcAAHvTux4J3Tivcr
WTEsmlKngqScZKgTgNyVSqmnEq6YdhYFuKGtgOYRETrGaLgDmqsx/D7ViXVqZlXbldUJdAkO2awb
FOHXSfGqrtxPNdcRE8CuiR8KJ3EuRXVHS9iy+J+SqHTxLDLBMaEXWfJagQBkKlOeo5ELrg3H+K6Z
MObqURMHuqiMvYj2APVqIuSfQnBrkKosTnVapAu1U5LFAWyKYnkql80+p+C6mbNGUpHTgqR180Ix
jpyomMiHyXxVyRANVUVQJLHJRJrponAvhddTEFdNAKUsnAqbjs5plpHeU/YwOo5BCRA0mwQlEDSb
xWkAaE0O8rPsorosHQAo+Isners2PNHWGAtJAjcciz4omYBDUARlEM+C/NBIOINV0HkVke1xYhiM
kKavWiwYoTMgCaIkSOnEXCFXJW9/7aX/AFNrsEiKjZjV+MuC07UZS4k09IQO+dX8ooPFadqIgBgA
yAgHEsTZSn8UzUgUC65CO3bTHBGGyK4kdUimIMMhcovLSDWRJeRR8uGs/iJoOaMpESkcBZZHJN6V
Xs4rknJpxPuX8FpiDzXvTlgOCos1bspgmDEpyVVexZdjlOuC9fZmrsFS/FVKonLBMVwVFl2OydlQ
XVSqKjpyFSicl1bsoqlZDscdjOnuFQsqyPcmDlVWPbiU1lzTgueKaQbtdu9VRLUNU4ABzVWDUVQW
wVAzXdEFhHFkBHDA0RBYYJnJFqC6JhQPV81pkQAA7oN1Cx5KMIUJPoCBGHrUWBOoOyfQWGZVJRda
5giRuwZE6NQJ6STgpUxom1ClQQuokjKzqJEbscTdDc0wlKe0ZecT1+aw0xAfP0IVYilkXdrpwBUY
pzQ5WVaDFAC/ihRuK4KhdslU6ebLqLng5TMT6PemiAPSnPuVSq/v4OqeP7snbv8A9WWJ5D/VVLc2
HtVZx7gZe5FtUvAJowD8SSmiAP6Yo1PFyybcmBlcoRiTPlQoUofxFAsBxF1fU+CoC6BDdyq8TkWT
TAfmUwrhZUh4lUIHBVdWickQSxyZ0Xi5GXuTAmI41TyYjEiixHFnC6ZiQxahTsed1WII4XX4T4fw
T+YWNnZeXMsL81Yd6tpOYU44OmEXJLCqZlpD6rsnzTRN8URKVcEwBlmnyzV2D1Tx8E7NwQcRrZqF
MA0srelDR1Z8DxQ1gdz0TvRC6d2OGCcycJoh0zs+ARzButT9JuiBXjgnkBm6Ai44JwQSqqiYCpTS
kA2GKMhInLBOQHPiQtLMAaOVUDapUmo4LVIU/EnaiwHFVKcrXtgSwYGq6RS9UdYcmoqzIBxdq0Rj
uEFx93+KcE0zRAoEG7Ldl3UjNqUAKrGMntgnIbIBPqYCwIRDNAYoiMmwJRJrW63wb/7af/U2uzzZ
wE5iEQHrYnBEyYXAH8EGaIuZG/gs2+9JP8TjkAFpczDfCKRRE5BmbRG3ii+kH0ptmP8AcbJy0jng
EBOQkfwiyb4RwyTAuycUBzWmj4phUYkBO7IOfaqW4BYtxTOOypsqKyvzVSqI1vfst2OqKhWaoq9l
/Dsurq/ZUqnZfkqmq9qumVnVB2UVVdXXrVEzrgiMVdUVez1JrKjt20uslS6cgPn2Onx7HJYqjmOT
oSDBETNE4oAqluCZjwQIppJBBREiNLNRDQCWzsyJJERZrrQ5kGoyBESATV8XRFBwRnORoHIFF5pD
HDkqlVciPwtdMAzXBXUwBOCYFzyWmIIJrdF8apwQEdTngqiJ6QBqGqr0W5uNGQ2tvVuEUAjU0Vic
U5sUQaMmkaGycFxjR1QPkmYetUp6FU0H78Vdu9ldZ81Qe1X0nhRPOQfm/qWMuQ97ptD5OSgABEZx
CIgZGJ4lPIimDp5SPcEDHHNWDMGZNK/BOAZNngqDTnkUCajhcK/jZB2BzCZjLOibSwwdXpwQNZZA
0CfQ3EYL4gyoZJiyYypgnAJGIXRTPJXY5tVEGOr9+KMSNOThVPTwwWoPJEQJieJf1pp6ZDNNOJHJ
UA9SePSeBVGlzojq6SKMpGRYkuFQ6g4Penbm/FAx+E49jkVwVUa1VQufYJAMntmmJoLIA044p683
REi5GKrJimJ4ql078wuqmRQkcSi55h08ZMBZOJWsHoiHAJw/FyUgA+AcYLiPiTM9apgdMWr+4RNy
MM0RLbZ7VIUYxJaOGSE9wahYgqMHYhmIqyIhLVEWpRDTY3yCA1MSaD+Ku8k8pP8AytRUk3FBy5zT
37XJZXpigQqWT49rBNGRByCqX5h0DODgi663TwEZROAZ0RtR0xuRxW6cR9NMH/8Ac2+x9TAQFBe5
RkQz2epWs3IuUSRpjgTj3JoRcZysmkdROEU0RpHNaiCf6imIeWDWVSwwEU0eookhojuCe0cyUSGJ
amJ8StMiRC5ajry9p5bUbCLt/Ffpgy/DX/vFaI3N2CaR1S8U7sMuyic2TCgVVdO3iuK4KtkfYqel
XZXYKnp7MgrVTIk2TAMnKy+xfsPbX7XHsoE5CpR17FknJV+wB+ynZUriqqjvkmIurqt8lwwVSjQs
gQmXUC2CBDBNLuCcB4tUFVIi/imEiY5hBgxNS+KLkH+UKpkwXVEE58EZ6hAjpI5K5lEetUoxq5R2
ZtomCJMp7LuIBgc7dhMQ0SDU8E7knggDGpzVwHuE5ctUJ2RBLMgDKhwNFMbchESBGogEhjRnX1Md
0znLejpiXp96/igwHrTDnkqq6xRDMmCqWXVNUJPJMIPzPuXSAO51+YSeAsnJABzK6p14BGhl6E+g
MaVqsKZLF1ZODTK6urWRYsMkPWruqllU6h4KhA4JyR3XTivBOAxCaTDgtNSnEenF8E4kBmAgavgq
Ad9005NkqvJ+5dA6MQUJagCOCIk/FlpjHUM7J20xxaqHVW6pGqaVBhI1TyIlwsngDE4MqyBzBFU8
4mJ/EExaY4p20lGlHTEgAWVagKvcEx7kK0uU/oyXFc01ynNk0QwKYrkvaqF80KvwQOqoVTXgvxOb
FQgbgV5lRkKkGq1gMDeN6oSBo9lgwsVWjpjUZrULYFPEkHFGLdQ9KBl8RxN1qJfB8VUjScSUBGpx
JAKZgcUIwLRFwEDObtgr9hfC61C2aHUA9qpgebI1JyKeQYEH0I1AMcOCBABLYIghhkqHs4dgOIQB
HAkUTRJMcQmADvdlqoGd2XFbwx/20v8An2+wGJaOkO1ypFhE4TJcuiZdRaspH2IAfmEZWTAkR/DF
N8AzJRABPEqvwjCyOkGUjRsENXgKBE0fIXWrcKxbAUZCIi4ieqt+a0We8Yj1laYUGAFE79zos4HB
UKqfSnkUwVbDsqV09jqtSvcndiMFdUP2LKic04lOn+xxTi/ZxPYFRVur1VVwVAqKt0VZMUMFSpVV
dVTRFE5XBPhh2ZuuPY5xVk7ssSn7C5Wa1AAUXWaZBExYxwdNKjWZdJJKfSGRGu9aLrBNaFCQaLYK
UZGlmCiIkyiKELQQIuHdUk5yCLAl7lMQCc09HkA5C4qjYtndAkmt8KoauqJzwKEYDruGCeQ5r4hV
OXdRlC8HZRM6m5iU+kRA7kGD8qrqOnwXxDxdXfkFSJPMrpAHpVSfUmIaTuSSuqQGaqDLJk4hTigw
AbJE4r4fFdLAHBOSxxwVXITgc+xhVMzK449lgq04LiqUBzTuFiU5YPmVd+Aqnr30RYD0lPqL8P4O
i5vmferk8k4FefudDUKcKH0oS0kSWC0mRHAIBjJ+FU8I6SbumkdJwOHitM3J8R6F0AgcbJ4s/C6I
My+IsmIB4hdIpxsnkQDmF1HVwRG1G12XSQE24A6cx7wuiengVgWsyYgjmm7KIPTDsayYWCOarXsH
p7HFOKe7XUY4XPII8KBFODdEuOSs2WSB4Ypi5imHVpIJHNFrCwxCeR6TQHJMQ4ON0aO60TNMYqTD
VCVAniHB9CAhF3x4oykLFmCcAgOzrXGTt600ogEjFFrmpWiZFUAASDYoxFAc8EdosYB7VbkmBfMq
J25MJM74IxIEgKEhAEEPZdJcjBM3aPxDEJjfA4pqc05oV6lvAnq/20qf37fYImemGkFvFNsxfjJE
mTvhQBdUhEmrCvqTbYrma0zyCJlIE4ydEwxxNPBAXOJRMImRxlYBamgJHMr4jL+kMO5dRpHAlgPe
tG2RxIp6U0a+hEnsoqp1S/ZS/Z7eylVTDNNc4pzQKgT/AGnsEwqU5rkEHXBOnJRyVSyqaBUDcUyL
VPY7vwTkMOw6qBUVBVZdoAWat9hyUckCKBM91TBWumFVUOU+BsqYKoc4p8MldWqqhwndkRUgWZBq
NcFMaA3KYOShEgc7okS6iMOCcxJOKMw0QRYoxmaCzJ4g8XVAAr8fFVqRRygwAfEVTAlBw58ShIBg
RTuVKnxUSXiSZMCOKDgubjirDNkJGTSGARDkhBgxFk0rG6+Ivhio7kY6dyNTxZGI9WacuybEuHJ8
Ez2uE8j3IvDU1ihIARbJESNMENNSMDkvh7KmuKYVBTYppYpgb2TtXsrVUpwKGotxVDqbDBUHJNKh
CY+CcmhzVC/9IJTN4lvQrAdx9cmTOTwH/wAqw72f0uU9W4v7WVA/L/5XVSHyx9LpmLcXHrZUIPAf
/KnPq/8AEqs3EkrplbABlWL86qjAISjE6hYsmkz2ZNuEjIi3iqdQ/CV09MuSYhOSAMlmQmjFgvjZ
sAni5JTB/UnFrnNP6gmN83TiZBKwKrE9y6m5FUiqFk8ZeKPSmIbj2cVQ8ygccUUyr3Mu9EgsMVPf
L9XTEok3NlU17K2xRYcXQ1B8qoDDJWozS5JgxyPBCMQ+JwCuShqJHpQmJVF8ady6ZB8mcJpXkHGI
K1g3wTTrEi4Q1WwJRiS8TWlGOaJEjrFiS6d23MrgqRiHEbsoyIJhZh7E4OkSLB7siSNRz5ptsdUv
u4BeWYnOnVVHQbhiCMUGmDKwBTyDSNUdQfTZkAxjWroB7qnY4xsnBcmisDgQt4xsfppUy69vsAYn
oi1WAqVqOkter+kp30tZi3pWqR5Alh4BdA1Djn/SExiScefNEzMYA95PfWioDP8A7o7kB0xiPuPQ
cSvxNYAME85dRtEGyaOOS6i6pQK6cqlE5KYKizKquKzQZdRc5BdL88FVUTALqYy9XbdXVSiRbIrN
VpyXrVB4qlSqjsFe4IuCsuy101AqVTX5pmcqvd2VQyXuVHZNZMCrK6qaokimCyxoi9VSiyQq+azK
uA2JQq4N0zeKouKdmVx61U2XSC6IZM/eiQCZFOwAXxeCrFzmUCOkjNFzzYIaY8aov0thdRILxJY4
JqOg4tjgjIFgcEGL5pmQZubupbm+NUIh7Pcsm2djvLD1KG8wjI7hDXoAU780wcngnMKrXSNOa6y7
YCi6YuMXw8UxFDTkq7lsmCADlwD48kREMcX7KueKZmexRr6E7c1RmK4hYP2WVwiPi5JgG4FObK6t
VU6Qe9A/EMRZEOEwB8KelHSG5n/wunJbub/mWJzqW/7oAVgOTP8A/EVY97//ABEJi3j/AOALjwAH
r1FfC/8AUSR/3jEKhAGUf/kHtWPOg/5tRXUY95Mv4K55RaPqTxAPOqwHoTmVHbvTOSbqo0viTRXD
rSYgHJZjLEJw4Oa6pCt1fU3eukeK+JUBPFao9PoXXIvxQ1DU1inB0nLBXVn7HIfkmAYdlQmvwTAV
4L2G6IPoTY8UxAJKYxZNGRBwXSXGKJMCwTkV4hPiizllxQjiSzKOxC0QyYVXJP2MO8hMVY0XU4GA
TsUwpmcU4oydw5RJI5OqBgMAr2+6bp5MR+HFCNh92Vj3phIPalQgJ1HJVPTditUS8cRYqThnKHlF
vxAWQlEuONwm3YPIChs6iQGleLmh8FKrSJpUu3BARiZnCRFfFFxzCJMiItQH+KfbPOLImVJ8kNcH
oz2onBbNV9CqEyot7/20v+ptdgGe3Gj0uU8i7VY0HcFXxuUZzOo4BMBpGVkA7nj8I9617sukYYnu
RJDQwFghGnIe1Up6ETKpN3TOrKqZMK9lezJUqnTYlOSHFgLlObnAJ/R2vMsE0PFVP2NMA5T7peeE
B7U8+iGEcU7cgsk58FQMEzsAnFSmbsqn7HKAAVlTsZuap6EydVLBUt20TN2VqqclUqxp25qyun7L
smQayYjwunFiqUVlQ3unIfmsuSrmo0oX8UYh3KqBRaiWe6LDUMEQwi1Ch1A4Jw5zVuRR1YZIaa8F
uC3TbvCyGajtnCZNMbo0cg3QJDVZ18SDgldMaYYIuwohpLh6hGO1AkgOSRQDiV5Y3BJjpMo1Drzv
p5eZGoMTSThaTQi4KEo43CZleydnTjFNK49S6Rz7KlMz5FPY5IOaDuVS/KqrfiWXTXk5VS3eB71/
qfWmr6v+VV9P8U1fT/BMBX98nVQw4/8AzKsqZBz6mVj3sPerj1+tMH9XqVAHTOg5TuEAxBur09Sd
3OCAkC0S/BckxD+pPYYBVqy6Qj1MBgqPIrpGldRcZJgFYKzcrK1VZVtwVFkr6uHZRVvwXSfFWK45
LNMAqBWY8FSRC/EFUGiofFF4govFitW1LgnZ1Le3A0hSL5ozjUYsqLjmmAclPM6eGKZyrmJ8UdJE
vQV1BiLJ7J7qi6q5BNkmscU3pXM3TEeCpSQsiTUGq1SqDgyAHSAXRILkIgdMsMk86ZhNLqjmgYyb
IYpiTJs7qgpdHUHAtmFWZ/lOA4KQlLq+7IWK/MFY3bELpkNJsLFExJkM1dMb5riMU/ct7P8A20n/
AP3NrsABb8uNcbyWuYOkW4p5NEZFAg0GLepUiWzkg7PhkFdycgmOGDpz4qxKtVZlZpj2U7LpgWCq
gHoVVyi3xGiqqdnBMKlV+xmVr3DogMSjt/TR0xF9wpx1z/EfYiwqamRQ1EkoE9ICY2KoKBVor0TD
G6YKpWadlcKpdO1AmsmKaIVSO17Kp7lRcEMX7Kq65p1w7CsmWZVQrquCAHeskzuqBVo6Jcug6JAo
U1gE4ONUwqSrWQkSzZKjyitDNqsmMgCnl8Vj3ISiKC+CIsLEKMTIkYq3BAelaZBwRVdMRhgiwda9
TasEdR1BqBNCFRigCABKjpgTSvegSKo6vgufavK2T+VMPMDF80SXLmzroGna2w5OAGZK3d2HwSl0
8mumNrunHgmZicVU1XJPEU9qaRAVyUzUVKfvxVD7Vf1BYnxWS9/8VZ1WngFd+51bxPuWA5D3rE96
GiLuVig5qinVByJWRC67YLVCirfJMQ4wTij5KpdUwVaJql10sy6i5VQ/FPEMs1RV/inuFx7Hdk4r
xxVnV2VUwVbcVQss1ki7S4YqzcU48VeuSseacDtDhEvdfF3FF0A7krT92HrTsxTihXxsMXutO2OZ
xPZmqFkxTEuMQtUPhxGSB+6qJ6IVqq2yXDNZnJMcLdmnBaXoExpiCsnzWfFX6ggAA+KJJrgEJG5w
yTCQAOKJDs9WRlEhjclNLpkMRULVIAvSuSeMARiERF653C0BgRin+IZhU+IJlvf+2l/1NrsjtwLf
lRPpkuqcjlVdRumvkF+5WrckzWWJ9CeJdrhDUaCwTO7XAQaDkmgWnTGIN2uoiJNbnDuT+tOW0vTi
mapwTG2Xa3ghIVZEgVN3VBFVARIiCBdajtxbnVN5bBVgVWBVYsrFdMSOBombzN3LALX9TJvw7YRk
2mAoIiicjkEREMBRabyPYSQrqoJdZKpVlZk5LIaa59jKtslQMnsFS2KZqq6omaqd2Tyk/BBo81dk
xNM1Qd6ZByqLimsnlbirU7LOsR3pyU1SnTMslUsmugWZEWZOKlANU4FB6Egq7n2KkaokcSyEpVIs
iW1Ci0NpeoPJfF4LUayNwgdLAhskRROZc1EitOxnZ0dLuOKBoESA4N2zCYhh6uK+J+StRNQaqI7Z
Ji1wbWuhXJ0No7kxtECRgJFi+JRkaS1AE/2ptLkcE8QoiVdRpgnAHpkr9wYep1V+/wDiqDwr6lWn
Nh63VD4Ofcrv+/BfD4//ADKrD0+pPU+AWA5klM/gAE1+JVKKpRcs13Wb2ZP4IYkoh24J5Fx2VAVK
jJCjetMSnHgnCuy6QSrALqPuThuScRJVAyzWKbxVKKqdP6exh6VdVoUWquod6pdMyqW4Lp6iqUVR
42Qr4LI5mqZqJ7DjdMeo5n7FLqvgqqgqtZ+I0iq3N10kvxTAucyql0+XY9lzxVnVmVTQ3Bom+7dW
dZpwnuU+GKJGATRHMlDVuMeSP5g4OCnDSyIKeUCG4IHuZEmpjYJhQokVTGhGa03qmlTFUDOqlAwO
GaiHf2LpLNgyJDsLlagWIwOK6h3CyOikh4p8Qt4//wDNL/qbXZEC3kQc98lTqHC/ZEtcJoF2vkuo
uyoHzVnzCy4CpX4XwF/FNtjVM3JNl0txkUKuB944cliYjEo2lIDpiLDmjORclETsGOkXrnkqUZcU
+CMhjZM6smAL4lMAfBWPgq9lR6FUDwTiNcEIhtuLVOK/KDn8crrrJnM5WCGogCNokrSHMuAUtzC7
WCDDSEI6mxkX9y6QSeSJAVSzKh1KoTlO1uxn71n2VNl1GqoKKqqqB0AAE5NQreKBJ7kXKoEwCZxn
2UV6qxJTDFPdV9KNE5oqoB2VDVM1eKMixZVKJqQLPVaRRDBVHeyozYIF+8LEnBM1VrNMEKkg96PT
TwQeQ9aYnkVd/Sm015suotpOF3USHIfqBTFgCmZ2syYD0rqLJ6kKgACIJ5MnBJIuqBgc0ASwFk8S
5yW1PblUatUTeLgUQelm715UpHQwoKWU4C1wMiCnZlqzqWV2lF9PFaJuwviw4OgQXfiZHwiF8OkZ
lo/8xJVx6Ze5VccyIqpj6T60wJ9ScR77qiclOHPctQYc6rqI08OzT6eC+Jz4hObs3BUqEEHLHMIW
VSmVLLqIbNUDqjAWZCpPAJ2ThWvimZWVPSnlEcwqUVm5pjdZ8MVY81enBUrzTepUVbqtVwTEJ2pg
RVOxTT/inAJGBwTGhOSYgkcaJ3VBXiq2XSSQqhwMexyq2TAUxJTguqjvCrZGTdEaRVPBVsq4qjAZ
q7oMnJqmOCpYXVrJw4ydGMrYEKgfkUdUSE2PZW6Be9CFHTeNDzTSvktVSeVFQuM8kPMkwGHuTSiO
+hTbYoS9S6fsLh10GuRWuRJIsFZiVgXRD3wITyoMFQMqKqoniXRMqcVvtb/bTp/+ptdkQP8A7EPX
JaZ1Bs61OIkYhaQ7AUT2Ju1uyRzoiWqVZl7VzQK0iRAOC6pEgZnsG2wc2kQF5W30RIdxeRxcqtex
hSIuUIxoBbtsq0TRqU0zg7J5AyJwFAFhCIvxTRGriEzOcWZk5FrYrrmIcmTOdAvKryKfb23lgZUR
oJS+9I2dNKYJNoBr9yx3JYm/rQmQzloR4LTSL3RBkdEcFo24sDeSjGIDnG6aRcok2wC6qBdIcDFW
ZMsUwDnNOTyCqaoMKKgZld3VIkohmzVT4LE804t2UuVYJ3Va81RMnTEqofJOzAKyvTsATus1S6L2
Hr7KWTdlqoEU00T6jUpgH9KaNHzTkgh8FUlEGrWdM/FWUiKPdMS/BA6e8pkdZNMLIaR1cVVhwVS2
ml2QMOrhdViw4oDXTgiJ9QwJQO1GseDDipDmF++a/t9qOm7FDUek4hM58UwC6qGNHxQ2/uyNRgQq
AAq6OJaia6ercU7VTSug11QeKJduAVyc3K4GxyVaNYqoc8FqFE6r2XZOA6yzVSa3ZOa81SowTt3q
vo7PYvYmCZZPdrJ3VR3Khpkql+HY4Dpz0nILPvVm5JgqqlOSunHgqJ09liV0gc2qmkSnFeSc9JyV
ahOKKt81QsclX0qi9gTgNxV1pF5epcBguCqm7WNk+OaJd0wqtRonJrgr1QdynoeaeUQTmqFieKcT
B5o+YQxs10QJGQlgU4Heqmiunsc1UuuqrWUieafs0inNdG6DEYWZMSDxd0YnUSL0QIiS+f2GKAa+
KYUBVcFvf+2l/wBTa7IiVD5MK98kzuMCmFgqLgmxQFgnKwCFb3Tmq4jBasEwXNCbaiLIOBS3bXEv
21VfBWYZIZlAyLMCiZybKAdAlzEXwcrpjpGdyiIARiPvH+KLS1SNgqHUcSokRDj4Xq5zTTmxNwFL
ckWjgnEWHFRhtkB7kYLqkZyClukXoAutog4KI1HQLrpgSBiV5jAGZ8E8pauAQJqTgqjSCi9IhAio
WhhS6JzVnKcMGV1aiNEAT4IlieapRPqqbKprcKt8QFqagWSIegsuKsuKzVA5WpMTRUDhM16JnFFq
JrkECLDNBM7E3KYWwVewYqzLJGrZsjiUzokWIZEGiBJuqBNZVkh0mUsXRkAwNCFpkaHJM6rGqAHS
RUFPrqMLIGQY480JwiwIq9FpmQKOOKcEljYm4QYeARMRpBwOakDmfb2dx9apRwfUE0zTBEY4Mrex
aiam7LSagphEuhwT4KliiCFdVq+JTC2CbwVb8FqAbgUzsMVUv2YB1RymAbmnJ8Fm6pYriqKt1S2S
qHCceKa+RCqKekKgbinFRmuqhz7LVVFWyuqqllVWqnKyV3VFmroMHZcE91ZW71WpxXuXv7GFDkmI
Rr3LprmgLpzJGrnNGcrBGRuexgUxPeqOSrsncK4TXWkLXLuT4YJvFMEzJ87hPGioa8U5NF8Tq6aw
TAME2PYDdMSAhoNEQBVBwwWnREG1SussBgERADvRnYm4TT3ZQnkGZU+okP7QV0/VV4x/iujfiTxB
C1aoSbipCdZChKOkPGD1GSrQi63v/bS/6m32RAt5EKd8kaoKtxgnie5WeWZTmnYwqU5QzFuzh2t2
6QKYlACw7HNFph4rVMoMWjgqlaohy18k5qTcqrUsF1Sc/hCBJaMcOKoGQhCQAxIVAZy9CdnlKwQj
MgB7YP3IRroHc5RhtxtR0CW1Ss9TzKGomY4JyKGkY8E0mGaM5nTEUEQjpFTYlRjAdRqeATyIeKqx
kanFSiImvxE8MECWgMGutR+HE5qgZEykHTAEoxFGuneuC6q81TBWTKMBYLVIu1PFTxDsF1FgcEA9
AtQDZdl7YKgJKchXHJGMjS6aIJK5IElkA75rpCwzQJPJFg4zQajIZGj9hVMEzJzJnqqF+SduTpyW
WMlpiGPFF5CmC1SlXmhoqRgF8JT6mBrRaZGjO60xDkq1kJOyFXBviyMYx/1Va6boTnLlgyJHUM7s
jARYgY0QiJB8VISuJkdzIHHS/egWdpRIJy+8otQMWPC6aZAkDXFCW2KijmzIA0B7CGdrMnZjmqqo
TJscE6pbB1Upyar2o4KjnNUF81U04LNWYr2p8k6zCy4q5THxVKcVg6aQXTRcEQzHIqngrUV0y454
K4Ks3q7LOmCrVW8FU0QBVOzJUKrfMKirfsrUqyu3NH73BMzJ4kkrroc17U/+qYh15cV5cbC/NFEn
sZXVVQPzVAFUnuQcHmmsEAESLqp7lQo1VLp3YrqCqGfJdLl8FXvVF0ppFziy0wACITTk0slqBBkq
OwX3gUxkaqhcCxxTyKcehCNxxunibpwpSAd6IkmpupQjHVu7zgvkEKrf3D8I2JRfjr2z2RmcNmFO
+Sk55ohqiyEiNIGJV1mnIWkV+yU1uwMg18VASANLpoBh2MKlOSwTC6rVRiLp5eCAsBgE5pkFQMmB
riUxKMj8Is6YBhwWlxql6EHJJxLLTpYRoBfxTRYUwFU0n1GrKIaguiNqLQjizomc2LZ1TF5SxYe9
CUxo2wXJOKcARjg90wJa8pexWcZmilIkSJtEepGU4vLAGwWmMQAKOjIm1UNRJJwCLtERzzTspQ0v
IGpPFdRYCrcU0i8skNIYe1UsFofqKG5E1FkWHVJNiZAop+zgypdWQfwQDuUzXtRE0HNEPwKp3JhE
p3ATGsskPSqqoVSwBTEk8EJRjQXdOWD5KNXBuhpi5RFAHddR7lQ9VMXQMYEjgFri0WoXRnqNO6iF
iT3qUoQOkgXojEtFqHEokSJYUCBfqOdShOIbT6kRYxojXknAwqwdeZAAAi8lGM5NGdKI6mawcoDQ
SRRwFrj0sDEgoHU5yRkRiO8IDFnXIunAZNbEImIoVplQLF8E+S1YG6pV0xFE696ZYAZqjnkhSvFX
uq3VahMRTsp2NZMLqoKcUPFXqup+SeNOz2LMDBUVkxunB7kzVxVPBcVbuVCs01lmnN+CpXmq1HZS
oVaFVT34qnZ7FmmZWJVLKteKtyXVfNVFEdJ7irjldWTgUwJUpy+I2TkrgLph2P2Mjl2AmuaswCrY
ocUwugMezh2U9KqKcMFZ10hkyeZYZJhQLVmjI3wRbHEp/FNmq4YKybNUVa9jDwVJGllpnSWeaMJ1
hK6PlTGg4uoxNNIZ1U81vwjKh+nkdPKe32D/AMmHrkiW6jdUVS6YKtBijEWzRryVfscEaOU0qhP2
RjkAFU9yYUHoTk1zVKBE5qg71QVz7LsrpsE7MtWJxKAuMlXvTtyTihV6m5umjEyPgEd6QqaAOnlI
RBwoP4oRgSIC7WJ5IMwHNa5SZ8qJ5AyOD/xRAAAH3lW5LRHFdVTxRMyARQe9E3pRh710xArWRqmk
bmgAueLrTKbtcA+5REQwj1SOJXREAcUKnVJzJqJ5kD0lSJBrbknoOJREaAKRzKqaIThIGJqWIcKH
4RJE96qVdWTlkxKYk+KAA9CBs2CLlgMkS3imCe6pydVLE3qumJPILTp041/gqzPdRVqXuU4BPFaT
RwjWoGa6Q54Ba4RozF0ZahTAIGWNalQMACA76QiIxYi5lRSInpMi7AYp9w6pfzFNABxkFZibOtZk
xIYsEYzkeD+5aNsPK4YJ9IDZla5SrK7UUbEWzTAaibC3rR0sAagEuwRG6XINQ7DghLbA8yJBDVK1
aCI4k0RlrAMsg6OqZJFYvQcUJYEU5ISrTBUA71dMLmy0G2BV7WVk5VSxTCqYBuKZ1QeKclgs8VVZ
Ht9qd+azVKcFw4q6qE2BVcFmnBp6lmyb1pmqFkVe6Y1VRZMKKoWRzV3ODpjQq7K6dvBVDHNV8QsQ
c1kc0+Ph2WLdufBUTlMCyqO8JyD3XThZjJMKrpNciuo+5Vt4Khf1qtRlimBeMRZA4djJ8StUu4Kg
XH7AcOyYRcZpytJ7lXtrZN2dJPFVxunehVDXNfE64pyuSa6OPJObp+10/wBh0xJVEOkll1RIW+S7
H6adf/1NrsH/AJMPXLsYB80NON0GvdMTx7H7WCINkwsqqqeFSLhAGjLTFPLwTBVTJ5eCoOy64KnZ
Uugwosld37WTBVwzVAhEFiVWvEoCPwwDR55o1vcpoxcBRlbJrBNOb9/uQkfhFIMLlcSMV5caRjdd
ZfiS6IYkuiQG1Ue6AlLTwsqnVInBBo9yMZXABIGHBdR7nW5ukNQ0+wM3C0n0Ik4BcUGHiqlmTmqy
Q4Kiqa4JiX4KkTWzq45ZIidRHBMA5yC+BuZQkS1LBROo1ug3oRaNL1ohIyIfJRN61daYB82ClKgx
AQMyS+AQMQ9ai9EwHcEZx6XDKIMnErtRVYKQjEkPRYRZOZk5p6AnOpQntxNO5AGgKId8TzTWOaBA
qLFAOIgls1ER2n3SOqUqklHSLioGaO2IHi9EIiVskdQeQu9VoNNNuRXxPyTRi6fuVSSnLPxVK5Kg
Rc1wCZldmund+x7Hsp2OExVEzVWScVOXY6umqmHgnAbNPmqY4q6pjiq3CcCoXrWYT3GIVE8RzCrR
YlOezgqovUYMqKvbxXtTuAVQ17GxVm4JwOxxUZp5UXtVe49jK1OCoWT3UYYyu2SMqhhQCyb7FFzW
R7CB9i6Y3CJTlUwVSi/2HwVF7UWwZcDZMPsVXD7TduYGCBB0zC3YGOmQ2JHu17fYP/Jh65KqdV70
WF7DtY1OLKgYFcHqSmjQIBqhUuUKMcU6NWTO6pVVCsrdjdjmqfNN4rgqCqyVVS6YX7G+xdlQq/Yx
sjVgq1RLLXiLBPIsczRX6RbimgK5lGMKl6yXUXOS1F6qBZtJeuKjEHSDUtkiHrc1U5GoNiF5cRpD
seP2NvcF2Y8whIG4crigO/s5og4Jrkqzc0QSwGSJLkrpiTnRCTMBZDqunN0WBqtOluJTarDBPKp4
oGIdkY2TaiwTm6JAcFkHsVwVA44I0XTJsU8q80DAOQagIRjEubItIAmqInUiiMYAPgyfQe9DWe4L
hxTYHBM1BZEEsBdroiJL81kcQozFxdPdExFJLqtl2MFRBzQ3TCrItHknJvgh2PENn2syqnFwqhM7
piEQA2RTF3TksU9SskyYpiHIWXZQ8wswVQMq0K45ql1xT4q6sExVV7VZOKZhX7KelP6FZOKhEk0T
hzxKqmIcdlK806p2EMxXHim8R2FvFV8QrK6tXNXUptT4YrqDckTEdJt2VT9tVmn7KRKwC6peCBrI
hVBgnjNua6SC6qCmXDscdlE0g4W4ODhNlgmx7K9jJz/hOLrejKrfTSL/AN+32azhsx9ck/gtMQ5K
ECQZ4omIHMr3JrRxa61Cxzumjci6Y2KAl8Ubo5KjkG4VDXJccVnwQelEeaoFX7FAqqpVlQJlVMKD
FUsrV7Hw7GCclmV3VAVwTkq6sslwTXKbFBgviYI6ayKBIPvKD0AwutMbAdRWqZ8VOZFMGyTxDDMr
xPaAKlAbhxdslERpFkwLnFEgPxKc04BMLnFWrihKIYDNAOA+SdzXiiV0qmFV1VQYc0WHNGQOCBJq
qBUHJanbkq1VESBRVqEMkycWOaIJRoxwVInwUZAaRHE4oapMDkq1KkwobMtMYscXVZUFURIPzTxo
QnOK1QD0qmEfFEgs+SqfFUq2SDDFFzRk4LlMBXNVKc1QlG+LZJwCnFHuy6u5UV0zUN0wv2VqeC6f
Si+KrzdVKqH4rMGyrZXTNbFNmrKtWsnCqXHFMDTNVLhMye4TyoAsSnCqqWVbLPJN6FXxKYVPBMQy
qaqgrlZUoUGIVmWYVnGSuRwTJmZVV05rxVKZKoVcMcVnzThUV+5MVwWRV6KUxcWQD8TzKqe5GBDh
epN2XWKsrB0CdsavQnAA5BBz4phdMCzqninPd2UV08wCfBMHj6V0yBVQqpxUYp1zUjkaJ+yiKumd
+xsOxu103Y63v/bS/wCptdmgbZnE7MXlhUyRbbiQbmSbbhGJN5RqhLcOkTxN0AJatWChMkdTUzXR
RMKZ811dyIJdlbkUwDEXCZUvmsSc+zuRkaBUCyCurOVl23TCoxZYBZlXTM6yTDvKqao0LBMzKlgq
p7cESKcSnJ8EBiunxZOWWfZQVRlZAkkPgE2aYIHAYFaX8ET41TlEYmjBU9y0hhQBMIk8lqn0jxPo
TivE3QyVfSiRF3RiA2ZTApyXKsmtknL0yVT4qicovkgyYVfJWFc0ziuXZxTJnbinNTxTBy9lWiES
aJ04qOFUwi3NF5XyRBwsSiCXOICpbB0NZpkE4qyt3ISFrFXuqUVSqsOapVkwDK6f+K6Q58FkBkhi
MVQKi49jgODktLHi9FSTHBPd7vmnF8hkmZnsTRVunFs1YsrsmzVBzWeRTSVa801gmwN1cJmsqDsp
Tgs1ZOKj1J2VDRU7wmTioVCgbK6fsBduVVdzg6DsCcrJiH5L4vemNRmeyzg4rr7mWKoenNZhXVK5
uuaeNRlivYV7F0fKmIIITuz4LiqqtDmExoM1pIfimXtW3t5lz3KUXobFVKdMbix7CSiZBUXBVwVL
BALLABcVx7KfY45J1RUL8E04PxFF0ljkVUU4WVPBSJs6YKgAVSr/AGWKt21oOyiot/8A9tP/AKm1
2CUJEROzGgzBkpHcJJA6eaG6Ia/xHGKj5fTIF80NxqACnrW0AGAFk7Ocl7F02jQhBx0kJxhZUKYd
l1VVoEwFux6tkn8FW/Zks1Sp7KlcU6fsvzVAg7DJdRdMLBWVbrgr93YzUCrgjkqXwZViz5qpZAai
xwVkSbCzotU8ES1TmuqQGYCunAZEmgwUzGJLG6ZaSMEYm4KD4IEd6Yq6Y1R03XvV0eFFg6smRqSq
hAgW7ASMWXLNBvQreKJJqnPpVPQnZmQJNeCcPdi9UG8AhKIZsTRVPgq1ZOKArJMUWtmgH8FUuOKY
X9ioKo1srptLoHBdRchOBRWVBzQa3FURJxqhiFUU7BKJ6h6QgJNEHFO5c3wRie45haSVd04scViQ
q2TGvZX0qnZVMmQepC6QxF+xxjgnfms08bYjsLd4VFS+KqsGXDir04K1c17VX4Td1nkmAtcLNahb
sdk4rwTAdnRXNVKYXTuxWRTdlb9la8VlyXDNdVlT09jSsqHpFVoDupF/hDBRl3dnNAg2t9iqbBaY
4Ktk47OHZVZ9t06p20V2CuTwQBDEpmpiFRwezh9uvbVVt273/tpf9Ta7IHZ2pzfbiHjEyF5ZJ/8A
bbvHol7lKB+n3OsM5hJvUo6PptwFqtCXuX/p935Je5RP+33fkl7k5+n3H/ol7lXY3GP8kvciRsbg
f+SXuQ/I3Lfgl7kInY3CBjol7k0Pp9zidEvcv0Nyn8kvcm8nc+SXuX6G4f7Je5P5G58kvcv0Nz5J
e5fobnyS9y/R3Pkl7l+jufLL3L9Hc+WXuX6M/ll7lXZ3PkPuVdnc+WXuVNjcJ/oPuVNmY/tl7k52
tz5D7l+jufLL3KuzufIfcqbG58svcv0dz5D7l+jP5Je5V2Nz5D7l+hufKfcv0Z0/kPuX6O4f7T7k
/kbnyH3KmzufKfcgDs7hJ/kl7lXZ3D/afcm8jcp/IfcqbG58p9yJ8mfyy9y/R3B/YfcgBs7lf5Ze
5U2Z/Kfcg+zuNH+SXuVNjcP9svcjLyZuf5D7lXa3PkPuTD6fcJ/oK/QmH/lPuTHb3GxaB9yfyJk/
0y9yrs7j/wBEvcgBsztV4n3Ivt7lMoFSfZ3GOOiXuR07G5pf8Evcn8nc+WXuQPkblcoS9y/R3Pkl
7kw2dz5Je5P5M2H8svcv0twf2n3IEbO4+PTL3L9Dc+Q+5P5E/lPuVdmY/tPuVNnc+WXuR/J3PkPu
RbY3PlK/RmP7T7lXa3DT8J9yc7O58svciBs7nyH3IA7M3/pPuTHZ3CP6T7kX2dz5T7lTZ3PkPuX6
G58svcv0dz5Je5P5G58svci+zufJL3Jo/T7nMxI9iby5j+yXuTS2tw5dJ9yaOxMN/IfcmOzuU/lP
uRHk7nyH3IR8ib56T7lXamP7Je5OdrcP9svcv0Nz5D7kw+n3Pkl7l+juDgIS9y/R3HNuiXuTD6fc
5mEvcq7W4OUJe5Mdncrfpl7lTY3PkPuRbZ3G/oPuVdnc+WXuRbYm/wDTL3L9Hct+E+5N5M+HTK/g
v0Z/LL3L9HcZ/wAJ9yaOzufKfcm8jc+U+5V2dx8OmXuVNjcfjA+5fo7nyH3L9Hc+U+5V2Z8Ok+5f
p7ndA+5V2dxxfpl7kANncBw6D7lXZ3B/Yfcm8iZ/tPuTeTMf2H3L9Gfyn3JpbM+DRl7l+huP/Sfc
v0dz5D7l+jufKfcv0dz5T7lTY3H/AKT7l+huD+0+5fo7nyn3JxtTGfTL3J/L3CR/IfcqbMx/Yfcn
8mb5aT7l+jufKfcnGzufKfcv0dwf2y9y6dncf+iXuVNnc+U+5fobnyn3JvInz0n3KuzufKfcm8ib
f0n3L9HcI/pl7k/kzA4xl7l+jufKfcqbE+I0n3L9Hc+Q+5GQ2ZxOPQW9Sfy9wkVbRL3JxsTH9h9y
/R3Hy0lfoz+U+5B9ncBxaJXTsbgOek+5P5W58svcm8qb/wBJb1L9Kfyn3IPsTJGOmS/QnX+Q+5P5
M24RPuVdmbZ6T7l+luH+wrp2Zgf0n3L9HcPOJ9y1Q2pjgYn3J5bczwET7l07E24xPuTja3ORifcq
bM/lPuT+TOJy0lfpbnyn3L9CfylONqbjDSfcjuR2NwnLQaHwWqWzuCUqnpPuRidibH+UoxGzuEYH
RL3JvI3Pkl7kB5G58kvcv0Nyn8svcm8jc+SXuX6G58kvcq7G4B/RL3LTHY3Pkl7l/wCn3fkl7l+h
ufJL3Ifk7lMBCXuVPp935Je5f+n3fkl7k/8At935Je5f+n3D/ZL3L/0+58kvcq/T7nyS9yp9PufJ
L3L9Dc+SXuX6G4P7Ze5Odrc7oS9yrsbvyS9yp9Pun+yXuTj6Xc+SXuX/AKbc+SXuVPpt1/6Je5GU
vp9wNnCXuX6O58svcv0Nz5Je5fobnyS9yrsbnyS9y/Q3Pkl7lTY3Pkl7l+hufJL3L9Dc+SXuX6G5
8kvcv0Nz5Je5fobnyS9y/Q3Pkl7l/wCn3Pkl7l/6fc+SXuX/AKfc+SXuX/p9z5Je5fobrf0S9y3p
bm1Pbj/t5AGUSA+vbpXs/9k=" transform="matrix(0.2654 0 0 0.2586 0 157.4063)">
							</image>
						</g>
					</g>
				</g>
			</g>
		</g>
	</g>
	<g>
		<defs>
			<rect id="SVGID_13_" x="718.671" y="201.251" width="217.388" height="198.62"/>
		</defs>
		<clipPath id="SVGID_14_">
			<use xlink:href="#SVGID_13_"  overflow="visible"/>
		</clipPath>
		<g clip-path="url(#SVGID_14_)">
			<defs>
				<path id="SVGID_15_" d="M758.156,201.251c-2.205,1.552-4.197,3.122-6.201,4.637c-12.206,9.261-3.365,24.418-3.365,24.418
					s-7.576,14.314-8.841,23.154c-1.265,8.84-1.687,18.101-0.421,24.419c1.265,6.31,1.686,6.31-1.266,9.677
					c-2.944,3.372-12.627,17.686-15.992,21.894c-3.367,4.209-2.108,7.155,0.842,10.521c2.945,3.374,11.785,4.209,13.893,7.16
					c2.102,2.945,1.258,3.788-0.844,5.473c-2.107,1.683-2.107,5.053-0.843,7.156c1.265,2.106,1.687,2.521,0.423,3.787
					c-1.266,1.263-0.844,3.787,0,6.317c0.842,2.521,5.895,3.788,5.895,6.311c0,2.527-4.631,8.001-5.475,13.892
					c-0.843,5.894,5.475,10.104,9.263,11.79c3.787,1.687,20.624,1.687,28.627,1.687c7.996,0,10.104,7.151,10.104,11.783
					c0,0.93,0.085,2.177,0.207,3.696h151.244V201.251H758.156z"/>
			</defs>
			<clipPath id="SVGID_16_">
				<use xlink:href="#SVGID_15_"  overflow="visible"/>
			</clipPath>
			<g clip-path="url(#SVGID_16_)">
				<defs>
					<rect id="SVGID_17_" x="717.108" y="201.251" width="218.951" height="261.178"/>
				</defs>
				<clipPath id="SVGID_18_">
					<use xlink:href="#SVGID_17_"  overflow="visible"/>
				</clipPath>
				<g clip-path="url(#SVGID_18_)">
					<defs>
						<rect id="SVGID_19_" x="718.416" y="201.251" width="217.644" height="261.178"/>
					</defs>
					<clipPath id="SVGID_20_">
						<use xlink:href="#SVGID_19_"  overflow="visible"/>
					</clipPath>
					<g clip-path="url(#SVGID_20_)">
						<defs>
							<rect id="SVGID_21_" x="718.416" y="182.037" width="247.077" height="282.174"/>
						</defs>
						<clipPath id="SVGID_22_">
							<use xlink:href="#SVGID_21_"  overflow="visible"/>
						</clipPath>
						<g transform="matrix(1 0 0 1 9.899828e-006 4.226731e-006)" clip-path="url(#SVGID_22_)">
							
								<image overflow="visible" width="880" height="1005" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgEBAAEAAAD/7AARRHVja3kAAQAEAAAAHgAA/+4AIUFkb2JlAGTAAAAAAQMA
EAMCAwYAACQ7AAB/xAABBPn/2wCEABALCwsMCxAMDBAXDw0PFxsUEBAUGx8XFxcXFx8eFxoaGhoX
Hh4jJSclIx4vLzMzLy9AQEBAQEBAQEBAQEBAQEABEQ8PERMRFRISFRQRFBEUGhQWFhQaJhoaHBoa
JjAjHh4eHiMwKy4nJycuKzU1MDA1NUBAP0BAQEBAQEBAQEBAQP/CABEIA+4DcgMBIgACEQEDEQH/
xADEAAADAQEBAQEAAAAAAAAAAAAAAQIDBAUGBwEBAQAAAAAAAAAAAAAAAAAAAAEQAAICAgICAQMC
BgEEAwEBAAECAAMRBBIFIRMUEDEiIBUwQTIjMwZAUGAkNEIlNbAWEQABAwIEAwUEBQkEBggEBwEB
ABECITFBURIDEGEicYGREwShsdEywUJykjMgYOFSIxSkBTVigrJ08PHSc7MkMFCiQ1ODhJRAY1Ql
wuLykzRkFaMSAQAAAAAAAAAAAAAAAAAAALD/2gAMAwEAAhEDEQAAAPTgizQzDRQGigNFAaGYaGYW
8w0M2WQFkBbzDQgLebLIDQgLIC3AWQFkBZAW82WQFkBZAWQFkMogLICyAsgLICyAsgLICyEaGbLM
w0eTLIDQzDQzDQzDQzZZAaGYaGYaGYaGYaEBoZhoZhoZssgNNOfU+lAl+QjWLJKCSgkoJKCSgQwQ
wQ2SUCGCKCWwRQIbEMEUElBJQIoJKCSgltklBJQSUElBJQSUElIQwkoJKCSgkYIYIYSUEjBFAlQI
YIYIYIYJgCYAAAAAAADAAAANctj6QCX5ONJskbJGySgkoJbBDBDBDZLYIbJbBFBLbJbBFBLoJKCS
mQ6CSgkoJdBJQSUElBJQSUEq0SUElBKoJKCSgkpCKQlQSUhFIRQSUCGCGySgkYIYJUEjBDBDBDBD
BDBMA1y1PpAJflo0ixDBDBDBDBFBJQIbJKBDZJQIoEUEugltrJTJKCSwkoJKZBYSUElMgsILCCwg
tElBJQQWiSggtElBKsILRJQSUEFokoEMEUElBJQSUElBIxJKCRglQSUElBJQSUElIWsaH0QEvzE3
NiGCGCGCGCKBDBFAhskoEUKigRQIoEUCKZDoJKZDoJKCSwkoJKCSgkoJKCSglWEFhBQSrCCglWEF
BKsIKCSggoJKCSgRQSUElBIwkoJKCSggoJKRJQSUElBJQSUE6xoe+BHzU3NiGCGCbBDBDaoYJsEU
CKBFAigQ2IpklMkpklAigkpklBJZElBJQSUElBJQSUEFOoKIlWVBQSrCCglWEFBKtElBKsIKZDbJ
KCSgkoJVokoJKCSggoJVhBQSUElBJQSUhaRoe4BHzk1NgDVMAGCGAME2CYxMYmwRQJtktsQ2JshN
gm2SUCKCXQSUElBJYQWEFhBaJKCSglWEFBJQSrRJQQWiC0SUEFBJRUqwkoJKIkoqSgkoJVokoJKC
VQSUEq0SUElBJQSUidJ0PYAj52amgAYmMAAYAwBibAGAxiYwGAUQm2IbE2CbZJQIoJdBJQSWElBD
oJKCCwgsIKCSggoJKCSkSUEq0SUEFBKsIKCSgkoJKCSgkoJKCVaJKCSggoJKCSggtElBJQSrCLKP
VAPnZqaGmACMBRgMAbTBpgwG0wYwY4GmDGDGJjE2yW2IYIoENklAigkoJKCSgkYSUElBIwSoJKRJ
QSrCCggsIKCSggoJKCSgkoJKCSgkpElBJQSrCCgkoJKCVYQUElBNFHogHzs1NDTAGg01AYMAYDaY
MBtMbTgYwYDaYwYMYAwBgDBgAMAAGCGCGCGCGCGCVBJQSUiSgkoJVBJQSrRJQSUElISoJKCSkIoJ
KCRhJQSUElBBYQUElBBaJKCSgmlR3AHzs1NMAGCDTUYDFQNMbTBjBqoGAMYMYMBgxgwaYMAYAwAG
AAAAMEMEwAAAAAAAEwQwQwQwQwSoJGCGCVBJQSqCSgkoJKCSgkoJKCSgkYSUElBKoJKCRgmM6wD5
2ammAMAGAwYNMbTCkwYwYQ2AMYUmDAbAYMAYNMGmDTAAGAmAAAMEMEwAAAAAAAQwQwQwQwQwQ0Aw
Q0AAmAJghghghgDBDCSgkpCVBJQSMJKCSkTQzoAPnZqaYmjAVtMGAwYMYNUDVQNMbTGDBjBjBgDG
AMAYNMAAYAAAMAAAAABoAYhghoAAAAAAAAAAAAAAAQwTAEwQwTAAAaYAAAIaAAAATBDQhgmBuAfO
zU0wBgDaY2mDTG0xtMbThtMbTG0wYwYwBgxgAMAYAMAABgAAAADAAAAAAAAAAAAAAAAAAAAAAEwQ
wQAAAAAAAA0wAAAAQAAAAIAAAATNwD52ammDQaajTG0xgwYwYxg4bTCkxgwaoGmDGDAGmDAAYAA0
wAAaBgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACYIYAAAAAAAIAAAEAAAAgBx5h7oB86qmhgDGAMGM
GmNqgaqBqgqaBjBqgaYwYMBtMABgDAAAYAADBDAAAAAAAAAAAAAAAAAAAYhghgmAmAhggAAAAAAA
AAAABACGgAENACeJd/Odxjy89V9mBHz01NNpg0xgwYwaY2mNpw2mNpjaY2mNpg0xgwaYNMABgAAM
AAAaYAAAAAAAAAAAAAAwQwAAAAAAAAAAAATBAADEMEAAAIAQAIGgAQMUmHz+3HU3hkb+0/VOwCPn
pqabTBpjaY2mNpjBjacNpjaY2mNpg0wpAwYNMGAADAAGAAADEwAAAAAAAAAAAAGCYAAAAAAAAAAA
AAhoAABoAAAQxAJyBnzHavM4T6GPlMz6vm+dmvY4OHlO0vjNH08J6Pb5vcfXAR89NKwaajAbTG0x
tMbTG04bTG0xtMbTGANoGANpjEwABpg0DAAAGgYAAAAAAAAAAAAA0DAAAAAAAQNAAAAAAAIYgAAE
ACHFeWeZyaXZzHXyrmiCdsesw872OY5u7w/dPO7Z807/AGfEk/SgI+eVTQwBgNpjaY2mNpw2mNpj
aY2mNoKExgDaBtAwAaYNAwAAGJgADQMTAAAAAAAAAABoGCGIAAAAAAQNAAgYgaEMkKEDQh8vL4Z1
8medab8TPa58e48vm9bwiurnoH6fGc3mfT+aXlPoHherx9B+iAR8+mqGmDAYMGA2mOpcNpjaY2mN
oG0xtMGmMTBpg0DaBgAAMTAAYgYAAA0DAAAAAAAAAAAAAAQAAIAQNABn556fF5HPXfy88JuuZH0n
Z8jkv0vk+eisSDaMmazND9HzYPq/lvX8kw6sPXMu32fINZ5PfPgPp/Q8OPovjv0LzT2ADwJpUAwB
g0xtMGMGOBpjaY2mMAbTGAMAYAwBgA0wAGAAAxAwAAGAAANAxMAAAAAAAAAQAgBACAFkaeX5/FXR
ljBvnkjWYRrkszo56zKEEqQeiRXRFE646CdIr1ssTq+h+R+vjwvJ+08c6/neT6kXrfAfem4B4M0q
ABgDAG0xtMbThiY2mNyxtMbljaYxMYmDQNoGAMTAAYmAANAwAAGIGAAAxMAAAAQAAIAQhoQCkXzF
+dV5qClMl08jTF5msXmVneRTlk3YJqSlLOozZREn03LHWeR9Z8j9OdrLjwvB+w+PrsvyMY/UQDwp
pUmAMAYDAG0xtOBpjAG0xtA2gbTGJjAGJjEDaBgDAAaGIGAAAxMAAYxDZIwQAAAACEAIEIaQHi+p
8eTExVKQdQF5pjAAGTNyRrmzQlAoB1OhtJRM64nq+3899ifMev5+h7Hk9fyxCj1TzL+6+Tj7kA8O
amhpgDAAbTGANpw3LGANoG0xiYxMYmNoGJjExuQ0IooTBJFEsYgYmDQU4ZRIUSDEDEDEACAECECA
EB4/znZwUkkVIAJgFkUIbiTUyY83iamNFo0F0MJ0hjyrM6vs/ifpA8/6D5Yno9fqOP3Ag8n1magH
hzU0xMGmDTBpjExtEMAbQNoG0FCChBQgpyFOQskKJZVQGhAWSFOQuUhiBiChAxAxMGgYgaEMQCEC
ENCHzdCPiMO7iqBMBA6WRpColtg2xN4CivRjzdPpvHrDDqRy9EZHZGdAnJ0fQfP/AExp4/rWX6GT
jYxDZ4M6wDw5c0xMYgoQUSyiWUIKckU5CnDKEFCChBRLKJCyQollEhZDKcBZIU5CiWNyFEhTkKJC
nIUSFEhSSKSRSyyOlcqOtc9mpgjyvn/qvlKlCGZoAZQIdKg9Je0dPK5Onr8tHq8fJmeTj6/nmEdG
JzvTI1Weht9v8L9ma18+H0B88z6BeFB72fzcn3wEeCsoroOeTqfGzrOVnSczOk5g6lys6nyM6jlZ
0vlDrOUOo5g6TmDqOVnUcodZyM6TmDqfKzqfIzqfK46TmDqOUOo5kdRyo7DkDrXKHWcKO44JPQPO
ivTjx5PUx89J34c7NM5Qo0zCsWu2NIji7PNFACpsVQFEhp2z0jIDR5Bq8g6HhaDUjh6rhyejynnr
oyH9n8P7Z2cnteGUTKLg7Nl4e8R9yBHx0VlZqYhvGvOamdlusyjKDoOTQ2M7E+rlKHIGmBq4Rq+a
zV8gdJloDw1KcYnS+TY0NMC3yM6DnZujE2fNZpXVqedHbzmKASrc5+vDqOrze1LwFZpM1RlOsky6
WJuQXdZx8PR5Y4bIpoEwTGLrnvM6GE7wZroowro6Dyq9RnlL0Og8g0RDsM8fUs+f5/oeA3fiegdi
9JJ556fKvkaZ+ufYgR8DPYVxz2cpvgM1cWc3REG22WptvhyHXv48m2WzJvVnI73OOfZg8v0/P5Dp
mZOh8geti8TNayZX32ct2HNvNnJ7HN6BzcPu+Odk3BtPN1Hk4+rwlPPlNydTniczs1jYwvHpDn+j
8w4sfTzOGMbNN+HsPX4O7xzzDfU441zJe2IJINK9U0zuzNgepfkB1Twwd3R5G5qcwapamdcep03n
wnp7eD3Hp82nIk+N6fItbedqendUb9PLgfcAR8JWfLXr8/NubcuWZ7OfJJ28a5TTfL608Di+gZ81
1PlPWwXaeLr3+Mds8uZ6Pp/IaH0nmcmh2Rp7J83phZ1Y9vKa14/eeph5WpRv3HFtzbmPP0ZGN6ow
6TkL7/KR7PHcE+h43YdETZ0Lis6c+GjV4ydRhyntdvym56/V4+5rPL3Hb5voeac/LKF16+cKKZLO
o7fXXmFVngQcvrGPs+fJx79Ox5x6QefPajkW2hw90eMej5c0Pfn9srmvE556cieP1+Uy9rw7Oo4e
o/RgI+B876D52vR6OHY0rGjaMwWWyL9jwfTJ6vL6ifU+b9Y9fnOWO/4r1PKrr4vV4znNMzkru8mO
jTlZ29fllfU8XnbinWhbeV6RtWOh0cXRR5eMyehr5PoHZwVsPXHE69vM1Pcjytj2Z8ujWvJR38eP
eYFgHpo8qunzT2/KXMdmuftGXD2ch5a3wOji3gzkQfS+D9SP5vs5zN56HVx93jno46bmPWco/OlD
mmadvlh04Sym7D6X5vcnnpDrPQmNPQPO5/cR4vbhpH6UAfAFRXXXH3Ge3n6kX1cJXq/P9Q+X0cDy
Z77PNPpfJOau7Q87WPpzyuvpRy/O/WeSeRpehwrso8yfUDyF2Yx1dXJtWfTfOXrxWdfRv5R0z4/U
aqYOi+ajpd9Bwa9nUeD0dck8ffgGenAd2fN6Ab9OhWfldRyYVzHobeL7xtHPxnq4XzHPnWREaQSm
j0DbQ8vPXMblCy2DI1QstaMK1Zn6fChLXQxrTMpOAoYiKHSoxqsT0e3wpPU83HSP1MA+N6eJ1n0c
2ZPH26HHl9DgeC/b9c+Q7fpvnDi1rU9nl9H0I+a8L9D8GvkvsvP6TozbMFr3HiHs4nh8P1XzBOvF
Z23xMnm7+g8etuKPR7PD6K+r6fmfbPnI7OYz5diOiclW3f4PsHTeXqHD25dpt4fr+CRWPQZ6LI9H
3fktzNdUnLXLJ73Dw0dG/Poex891wcPXjznbhNGZVE6TAUUImS4dEOmJuiXQEbcR19eHtHN4/q+S
E6AY7c5mdehzKOGPTvxuk9F8ndStYn6aBHwWvmc9e1nx9Q+vHU+l4vTiPl/qPku4+h+OrhrPmrcw
3vpPoPR+YD3/AC+XY6c44Su/w/VOjJZHR877vzpmuZx15nXWXbxydvi+hynL7vzv2caex5Hu18JX
nRHXPOGjyZPZz9gt1315T9HmOR6SbR0ZBjvRza7dRznRxGN31nJhoFazsVG9HnPtDg317D5rr7GY
cemRbkKlMLGJtFtgkshPSDv04MyblF6Sjn5evSI93x9K5q6uQ45cxXRUHXnyd5+nAH5nz+1y1j7n
lM1rCjp7PK0PJy97I8n3vB+gOH0M+tMF28Rg+LRYoR6vkqzI2g7uX0eEryXJzHTnF1l3Vyc/fzEa
8u8a+543s12ez4fuH5fjvzRrpijd5B0b8bPX7PnumvaOPU335tDq5ZwOnk5YO9cDPW4+eStsdTLp
1sxy74PK011McvS5zmvSzHK+s8tdXKNyjWZkup0K32gWWuZnkIYUaQ+cnK9I0Ozlrqw7szEuDDgC
LukQuizHpfbX6CBHxOXVFJ0k224cV9k8OT6Hq+M5D1fp/idD7T5zzdTSNZBOSopHLzUxqLKTREkx
0tKnecGRNRr08hXo7+N9WT6/BqfGHZ50dHm9fMYagRcUPbHQ3vno9Dq8fqro38TY9nKOo4sPTR5H
Tv3nLw+8j5zX2oOLqz5Tvw8xnodXk4nXxqjTbkR7flZMlaQSNQ+3k7K6sFqZc8BnVSXEsItDJwOv
kzUbb8SPY8vIOnp4+g6ozVdd8e5oY7n6MBHxOGqrW+eEenLxr2GeZ0c3NuTXJqL1fK6DufBR6PLz
cB0advQeUuriNIpj53kTlTjSI0GkiTSjE1wNvp/D7a9zq+f2PN8T6Hw4z2zk7jP3K8bXt0OLn7cC
OZkc2++Arzo1jCTt9X53Y+i7PEuvTjzMz19fA6j165NU2xKOY6+NYw6bPOn0rPGXsYGHP7PQfMO5
F0YaHdx4oGqJSoYQUZEGdIhXBKdEFAm0Po5maXz2dvZxbH6aAfDLi569efL6BcXTyG+LwNcSIqM2
a68jO3LCT2+Xz0etr5COjnzg9TXx9Tuwxsus2awZmmGVjyujNvQu8qOn2PE1rs8fv5Yk0Rfb5816
XBmxFqMi0DANcaDXEMo66M77PLOjLINiZN9ePY9bs8bsrt5b4zprBHTrwdJs4pOrXm3OPxvt/AXx
Ztkq5BORUMjLTijW40EOzG7QkIJ2o556ZMb1ZOzVXHHtH6wAfl/J2chevNR0a8G9dlccl5dG5wV2
d58zl9qR8SexwHPublcdo34OnU466OQtxJ0vlo7c8IPW5bzNOXPQekM0qbHWzrjWrK5uzrPLv1PJ
FpPacK1DE1yg7FvXlLeIU10VHYaHu8HFZ4E/R8x5A84e2cm3b5e56uHJVFZbHTtxbHUZ0dGnLge5
z+RidvNlmaPGhpWOqyMuX0OSFa3IOiKwqmZaXqc5qiKcGs8nPHXxqTL0OXtX9aAT8w5erjFz9HGV
2+f3nVx93nnNch3+z819NT8r1fmj1/My6I5tO+zzKUFax0GFayYYehkc1dAc5pBmXZFGhp1cHr1o
6k34ObI0WaOnTD0Dgj2ZPMv6HtPl9fqMT5Tn6LNOW9jkn0JOJuTXp87I+k0+X1Pol43SbeT6+x8v
l9TxHh37JHlV6W1eNXrQcVa4nTnjJRlqWhCz2zEtEZtxFVnRbe1FKQWlmc+7Z83z788ZaQh5rYwO
vAUSy+7zPRP1cA/MOLq5B8nXmc3dj0m3N0YiybMfb8ruOz5/0vMJ0y0Pb9rwver5nLfCMtSisiSo
UGpzhuRRNFCbRNjL6/O2q4y0iiXXd9D8nsfSc/HsQt6RXfUs8HtdB5XV1ZxXnd/nHykKauNKIWzM
ZuCurks+gr576g8OPqvBORZybRjBrM0N54G8YkbTGwqUGucoovao0dE0SIqiVHJH3fieFrWGemcZ
mgFY9JnHXqefPXzmfZzeifqIB+V8nocg6aNNsLrHC5grPA09Dzu8OLq2PM29uSfW8r16+Yjv5Yy6
Oftrhx6eeJTgCGbOEdUElZFi3iwnTEm1qZaIHUWdHseL61a+v5PtnRnGUbqdjmr1EeZy5ZUEo28P
bsPCz7+cy1x9o8X1dQfueZ7BXyX1XgnhTtRE0iZrImbqM9BjmZNTFmu+XbU49MnNVMuo7hvp7T5q
DzyZv1o8t79B5p1c53acvbXGa6nE9tzye3Xkj9YAPznHuVcUelynPO8nPyelJ5fTtrF3Srn6sOg5
ctucCmdnN0wZvi0OnNbnkzDhVKLSRrplsZFya3z6GyJKJopEml5dFdHo+b3HR7Pj+4GnJEdOWONe
pv8APelHh4b5UtuVEOtjLtKOO7BhKb9Xmi+hzx3nzEa8w3NGcqI1zlmbGAkV147lUytInpOfPfkO
n3dvBOro+W9OODO0ET0mPVnJ6vk+hrXlb8hHpOvQrjw7+Qrq87vPvwI+Czz8Svp+LxvUNAAQiWgw
mczt2nnM5pnTw9HOb4KYW2XoHnc/XzmQgExSpSaVlZoEmtclHRpyM665KOmcNSu3h0r1t/J7D0/Q
+W6j2ePydDbnpi2xRUpAmGVY7DecHT0eZ0nS8Q1z5ec7OOUSnMdWWPRWWe1xzVrkVloGTrQdF1T1
xOtdPQY9K+dO7yZuI6o0Mby6yJ6eWlcOL9Lh2rHi9bkPK7tOGPY6fne+u4z6T9CAj818X3eA5/X1
yoWYaGLL5enMjm6sYWF5HbXLqLbzrOzliTYw1MZlDQloQOpRSEba52mjzZcNmWfVRyV1Qc5uyXiH
ZPHod+nmh79+XvXVTyHdScpSEmCVsyno7jx9vc4zw4WcOGEt2Vb5wco9DjOmuK9MY6cs9DbXh+gr
l9YZ1fNel88GR6EcatEb7cYdfHudXNtnWXRw9Z6fJr4p2Xy+mcufTJxY6bxw+rx6H6wAfnnneh59
Pj7PMizGjS+cOzbz2ejjz0PDpRyvpZhO7Oa9gyndmM7Bk9GZPaBI0IgZnrOhNVI7JGs0bKA3MkdB
yM6jj6Sc+yzzr70ci2wNergk9Reaj1NvF0r1uzwOs+gy5fozl9fqUfN/G/qv5wcEWg0z0IxYJjDr
5dxQ5Ovqx6q1vxcD2Tx9Rc7iHsguVodUhXDd4x11ltXCtuON8RFdnFuew/O6a5/K9jx409DyO4/X
QD858z0PMrTzu2I43crIgYgoSNXmJsZo215NDpORmoUTVUGueB6C49TqjN1UXJzzdRheqM4ujnz6
Mg0x0KyVDcsKhGoQdF4ZHoXwbnQsKojW4413I4dN8hep5Mn1vV8Uz7vzvmOuuRXzxoPEaAoQPbDq
MiZOrk3zBqgHmRrGxLeRrWSNJaGtKI159yuTr3rwn18cX1YaG2mAdnm9/McXby9Z+tAH5l5Pp8gt
41Mp6A4sfRR5c+nkcC64MDTNWhFuAuUyhSa6c9Jtrx0dRhJ2zy6HQY6DimU88j0K47rV46nPn3RH
mrpDE3RlOyMF04kqoJ3xo7oVnJ6PmekEb40bZ9pzx3eaaTBBOmhzz2Bhz+hznOmhgjXYxMtMtjau
PU0maIiwpwFqA1ySNpiitsWFSzXK8DbjbFV8ppzaya9Xn9hl6HmegfqoB+S8npeQeh18XaOW6haS
TOkEq6OeOyY4cfSg82PSg4DryXA1zE0DchTljcIrXBm+nK0664aO3GKHO0mYrJ1nI2MqIemhyx3a
Hn77yY9EIw3qDl9DzfUExF+l5hUtXEjQirrFbKKVc1GWqiLAWdSLfHoFNozNUZpUDAKKIVBM6hza
3I759Tp5d+QpXBKljW7OUIL6uLtP1wA/KPO7YNtsmaC2rCdcQTiJnoo5+wKRSJHRitkci6lHFl6L
PMz9bI807cjnWsLLTBANpDqWbo3SKsJoZW2OxpU1WPTxeZHp93jbnS7useTsxjyfZ4eo7ozqojok
wOnEvOpNeakKXUE1JWRQ8tczJgG+OwmkVk0DLJGxW0S1maLFGz50WZM6sUEGlmOfbBfRzUXx9vnh
2cfafrIB+X87wNYqg0zRqYWOHJ1vmutSKG8w1MYOicUbPBmzzsJ0RE6Bhn2ZxyZ9dHBn6CPNXph5
j75Obp06jnvpVY6VRBYTrKL8X1/MjL1/FzPd18jQ78+bM6s8YO/Tyw9SeDY6KydbKcxvPA6oxzi7
nc4H2Uc16BgtZFaoMrBMyNq5rNRQIaEmgWdldvFZvy9eBkTYCQ6jM134ND0efmoz7uXpP1oA/Jse
zIxu2Q2hkhSTGmEhBo+ejZYBtCko6dDi33ZGiVBSJsgocDc0DSKhsh0i08yqENyipqC/O6ojhx6p
MZ1wWhBZmFvJl1mzZ4JO6+ZmrjUx1eZtpy7jvBm5zs0yVCuJL5cw0FkdGscx1Pmo2SkqJ0JskrGw
i5oSEWkG3peTqdc57HHl6rPG7TQ/VwD8rx2wGZwdBjJss2ObguboztoZLKSog0Qay6qpoYmbQqE4
g0WcG8Yo6Xz7DGi0mZUwIpDEyodmOjxNuRzHJl6OJyLfFZaQ0MAC4YDlm5zM2fPoPs4exML5w674
aO2uGjtWKNoWhzVrRzc/diZbTqazhJtMQb5RRNxoTedGmWmY1TJqQ6b5LNFlYuvn6D9XAPyXPoxE
MG86ENkTqzOqolaMzejM9XNNyFPJGr52arOY0h0SaBm6CLINJy0KKqrJsIpFLPM6FxqOrGbMM++j
z36KOKulnO9ZOfLpZx5ekzyZ9fE859uS86cjAKSDpjo4kEClQx1IaXgGhmjd4h06cVJ2rANiMzSK
1OGuuTPPaRPLpIi+Y00hF1jQxMmnma9nnegfrQB+V4bZCKCXQS6BJotyh1jRcyhjBFsyqoKqZNFk
jV4o2iEVNMwvcBga1zBvOWlZx1Ec22tEVZSVoRLGSxkWGWwc71zJNUYzuHO9FDzYY49knBXos4uL
1MTiW+y82/UJyZd2JyHZiuDAE0MGAgd42aKJOh4M6TJJ0TlRu+WjXHajkrqg5H0MxVST3cXcv62A
n5Zz9PMUuHpNVFANkO2SVBbzRsswtQFZxJqRY5bM60BFsmoRoZsCrMzeqxq4LcWU1QKKLEixMQWR
IwZkaJUSqZC1gHNiVBM0ERuHOtkY084JUmmbY1OhzLqyOXLq51SQMTKR2nPXotPIz9hHlV6mJyZ9
eZzm+a56EFaYWa3y9KMaE0Gfby9Z+tAH5bjviZcnoSQ1JROhDJLkBoYTVEtUSUgFJqpQKrMq0ZJq
iaHTQGhmy0gp52USxMCyaE1Qp0RnZJcNBUsGAIYBJRLCaQ1ghJ5wZ0FRUk652Z9GSNKgKWUHXjns
Y8/bJxdV2XpgFyQbGVEc/VzCa6zDk9pHkLvRhfTkZSsypea79fB6CfrIB+W4a4DkRWZQgQUqFQCc
yarJGsVRBqzLV0Q2xC0qacFNMTdmVWhFMhtANCaokaKTsipY3LKightgMJYgTgbEUo0HjrgY43nF
mcnQYB0GYNZSdCzopCCbkhjKqJLIo0vOTSdJGspN74WejXko9vm87Q315Nj1OrwKr3a8/Q8cxcR3
+d6S/rQCflmWuQS0IiDeckaStTF6UZ1oCdURbVMYTYA0FAiRWJoHUstJAqZF2iVbM6qCk0SWgaRZ
my3DAqBDQgBsoinIJaBz7chjMuGOQlsloECKubJaY4vIWssBA5THrnqNFHJi4VlWIckVLDSLCk00
35guHZz9NdJ+pgH5TzawZrSiL0DI2RFVQm3UlSDljTYDQ2gbihqUOkGjyoGqENiAGSxiYCQXnQJ2
Z1DLl0QmiybJVZlKmIoFUUTSCCdTzsPU8yEkinDLcWSbsxN9DkvozHndHOboxNYJhhFqDq05tzzn
TWgpJVyZXQAANA2kUoDT0vI7T9cAPyzLpwMb2kYTQrYppmd1IDkKsIqAqRlTSE0AxFEsqLRDKBjE
JFtMJoEqgpOzCtQgTKcsStEmiMrAYmGdUKlJsZ5mM6edG8Ysp0xaKComx6Z0bJcpqsEbTijdYtdn
lSbPMLhUT0YamefVmMQIECYJNACCaCCwnt4+w/WwD8xy9jM8mvVR5U+ozy36Wh5FeqzyZ9WzyH6g
efPqweavSo8mPaDyL9RnlHps8yfUZ5K9UPLfpM87P10eVr3aHmR6snm5+xR5b9BnmHqM8qvSDzl6
ZXkv1FHnnpB5depB5y9Gzy79Ka85ekjzX6jPLj10eNPrxHned9Tznzr9fU8nP24PEXss8d+3ieNt
6dnh4e3J4r9u18KvbaeHfto8h+1qeDPuo8KvXk82fTDnx9yDxV7MniHtM8Q9kPEfsh4q9pHiz7aP
GPaR4/b1dh+hgH//2gAIAQIAAQUA/wD65H//2gAIAQMAAQUA/wD65H//2gAIAQEAAQUAu2dkXfJ2
Z8nZnydmfJ2Z8nZnydmfJ2Z8nZnydmfJ2Z8nZnydmfJ2Z8nZnydmfJ2Z8nZnydmfJ2Z8nZnydmfJ
2Z8nZnydmfJ2Z8nZnydmfJ2Z8jZnydmfI2Z8nYnydifJ2J8jZnyNifI2Z8jYnyNmfI2J8jZnyNmf
I2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNi
fI2J8jYnyNifI2J8jYnydifJ2J8nYnydifJ2J8nYnyNifI2J8jYnyNifJ2J8jYnyNifI2J8jYnyN
ifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyNifI2J8jYnyL58i+fIvnyL
58jYnyNifIvmrfcdmXD+9iYmJiYmJiYmJiYmJiYmJiY+mPriYmJiYmJiYmJiYmJiYmPpiYmJiYmJ
iYmJiYmJiYmJj6Y+uJj6YmJiY+mP04mJj/gY/wCJq/8AtS4f3sfTH0xMTExMTExMTExMTExMTExM
fTExMTExMTExMTExMTEx9MTExMTExMTExMTExMTEx+jExMTExMTExMTExMTExMfwcTH1xMTH/I1f
/alw/vYmPrj9GJiYmPrj6YmJiYmJiYmJiYmJiYmJiYmJj6YmJiY+mJiYmJiYmJiYmJiYmJiYmJiY
mJiYmJiYmJj6Y+mJiYmJiYmJiYmPpiY/gY/4er/7UuH93ExMTExMTExMTEx+jExMTExMTExMTExM
TExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMfTExMTExMTEx
MTExMTExMTExMTEx9MfXH8HV/wDalw/u4mJiYmJiYmJj6YmPpiY+mJiYmJiY+uJiYmJiYmJiYmJi
YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJj
+Bj6Y/Xq/wDtS0f3cTH0xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE
xMTExMTExMTExMTExMTExMTExMTExMTExMTExMfoxMTExMTExMTExMTExMfXExMTExMTExMTExMT
ExMTVH/ky3/L9MfXExMTH0xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExM
TExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx9MTExMTEx+jVH
/ky0f3cTExMTH1xMfXExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE1h/5M
t/y/TH0xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTW/9iW/
5MTH6MfoxMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE
xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE1x/5Et/yfTH8
HExMTExMTExMTExMTH0xMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTXH9+Wf5Ppj+BiYmJiYmJiY
mJiY+uJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJi
YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJQP78s/wAn8DH0xMTExMTExMfTExMfTExM
TExMTExMTExMTExMTExMTEx9cTExMTExMTExMTAmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYm
JiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJR/mln+T+Dj9WPpiD64mJiYmJiYmJiYmJiYmJiYmJiYm
JiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYm
JiYmJiYmJiYmJiUj+9LP8n/DxMTExMTEx9cfXExMfXExMTExMTExMTExMTExMTExMTExMTExMTEx
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTH0x9MTEpH92Wf5P4Y/j4mJ
iYmPpiYmJiYmJiYmJiYmJiYmJiY+mJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiY
mJiYmJiYmJiYmJiYmJiYmJiYmJiYmJUP7st/yf8AEH8ITH6cTExMTExMTExMTExMTH0xMTExMTEx
MfoxMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExKh/dln+T+GP4
A/5WJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiY+mJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiY
mJiYmJiYmJiVj+5Lf8n/AAMfpH6R/wAzH0xMfTH6MTExMTExMTExMTExMTExMfXExMTExMTExMTE
xMTExMTExMTExMTExMTExMSsf3JZ/k/ij6D6j6D+AP8Ao2Jj6YmJiYmJiYmJiYmJiYmJiY+mJiYm
JiYmJiYmJiYmJiYmJiYmJiYmIg/OWf5P4I/UPqIP+q4mJiYmPriYmJiYmJiYmJiYmJiYmJiYmJiY
mP04mJiYiD85Z/k/gj6CD9Y/67iY/XiYmJiYmJiYmJiYmJiYmJiYmJiYiD8pZ/kH8Qfxh/2HiY/T
iYmJiY+uJiKPyln+T+GPoP1D9Y/7NxMfoX+qWf5P0j6D+CPqP1D/ALTX+qWf5P1j6j6iD9I/SP8A
tRf6pZ/k/WP1D6j6j6j6D/tRf6pZ/k/WP1j6j6D9I/SP+zx4aWf5P0D6j6D6j/gj/s57ErVu0N23
LP8AJ+sfoH0EH1H6h9B/2eSAEuqf6dntG23TAG7LP8n8EfQfQfQf9s7N9dFWnu+va3e2r9ZtJmkc
70s/yfqx9B9R9B9B9B9B/wBqbu4urXsX2XuUeLcq1a4bYfW6TYpvln+T9I/iD6D6D/tF3VE3do7F
rW4jO8dm4dFXSNdNvXZ5Z/k/SPqIIP1j6D/s4kCNdUsbd1Ulnc6qy/u7Zf2GxcGtVZlnQsTEwraJ
L1auxXXsyz/J+sfQQfqH0H/ZRj301yzttNDZ3dQF3b7bx9m1iXYzMa7EZzBvoLrEqsSu23SvurTZ
pWpnsLPq7lWuw7WWf5P1D6D6D6D6D6D6D6D6D9Y/6/Y4Su61rHtsKwWewZURrlWGzC+3kUpdhbqM
V3tF6RobfsragXpz2NG7T2KXm1yrbQtGyJZ/k/SPoPoP4I/hj/qmf4RPjs9+n1EkxdOy1W0q9Kmy
2q0DhGrqMr/E1+A9tZFr81uWzUv1dmq2m9BcM261quu1V1FllHbyz/JB+sfoH6B9B+gf9ez9c/Xc
36tUbXZbN5LZjWKs0LbnuOgrHsFWina1loBbAV4m0BWzM0rE3aEvq0rzrXqMzboFi0XNqXa1CHsZ
Z/k+og/UIIPoPoPoP+t5mZn6kgTmsDqZn6kgDf7Zapbscn5gx7S0BAnsxNLsGre5Ved7VgMZpLVb
dbUabdjS4ab6ezTNLrLLZ3/T21HqLVvQWBDva3Fv9cuHz5Z/k/WP0D9I/wCtZmf0Fgo2O1pql3Z7
Lxtmww3Ge5hOq3ksrt29emX99rpNrtNjZjPyjEoy3OYzDkHhfBFk6zcZtnt6wdNxDyh7D2Q9p2dc
1uzr2FGluXnY6xTr7Gvf1+z1nXtv07XUUXaOlQ9PdSz/ACfrH1H6hB+of9QzM/pturpW7uUAv3r7
ibMxrPDP49kNnn2MsNrclf8AHl5sMDchkrA/IDyEqRIaorPW+93Nezp/k0ZWJ0NCmijVso3dbtOg
NZ0O0spNFdV6d11Cb2j/AKv2Z09qbfTU7G7LP8n6hB9R/BH6B/1R7ErXa7kCW7NlpNkNojWEzn4L
ZnIAcsQtGfyGhbKrYRH8EnlAMDkBFrdw6vW2uld6KiYJmrT7b+0YVV9LsNXs2Myr2XUvfNfsNvTb
quzq3qv9p6s62x/r/aDsdIfeWf5P4o/UP0D/AKlmbGzXr17nY27LFySXhbyX8lopLRioYvmM2TyJ
mDnIy34oBORyF4kk517vjm/e2L3JZ7EULMDJGJ0dAa7cvN2zrWeq8Hkp4md11C3im67Vuo2Nbu9D
S2Lui7ep1sWWf5P4I/7AuuSmvd3bNm0nMLYhsnMT7kJHs4orZIjQeE55nmFjg5YqvGYOMBYWinz+
IAwYcQsFnT1M+hu6D0E+D1zm7RNXlQTO86gEa+zdp3/7AdXsdP8A1n/Y+Kyz/J/BH6x+gf8AVHdU
XsuxbacsYWnLMPifcqi1iy4E2eUT7f8AwJMDfgcwMcYJioFirgM5+hIEUhoORAOYWwWnSbFxfvrb
BdidRhuuV+RH33+01dNNi5LbCMjrqzX3cs/yfwB/BH/VM/Xu96M3ktCTk5BLHNbhXst5EwFiB4HL
8R9iBComAJXjCmMcziTCfEUgRQQGcsAuA86jZ+NsdzQpFeuXXobkem1XWvt+6fiSxPgyxLDX11I/
cpZ/k/hj6j/j4mDMH+HiYmBMD6Y/iZmZn6bV/o177Hscs0LNMtORE5BowIP8hP5/zMExMeSIpxB9
jYBDa5PtMBDClfyLcyFxCPDzVKrddX7au1IpnQOy7PadpVr0MxdrLQs6f/Xbtwto6rap0m0u9ln9
f/EH8YATAmROU8mFTOMx9c/TP1GP0ZmZn+If0d/thKy0Y+QZn6gkQr4Hj64zBieMcljFDGYLPbWZ
lTCCJ4MVCxxwRfE5GExyEgf+3p7dVy97r8H6zZOvub2xddta/U33a3Vf65ra88fTtdEvvyz+v9Y/
5eYIDBMgQtn+JmZmZn+Fn6Z/T213t3SZyh8T+efGZnyDicMgmsT2rC7mYBPFZwEdFANZgqBPrKxX
ZYpraKAAfzmCISITxjDJAzq1sV67YC7eggYW6+rraGzrIXt+o8mWf1/8HMz+jMz+jzMH6cTOJnE/
TEGJyEzMieJ4+gE8Q4mRMzP8HMzMzP8AD3tptai9+TEiHAgYzkZmZUzioU2qIzM0xApggGYM5Yqk
ZySSYoLxaXya0w9LLEsZZW6vCZnEGSWmuOWtpHPTdbeqaW9rGjaRTuRVVB9R95Z/X+gf8fMyZmZM
yZkzMzMzMzMzMyJyE5TkJn9Gf+Myhl7PX+NtnEIyft9PvDxSMWY+ZiA4mfPgwKTCcBiWnDzp9B2O
2KP9Urrru131rcZmBHq5TBUraDGJznwfM0mAbW1bU6imvHVVdevJTxHNpzacjORiN+cs/r/Tn65m
f05mZmZmfpmZ/TmZ+mfpmZmZmZmZmZmZmZmZn6ZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZ+n+x6
8YeSRCYFJLWBZ5J8mDwSTATBgwKZ1nV27hPX6BqboOtY63W6OqeRmZ2HX1bqbGvdrWN9GUMGUqUc
rP5fYaIzs1q6aqepALUJ9qz2TmZzMa5FKbtA2JYf7mf15mf15mZn9eZmZmZmZmZmZmZmZmZmZmZm
ZmZmZ+uZmZmZmZmZmZmZmZmZkTImY+xTXP3DUh7LUh7LUwu9rNDsUAHc1hPm6wHY36m1pt92YCBo
7kzxPsftD5HmAExUmlqUlh2esintqwP3cGfurRu0swew2SP3LaB2dg7SPqnlZr2IviHBLJxisVnh
pSQt19bto+wzmZzM92INqxTbv2APu0rNLbe3spZ/k8fTM8fTMzMzMzMzM5CcpyE5qJ7FnsWewQOD
OU5rPYs9iz2LPYs9iz2LOazms9iz2ie0T2iewQODOU5TlMzlMzlOU5TmIHBnKcpzE9iz2LPas9on
untM9phtIB26xH7GsR+yafPuj7NzMzggnMJ+nseLY0JVo2Mgtm3wzECFifoMkBGIwoHJAPZF5NKK
MAlieRnITIxmArPOX/pBBJAnmNVUT6aeN1TVxkxFYqQeQ6zZGxp9hr+jY5TJmZdb60Ys5TTuedbp
Km/Lbv7nuM9zQ3me8wXZntM9hM5z2NPYZ7DOZnOc5znOc5znOc5znOc57JznMzlOU5TlOU5TlOUD
TlOcFhE9rT2me1jPYZ7SIbWM5zmJznOc57BOcOzUJ8umfMpnzqs/uCRuwYxr3eF4bJzgeFwZnA5A
gkGFRD4hJJFVpQkLLLURHtLHK55CKcwuBCzEgTxFBJ1aUhZYWM5ZnLzynKZgZhEFtkNV0IImYtPJ
HWsJdUbI9bpHSAkHoN4C3s0FmnmeY5YBdYNFVVjWKi6O2W7CX3f3vcJ7Ybhiz8FFoEN89xEW3kfa
85uT7mEF895nueG2yG14LbJ7bJ7bJ7rJ7rILbYLLTOdkDWz2OYWYT2OIbnE9rZNriLa5hucEXPPZ
ZOdpPNwPZaJ7bioutguuMDbBhawT22KPfZx9zme2yG+fIeG9zDY5gYmczAxM0tc3MNKq9Nml6LFR
yGXEYzDGflgs05kAWNCSSXcQvM5gFrVX7PCO7uSIRAoALZmPp/IeZRTF5TnmcvxJWZWExK7HDauy
tfIxbHWMztPynn6fefaHyH1iZbQ2clTR3jXanLz5mZyhYY2Nj2Hqznsps2a3ta7XUG2rCXU8rFSw
W2VLE9ZnsqBCaUVqgp7Dg7b3IjaBCbFWXu1c0NTe+zoU0VVDWMvWha6bK/bfXRXBdUCLaUsNxIN5
EN4CFuQZQtlVTWG7VspgvVZ+63Ruw5w79oK7xzXuu5YVrSt1dkGyxUXgl7FSJuemLu1spuQt77FF
Aa806qOBrMKrKKl17G0xT8h1h2qwPcgKX6ppNql9XepqXW7TXRtra0b0e9cfIsc+zmwAMLHBesA7
Die6xiLbCFLNYNSy5V0hS/ZbFSOZgwAmZCzBJ8D6BSZxOdbXNjlUqDWkRWQlXRoXqDpbUG+RWjJs
qC++Hp0zoc77eu9Wt8bhbtaPxuJctXiGvAqxz9usD7dJwy69o3NFnLAg9b2KVtWNd4rVF6xRY/a2
olNGrZe3W6FevtS9m+Qz4b18igrBvs1/UgQn1CxtnR29ZUdoCGorUCbaUhdfqLdiq/UqNOxpalG9
ff1mvFsraLchc7Du9TM7Mwwqhg6CosSoS041btM02spsb+qzeszS2rZNTa06dW2/Q2BxQhQsZaSa
67FbG2zHSvz8FayvV2nXWoGzW1NUVVdfVtvdq+ve6/UFtN2udWzRyzdvQzh0uqFf9wvrMLKNat3R
09xtrVxaC7XYlNVzG/W2KVWx67b3Jss17VN+aydjV9a8XZLbK1SxxctzgaLcpq/0dttIrHLEJiHz
DkzwJgmeBORhJlNTOdPTqep9U12/2nCNfVZVvULUDRtA7XTWCnf6+ul9/wBjU7dGwu1Zqi59mp0Z
1ZTW1sJCr+eBycKwE4gmt6gX17bSATZf1Ft721NW2p2FlKtfbZKa8Bx7nQqV1qi21Nm6k7DKj1p8
dht1PUyZNQupC1WMktu3tmWpa5XGF2NKJsalDt27u77Oza2xdf70d7CuttGNpbpsp6+0r1utbTbf
1O/tbWt1N4anQRdjc07Ue+3jULGVkqussuDVR67PXqsLX9aIH0WrFvWW1snV8mGpcEvGwor3Xuse
9/XtF2XX4kXVaoqrYZrtrGx2Dce167Zqqr7O0W3a2/8AFbYvG5Ut2zqpc1t2w+5zo+OTNe4Um+xb
anR+S17AdNrZqh3NhNYbr/I1tnZtut/vDs2oN+tqdZtjb6t0oVPKWjildxOgpDav9PbZ99VQia/J
bLARksW1vUrMZn6UUPfY9CUpq7FzVPZsMw3vyXbCHV2Hc21Xi2zHsNaiFQ0Sm21G9alPU1hrbD36
y6fubNb5VjZradu/7HGxas0e0prs2r6bnsvvolvYbljWD2F0IOrs+lw6PWgnIImtumzsZf6xdW9D
K21qV6iWbFldlSV1W21O2vdTsVa3Za9Netv0e3Zel9jijGvSYWbHWbTlkuQIWcdXuW6dtvcVfJ2u
wG01m1crV9jfUW7e2yz3VLcdl6badzXWrtK9cLTXVdH0ttSer2nVNY1uNdkuemtFNrF27O/YeypL
HVvjdTsbezT12m1c2kZl0vjDXG2lot26wtvZbTWpsWm629nsrN5svbYrqS+566bVbWNlipduXIyd
i6E72tbSKlNSYYNWoqR+LV2NUtttIT3awC7VSmjcbjs+q5eo26dcbe9XaNbUuslnQWtQ/RFRRrtT
dqHx2VRfZa2rXl2w9sqqe1rPXoh3awmBSYFWatXxdf09X8TrxUlnyRQCLObDhOvsGvs7vYdZsrsG
qrcvSlFC0sqX0JDv1R9srKlTZHp2qwvXbLPoatPPuOzTZCnyPE0qTddxIbZcEFSsAzBrtetlT1to
vYjIyMm3uMw6xP8Azps8VuusCPrC17H2HQbNtlxRXBrrsvjattbMEBBWy3UFdd7syNpPizd1fk69
/Xb6XW9bvPF0dJKh0etsTZ6K5a/ZU0bUqaX0WesXOsruZSu5aXGx+VXeCt039K0diq07td7VSmrf
7Wva19qiPc1lpr16ZbspyFXuFGiEWs0Lr7mpv61uj2Hqru3WtfTdtm23Xeo69tmrf8sU3dhs27t4
19hkUbgiancsHrYLbr7DHV1u0qf9suuL9QiKnV2uEp+AKdm5hjaAL2pY7JY6VVVWbNzFqV3Gb19q
G1Nje1W2e498W3VIW3XsOmfPbhjccmVpysvYawZiZgmeFhJM63VOzs7GmllNdWjhtClpd19NKV2B
2001wtnV1ua+v36IKWJra0Lc7Knubj+4aQKbmuBZvV2PsKtGtR29i6DbV5X7RR5+80NQV0W7PEEq
RbWCvrOWvo11v2VugZVYbDw2Fh1fns5uHm1jMz6hdy9Ot7K7wESzVUa9dFtm1paVRSitTtGrWspZ
UOxv/lTv3e7V2dp6NvftOz0350WdXqWy2lKXOPVWeTi1WK331tbTbsp+3MKQ7ge4hVsgIl2wLQLq
3FPdW107G72GxStj5Z7cfLptK2nguw89rcvbtWRkA2NjXtLWrtVXpdyNm3sevY7S+2tX165X2x9l
/bawFXYXXDV7Tarj/wCza5qT/YfZK+40iD3A2G2dvuNBq+77Yrcm32DuNupV29zC9luJB2dks29q
xV2dsMvZbLbOx2r31ltKw/t2pYNfVWltQ/l2v+ZlIKL+WwR8YIWLYX6YzOo1BRrdht1V6JUA+2wQ
G66dl1lVGtVfVVRXvONYW2gUbNyWbRSlr+7503bVt8SvnCCDq7lmra3dJYtu3W1fJc8eRYYmnV7t
m3hVXfeGY+SljKTYzSxeSrQhh16CmGpYMrHqWUdlLq9OxXCqdUk1jT7JiNHfU/t/Yw9b2hn7X2Zh
6jsSW6TsrAy26uzs2a9bjaLvpbNNdWzs1PudV22lqU//AOj6dq7e868s/fBthfV7E19LboT2pZvc
9XdXaZYdmm2plYNzxCxygJGSWTZIrTeqSmkrx5h7krQX015pFYSFWMqNobdroat9i9tncO0raz1l
6rKrLWNYsU1sBr6JazXNVg+SF13bhr6VNpr61KhRpVk3aGqwazINFpqWy6t6tbYtpq60tPh7yPqd
G9qbnWUVON6uup6dt7QWSoWtOv1L9yzZ0daijTP59sP7pTlQoJLPkWOqDInkzTFJv1NrXvGwhuue
utTfQ7JqpfUuzZds0Wam2j9fzopbZ1ydc03P2T2NsMoJ9bQU2sWqszgiCYnGJW04iV2PS77+1epE
xP5gQqeVXUsV9PA2dbt7c3ev2Ov2Oo4ns5du2JsU1U7BGt7WNmzVDdeXSnfdLd69H192wFdraqOl
39Vw78Fu27CtrXOpYIarlBTYKBTyGiHh69gU69nKdaxsXoNms/t/YvXs9NdspZo36rJrt7btexFr
ossa7U2K3ajZFRD1QuYqszagdIamVnres0Gk03W0ShxZXThgl1a1JZrGNf8AIsNa4ROak7FdipbY
nx7Wd6mefG4yqmxC1d1V1W9sbI1+suapbLQzUtXa3Z3+72WEDc2FNF+161S1LD3etrV37SE3b+s5
q29dWTt1xS9d9jfterft7HWEaVgeztiOSHKsCwYmYGPtMzX65rdJTdq2t8lpzuEGw6z52yI+zsPM
tA7Y5tF2blBzPMGYllqtrd1YFswX9bmLXkBFWFzCRj7zkFH44g+8YsDVs7Fc1Nm522N59abnY37y
dUyfvEuNA222gFW28Jb1q3aFFxXdTut0hNk3UUXLtPY9dDaxpvratEG2+uDVvazrfuazDQ0di6my
8ONHZN1j31FU7GwXa5PMUboHx9mfGsx3lbLeuFhFr11sUOxsNFuONlKNyteqsdlqCOl9cW32FhXb
TXeUpCtsNr/MqfrtZradquyrYIYlA4VmrEbZRR+41Murtq1lRosi16NWunYdYkK9daipT7NjUCsO
zFVA7OkCzf1bFynssZbra6RQtb13rbpstdVtbst6BtvWs2bG67aEGveLNfsOu16t3b1twa+tW2xq
2WaNexcN22mvYtufX26y1dgYjH0+56/cVF7SqpF5Ag00kmiuClIalnpENM9XkVjIQQVicZwnJVlS
BpjMC5LAwVkq3gqBOBIAYRTmcTEII2a2WKtjRXYHa2biF3WYdXj95lN+k20mjr7Juo11Vat307Wv
clqjD8jGqJDU2AVa17yq7bJrW22q3rVZNDVoV6tO/arf/WiSeps65TbSsDZu1yrMt3U49nVcms60
y7haDTaQarSbtNWUtxnuHOsqGrsC2XVV30r1LPFp2mufjUUK2ykGolhmsgUdpo19hobANVx2LVVC
XXyLRtBSLGdRfeBrFGqNCsX6/cW4amwp0aNg00adFvX36ta2HUf451W9q6jBaG2Neqi9ko0L6rNd
dQ7FzNXWnuuC03bVmww01X5epLm9h0bqF2dnf6qiXbQW9eyur3Kuxe+s11hzxUlYo8r9r7Xc8YhJ
nhYbEhtwS7kkuFDNjJmRkAGKFBJBhxlEYynVZW2kUapmTPuQJYmwG+SQRsLBsNwGwBDtUmLuIVRA
8+E5C6ShevoT90lh0Bcl+oRbchVO97GiN3+0zfKex6+rcrvL1FaaGnp7d1OhXVc+iLLOx764WDcG
NXsetDaXZdSqUjXuW/r6tivZ6GqpX6kirR9lV63mC++WNtXLqV0V6jp16RH1uTtSU30V99ASeTpW
loMt2GVadmBwTfoq5BNSixjF3HVq+1sNet3HDqbTVsO9BrZ09ZKxqyS9y21cxNepRTXQtrbLIu09
1RfrbEBbttfX1792pg26vKuz2Haa3WY7zZ+WX1qtx1ANpWzVr2Kz73AG0E+deCu67Fd6oAWo4yIx
4y7nOXmvLlqnAa2lTwBhU1hTBiKcmwhX+0wDMeMCeIMGcRB4n3l+vbTWrl7qltJ163ZO2vUAxXUR
DiW3JWlXYXLYNRNu34nX6Z3uwbaZiwCWAsDXiq8gPu1vq6tG09Ojts/cy4enY1xrlm/qNdLQa2rN
LW13vXrdYJ2PS9eNL/XWzuK6Mx+3a6NS7j1awjCovWjI+vsbFL9J3GzfTb2NFQO2t75s/cgnak2n
sqkbc7FxR7PjFTHAFlCqV7Wz1dnXccCzmMB4Q6kOFlbqy5D07f4XJawP7arSjQSvTr1am66soTsX
VFtusWEtXWntFkZ6iWUcq2d0f3KuLGq4kEC0Q5JSuL8UL8h1azsHtQGpkOVGvZZWU94fZ3LEavFs
q2raVJrspXiTSuu5LUVyusWI9X42citetUUrocbFtNtcsquDatbZ7BhxDQZMGcEZnE4+wEwQ2DAp
MIx9OYRrrLNiysKDq7CVV2929aWbdt1pYmAgxPtuWqSCSevrpGx2XX0slukI3U7IpZUrPkkh1mpV
bbZZ2E6lGbuJsblw3brs2U9nYy12uX4nOraarKdiq0djg6P+uuF3u1/2Aa067vrVXfY27L6vJx1d
416et2w9nUUB9KptZbdSlmXV1eNBQuE1uNmx1axU17BrtSNQtTwd1Nqhkp7YOd3lxQESu5ZUde2X
651yuz+SbNhm7Tbe/lTrgNVv7Pw9KoAddZaYbTEuxDhogRSyDIrUHTGKXTk7UVvLddmrenibKGSV
obGfXqZRprk9dmDRYBdPJZK2BIqqtasyuvWL2V9a9RqpBGuGZdFzB123dNLSCrZUtNltlsCq9ene
2vfd2RtD32k3WrWuxcbQDORhwTkTIiznSCWUqGcxMwsBCwnL87LlaKRyBh8sUrJN9YIQFWtrrmvr
Da2baaEsTTuaujb2KA9q2tbv3rPGCCWCUJUbmVAnsPUeO1m7Qw2WqAHVdbZv2DrtRVYmjc3L8Npd
ptak3O8sv067bq4xJNPH2GlXr0vbZZ9/oWCz5euRV2WuFs7DFlW7e9gurvli0C69Tq3jtLUXT2Gu
0bNtki9kU19tw99lasKNdHanrGdXpddbZS59ZbSWLry1wljPoVXCgqtJ1tfdopVRqWoQzNEAUBlM
9QBX8DWSwof1uzAgOIH8i5TPVQ01dHWZm0NSbGpQH2v22hE2dNq1s1yDva6AbWidW3couAbMJZqD
WSyvRSvyK8PvR77HNC7Th6txxiysiywAbGS1lOL6+IIPEYmDPMwFhsAhLNAMhFyRXiFCFPiOeA5T
zgHyPC5wHvcmajMFeuudbSwlo4Nr7LIgZ2m3YtSsSTXTkEATjmHAFQwnVuB287DYa+3AEBMIORW7
BUqK+tMCixi3V7Fot/127iFK3HJiIUZdHbIHWXY2V1ADrVg1066hi4Kt7HuZqhdtutBQ7lY0aKm6
p6rKO00tCoNVTGWtFWoWBkdArlGXtOYXeZUtopeGi9WSq/21uyPSeVexuDR0dBuWrueLyCZnwrZJ
cgByYX8hjiradTXu2oa9gsFtERhjk2C902Lray390sEBHIRWAOaXsWosbKxQyWajomlW6r1dZKdY
itd19eH0BKqGREF61bDsUTWpdG69VD0OpAIXe9zOMQkiBiTZAYpGB5lWqKqeRjsWLuqxmLGYiqWh
ZFl9jEhSYaOMSoHXf8TTbVRRZreyWabrNu+ulHZmKqSeRM9VrRdWxhr00Brq0FukaLOzm9qOdsUl
jXqnl8KlWGtoEovWIBd1qFd7rsjtdJVsup2dGvr6WtXrtVBbXra1e/3Lbt4ORgghc1rgQ8TK0WnZ
3ma06ynNj4iWvBY2WdzMvlzZK6aGrFV9Ruepl+DyFhIYSqyxDXuuosevY1k3Nauuyijf6/Rvoo1d
7TLbFGl6nt1tY27KJVc+AwUYOeXM4EpsPGu2tbC9pbUveyMw4GzxqsGL6qNKtVUsvq4WjzAS40QP
ZZ1b7Es6rZqT1WVvVu7lSJ3Vimvt9VjtbXW2VVWIy27o1mr7yu2vd7Bb0FloFe5cg0uwOsx39fZo
sqCwgQHEP3FZMC4nXa3vu2nW23iDHUk7CMhioxJatYzs0xgMhJqQcriOR7ALU2w7Ftu9pV2uxUlv
eeyuxw7VattiHRYIt7NUl4LEZi1+5TYi19a1TdpL2LWqwKKxzYbGdiEsN1ao+4imvaRjs7WwVTY2
qlps7AB+737Dtdtu76UafpYqBBxgGJ/8qyrVO6KTYbLXqxCjiL+Z9RBZeIBywcEnZQ66bgUWbPNF
tVYxRmR6lY2UiC2vNdiVP19Wru136mhr1roa2P8AZP7O8t9ga242WXVl0fXsVnSwqldobg5OHEXk
qKp4nGdWwJde5etbVDUsQdbeaxvYQSVaNQjyzQraaXThNZchFUjYvIaP1+nZH6jLWdXs1rZQ6BTf
XZ8xzH5lgMk1tOJExj6BuMYeMTJmMwKBNGzjUWJJZNMM9j2AuSqLg2k/QfbMyI1nrDMzEnEx4hMJ
zAJXs3Vqm1UZypdRXU8qvZW9ik2AXNpaz6/cSyz+8ma7LCgldz1Oz233suqY+vXy0jq03vz2NjYo
al69hlO7Qvq08G91NlWqAut7dSqJudABs9n1/H5zeqlb9uXao1rv5ERfBJHG4khbCiuxWKy2KylF
VgAxyMeTMQDEK/l/q7ONbsQNyrXdl1/9vyOxDeA0rsXJapyPTD6pr6j3x+ttpT8TLNb1ltO68qHi
sWHHi3sAFdnFbdq9xo7JSyraqtIsaVdulFBtBSva53W7lQSu0OQ3gPGKtG0tNy/UgBKkYPp1FbNH
EOlsCGspDxJ9YM1dc2VXUvU5mcTGTq5zS60Da3G2LeTmAZNh8iYE+05YgKmPn6EeTCZ95j6CeYD5
pvakPsWWMjwP46s8+yl7Fbhkr5UeRBZyBuem5NpXFtipa97exd9lrfkD8yxK/aXlGyyJVuOE6+xb
LNrQxKhhtWnVbV07KdN929rirch67CPReZct6RmYjyGsUgKMoS0fKoth5paDWKkKuqrWtuDqJUae
j7SjRTf3GtFNm3Y3+0cDtlFEas8V8mrW9r6vQ61uoNHQVaPTqtd2F4e/crsQ2ECq10mtctB4CCu3
Lat2bBZWXcsFcxLSDp7Jeq+wpDdwNG1we3Z5DW7IVNXs1NWXHJXyFYznD/lFh4m3Ko/9w8XvOpSw
bQQrodazDc6Q20ujK3HEyJXeaA9llhxiZn9KiAZjNxhyYSFgaEwRj5wYQYQQAJgQ/bz9BnLLiVBj
FUAaFi/u82MfIVgAxDAyx1W2/iLdQZSuxle8rya0km1mHuJX2HNdnFr7VNpsw9O3XTr7F2tZXRs0
rTbs0FHtHorsKtu9gtlR2Wq1BvNclyMjKSA+IKU4ufW+waq0AZlY4HLJPIwclnP8atiypjdys17m
2X74VWKtZIwYTKdlqhV239lNlXW02cgxBeEwmZxPvASCl7BtjjfaaqLF3tYa1tR81OUay4tDcBWH
8nELglbSCdmzGntFlD/lz/Cy4rsC1WU8SqATj/cXKlWJrotZVrs2Pf2/XOYa4QBMFjjEIgEZuZwY
PEs+9toUgloD9AJ62cimCsZZGM9bQIQeJM4sYtLGCoxa0ERAR9h16P8Avc2dofJfdAgvtaDaQruV
qy2c7Wr51LsJ6rFbwwHD7hQMFvPJuKuVIcCFzA+AxJUWYLMGbOIgLFjk/wDzUkqOQZl4oTmWFbGa
thK0doW/I1K6mt1JVsfez7MSAa9pkbY3X20yIEOOM44BEIwVLkspWBVw1bgYxDjE8k5xMYi5ikme
tCU6Pct1DyBZmwjkDl4P9f8AMEGUuqDW2zY3PFduwg2VsqcPkKrW5F+La7VLjia6cKlZHsXiV7Xp
bKW9bZxiNgTJjZ4ATE8zYOAMtEXEAgEACwvDCZgmKhEPAQuphLCcWwlRgRFNt6VrZulp07cu6myM
7T5JrbE9qTzlGBNaIXfSptFupW1tnWbfJOl7KxKOo3LXt1LxZ5zMNisF3+G5Sq2pVeoe0a1dmqyM
InJIUyPIhdlJcMUdCo9TDjUG1tKktg0X36utY11NtZwSEOQijK0s5bWtymnbYirwijENi1VCw4Hm
WLwgXMrQAWeWVSZ5B5mLxadh1Gzo1Q+IEJQAmLVZw6zXotuq/wBg67PbdTp9ghrdSgsB/GZGHyJr
sqniS2vZ67BevCxFNjU+tlbYrNDFw5xYvEugK11WOKka3I37KqR291dm1bVtWmsCFTkgAWfYQKxg
QKbVyvHiUBwBFENVhnrcEo0FbEhOI44gpsaCnEFLwVHFj1VA7gdb7nIzOjLHuJteNnEBxOQBJmja
5a0sLbN56WXsarS+ztuU3LzDuFnW7Ss2H6frw+/0i6SKthV3YTX3akrYWOyX+fkZsBrEvGawzBnf
lOZyXzEs8c7ciW2UVVNtVGpXtDc3shChFrdoKbEULYBrtabj+MBAUL7LBSmaNPWVbVoVdgNyrHg6
4qo9eYqgx6sQrmdZRzv3ex2Nx2UQoJwMo073h0tuwt1+20/a7WK9bWpbrtZ22enGLUapiWwoVlLh
T7GMR7MG8Bfetlp3CjjdVqthPWq2cwDhkbENyrXZc7y3aZqzbYY1zQlzOWILiIbQSr1GcsLh2Jr5
B1wwHFaqTgYUPieSQj5WrMKlFNhnN5/cMY4D7T8Wc5LuYa8wVzpK8dzNv/2STGj2EksZ135MQot3
wpsldttRo7G5m9atKakNbb1D2DYvOu+ts1KrsGaytlrQtHLJOFVkX11qyPaTpXkXarVtwPL1RBxL
jLlGecWAU/hWwULWc8OUrNRmrVcaL9bhKevqA2zSb2qUBuInsQx7reTmxW5MYLADZ8zdtTrtnA6v
cB3DUrhCxvI19cjxiBfJURWsrn7jsLKO3qlW9pWRwpPgx2wuzVVsrtdbbSW4qGIJHiMq8afsMCwg
FkQKrM7iq9cs1QD7bKBt2z32NAxaFcwrxjwCE4nkz+Qd1htyASJ6fYyVIsYQq0FXkVCOtij/AMnj
ZtW4+ZZF2nJbauZmc45WMAirOKcQwMxOm/8A15teNs5hGY+AwnVk83tIO2SWCsQR5pwLWB4i80ts
H/yBYRKNu8s2uth/YLmS2p6nBIJvETZDH++5cWgnJHAclH5BV52p5BYFXYEf24q/naOMSzk2ulS0
+6una7BH2K6bLqi9uWazkqM6uqszUVqpOuGWrqFsOv0bIKuu00Bv0KJ3CX7NF+hsUPq8aFHOxk0r
2n7Zax2Or29Wn7Qx+IgXMAMWzYrlfY7NYr7ZTK9rUcJbQRd12pcjdPpKzdV1+B1eiB+26Qi6ugB8
TVh1tcyyjWz/AG61JMdczj5UKISIOUKgA+rKssfjniphqWelYtJMTXKzM5NC/lGBCK1jaXXrS7ha
RuXDKXIiYqMcVqqK5FdQAo1U2YVAjHEZlI5mdIQe3m6+d0nzmWAB51Z/uXZNt9RLnAjoHJUq+uzB
9q6017BzdNc4up48qa3SneH/AJ1ddNlVmrxtSkpOSqtlrTlmFzyFmSXGCfDLmLWCD/QuRFXKD+rM
p3NiqalBZrxTXZhs8AQPDa1Iutp62qutt7pdaN3+zYLbuwvNZKCltmsBSxXc1alfRG3fT09oCdZq
1lfVXP8AYfy60tA6sSMzJicRLLJhsFcRcAau1Wta7+m4sqqvXdp2qJr27KHb2qzdXjDNUS9gwWUz
hmes5YoI1qLDsAz5REa+14tbsAlKxn8FmmcgRKiYPA8mcgASTMGAcRrdite0l1ezV3G6Uj+SR4Zo
EYlXbH58UpbiqEFlM4YUU5nS18e5m0x+UT5xmHXVj8WaVPrstX+5avEtkFE5B8Fc/ha4Nd3+SUnF
tH+XUOaOzGOzrIRMks1hhbxkGFsQmBjzcnlyIRWJi1+UZ+fERXCjmHP4A15LfMaue0sVb8S4htIi
3ugqtNr11UCCp2gpqUckWV0bV0r6ixpXo6lMyoBcknP07yxLOtbMVfJrtYDTvM+MiwrorW10DZmM
wDEVSx63rk19ZevpM7M6+vsi1AWtfHtYgnMCNhVBlluIzEg8oEYxayxBRA2SIZlpXRY0WtEnNRFK
NCMHBgXycKHrbYm5obWlZp9ntab7G2Nt2dVhRnnFRGYCBssmCFXBQBTtuXIXCLYMdSEXtptHG2fy
ZGZYeJNeBAMS20h/LFlyTcqq7sSD/bZsC7i1iJl6tZ3uWjZWU071Wt2FjHfpLepnAiaGxbWw4wHC
hvNg/EElUILZyazllxFsCGxlKjLQ81iKXlblBaAWWoMrp64thYkEHiWNauDr7DUslosSddTSKDZg
8jCcQtie5M+ndumxRTRWlqulur1lxq0NKoWpa1O/TsVXcQwImvUL3frr1nqsRzr1MvWadQ2ipMqG
F73I7DmACxMWvAawCL+Ue3IM5ATkcohYHJhR4KzPXCigU0KAfAZYVAiZ5M0qotuZdGqpV0OY26m0
m3u32t5GRYIlYMIYRp6eZ66uhT2PX10BALU4uJwYxeuNgapfZ1VZTt5uZGz7wGChgynK5nIguihy
wVzaahYWd3S6tKFC65AYa2lmze9FiMuqyDcsV9exrtazrku2tmptdyrEbL2lt2i/XZgCtg4NYeU/
mPuW8rK3VmcEscmVMmSpEA4hgCxyYuSW/IetS+cRSHZBwiEu2rXwp/nWBXXyxG2EBSvcuidbXyro
pqGy7JRs2lpQ/gt+RPl+x2dPsLCNxbdC7lZrWoSllNlTl0pIsHgTSYV3Js6rTY3KKF3NOzs3bUdY
iVVx7lyhdozkllbl67INewwJWha8T3NC2SCROUorLMqzEZYwAiKZySuU07qntunSjV6tTtdf3F73
brOBFUs1f7bt03UNp31iuwbekapWMlQqiitNkbdT6hqoTaQrYhouCnsdeuxOkLDuJs1/+T6YayJh
c+IykxlxLASzEsNapbnsU2OmmXqs1mzrbpoqv2WtOcN7nU12Wu9zpWewKMQ+xu11LQj9mTff8WpN
a5snGE8QEGWEZ5nhVyEbyCjpMhHR6oLvxS84J5tWQ05mBgAGzE4iLXhNRdZGUhhSvK717V7ftteB
bqUCzdtaV38LG7Eu3YPw1drcqU071YJ3aS4tBPZVlmrY2KNK9Fo3r11H06mayypdqnCEcVI2bIlF
zCyyuo2+y+XgfI4szGvDeApcAl7DFscSwvn8ieMCiELK05ulCAcCAr2rA4ZWfMCFpbshJ1vXX7ux
2BTUr7fvrbreq3l1esvta60KBEKoByBUu5otzEVbE2KbtW5dhLIlN7hNgpStL67WH3r4lTI01evp
bfmwB8mun2TY02oDKmSghUTEZQQ2vWxoq9b32pqO+zbdKbLXq9nXo+w9FtapzGzz4itxVTamzUdm
oUJc9VmxZUH+QrJv3a/YXN+LBsh/Bz4zDEOAvEx0cl6zkcQoP445IDGfBVhOK5Gc/Y0qxVqXqmtn
1a5/vPu2tGdmhMLS3aprlfYhL+xc2arVaphq1DGo0jPjapja2RVWEAWelWNlQQqahMZh8RnwF2LW
C3Ex7+q0U29muy43s0rVnLVx7AsLuZkzkWGMHMOfprAKwIykZhFJMp1Gedlr3VNWh5dBqvVq97s+
3dZ/PVaFVumo5RhiDMocIObQluWrcSNuoX13azTV3r9V17JNuUChTsayZKlqVcZ6ywnsJsOPeNyn
Xbf7GnZBA+hxPBhQGetREwrOyNLDfRbRtGypNulQzrtbL1Pr21rtX1/IsRV3KfiebXepkaxaGNmQ
XdgWYGEwjEzB9/HI4wjcS1uJ7Dy9qrPbzVWw3NRPwhYQlkIJaDlEdAKMkVM3DUGbcy7booD9vWwu
3di2EZil1g3NoqXcQtmECEHDOiQEkCywQbFqwbdjEEEYnNxC/IexEF28QHvdpnM+0S2tKmay0+kY
I4lfLDAjLmcfJEVeRlYhswETkUrrQDfIerqtzelmp1fVDc/2DYthLWFvEBsp16fMeouPieqMqqAR
nlVK29b1MrDYrNdt2q1jf3Km1uydYl/N6b60S7XWdUw/dJ2NnpdLmtdr2Sa1gspOPqQIOUYkm1q2
svDLZjW+Dt9guxragSuvW1dnYAba1rHVbHal1WpjkMWGgvXVVWW0q+1XxIA+mcwiLxJn8q/u1DmG
qxRhhFcqFOSLDn2ZJdVgseJcQBcmeasa7jWyvYCu9erp2G+1bPXY4QkfafecRjBEJOMg/Q5Itpsd
lyqkEwqZk5qsbj7cQ2jFuy0Z2ecGwVRYXRYbZzaV5VLHBYeqwPSUQkCC1XVV5FlbKLgetoqkirUL
BDtG6jrvZV1+vTq07H+zU0zd3bdu3BY+MUBS+wfcK/Dp4lxypyZjMBVZro1jhvjWbBSysfe5BaLN
ZlFOy9Rp3leLaSOtrD9pO0QW7HALsUawsuSlatZcGZAhbE5eeWZUxs27SgoWu6+/5F1NNYa2xNDZ
uC3b+jPkF4zV2hrFQGwEo9ft2ghjDBDFy2M5xMkT7wfckEkkn+asykXOILFaBaWnx6zH1Dj1OqMt
kDfgpILNBYxPtxDc3Nb2UJeefyPzS5AlLW2gpgcayTRaJ6rMrquSfB/KDMMPiBS0/BTYXaVJc9mx
TfrhiIbgI1rGFiZ5gGZVWS11hJOTEODSxsraskkERCRLCiBbCznb9UXdZl1K7to0aVOpD69Wje3r
r3doTyIQKDiKGMqu9YZsujAgDMccScCaWuNh31uC7Wu7VI9lo4OsIBBpBj6jMbNS2uLsWJOj33Pa
S1S27TrI+16qEQ3F6VtQk3V8xsJkPWw8ZCqksAcIlj2WKa7iXVrdy4WP23OqsmLssoNhaMMj7Cy5
RqMxJ8iZH0B8ZP1z4ErJxxBAKrPKhiyqHyqO+Q9RjV0NDRUxXXNbqgDOMPWQZz4kkpFdot7BqN+2
qfuZLrYbqlsqDC2zibHEwpJDCZE/kCWhr8V1W3NV09dcFyVLuUNtVW1OjYmJiBclVjMK6mfJ5TJE
ovaprVDC5Byc+FDOccFrSy5uv6euldfcrdka+9+62OBdyxJyaum2wrHyFZyeu3RWarlYgg0tlQ2J
sDK+0zU3PUNjskXWs3Npl17R66bFtW+oqa9RthTZasq2WC2JXcep1nr7mbPnYJNW1Tba4s2WEWx2
Is8i85F4wuw0W9stYOLW2q99ru4uMLc4oCwZ4kNny0AbHAkcDkVEk67mLrz44E9Ag11MFFYhrqnC
sAqoLkYrY8CCpIsLAK8WtVVnCoWBBsUQPyhZgRYCfYhmKMjVpy+pzj6bhfi2oMXCVnLfJsEN9rRN
u3NHb2hfnkmzaHJNipmX45j7AxpUaT1Hbwg5OdXqb7pr9dra4/2rqBZWy+SPoBKly2xYHf6kAjVb
NdjHhgTXr9dY0jetdNeqg7qiupu3QW399slb7rr3YxQZ7tgRmzEexWr7fs6xtb+7tL+cpYgqczjy
DhQwTMZCA1eZS3FqLTW+UsTVDUP29XrgseDYcHpdpv3WbRAv3MBkYLRexJDFQoUBuOCuAbTkXNPf
xX5AyGVoa6jGqpIFKYNQnATjBgQsFHIGcjn2AQOcFXAap1exbQiV2BVqZga7Gnpcj465FKLGBBKE
luCs34lhmOzrAwKozQurKGUJ7QDgNFdgV2XWJtBp7q2HGpo2vW0bUEfUfIqAVMLGbjFfB9oBGyxi
3HGr2DUxN2hz0VetbQMGZGbEV07bV+JumY+jnhTiY+gE/lSwBb7ocykiWdvr0Js7l2y2MBVzG8Bn
gHIrggAggRUzFo4g7NNUvvtug8FTEabVP9yz2A+1kC2HHNDEORTcVi3MJtj30ueEGZ0uf3mbbf8A
kbL8rlbNdn3JBmcxvE5HI8HlmZ8YAPM8ec5cF+QZ8orK7ec5JOPKFbRErsCrVZPUca6CqG8Lb7lY
khp5yUYn0DPpUuAim1nmXKIWQ2mtQ/JoX4HDEqQkd8gWKQ5cvglgOMDcyQJVgMbWVuZIFxEGw4nv
UzlQ8aito+vZga9qxqnihsYJgIMptKtpd7saTav+22IW/wBm1hu7P+y67jskp3dEiExBze5+T/Qf
bJMziUg4Y4gHEPaRQTkAEjzn+bMSD5K8QBgzM8ADYZYzlvogLMykSo+BkRsONrXs1j7WED5CJWZV
wxiVvkA+N6s+7icdLWD3E2mPyyrm8HibKFMbXhrdQczwD5P0DEDPgEcBn6ZUtxEXIjWMsTY/L3LF
uGfZk8gSOSy5mJpbD+/MSxwygzngs4IFh5EByy8VLViNRdYr6dgX4dxDtYJyJArIX1tg3EgOTGtW
Vlcnk0UeuBSxTwrhsU2lmDCAnAdAK7PYBewnuUxjS8bWyG1bTBVYk5MH9rCGwkV7jVyjtnl2CSTK
Pxr+/wBRDACxX8VcgNktGEZCSoYGMphOIFM+M4mOIYkkHCL5Y4hOZrn87kZihwymEAyu+p6+w6x9
aVDyuMqSCHYxXiEONsYJE6bx3M3wll1ftS6zTYtxwCiz1KJbRmNrAg0vCCsPkwYgI5FskkmcgQvI
K+DPxDMSSW/MMc+zEGyxKOjyyvkErVT5yfYprd3T1tBVktW5dSyj+uV/05fLW21q1Nd6nQb2Xamx
yXkiJW7rTU2XptMasFWAVnYlyrYU2APcWXJlFsXBi1Nt3VhxaK8LXkEggIeR1qrrQ631T2VmFNdw
2pU0+BhRp2rGovBbkBYvHXxD9BDKE8khVPk1DLqFYuQgJzDLHaVjzkGczj2ef7JgrUqVIiAkEYNb
FX9jK1gHJT4BlrANVvtUGAJGY9hClrLE9lizV2mDbTA1Iyk9Rrqe1m02Nu3YFjm0KldhvUoQxXD8
QW4rlqg8NH4trrDrgk67iEMJkYgOVyc58+MfafechC2RYqgV5RRbgCxjBe8XYJhtUn2ZjNieWDVt
lXtrNVrBPkhkZ0tCWqhN5Crs5jqxc/iFIAclQDyK1qpda2RVBHIrHYTkIpy1QybFrSL+VleOLP4b
xKahbZr0vqNdfQ+qF/EgwEiLa8GwRDtJg2VWCylHl2sUn3+n2g8lV4JdZkCUjAblWy2q4V5kCEAn
A+hhMM5EEuYLWAW5DMZnIwksEmSJe3JpiZoFTtaEWu5VaogIhBVhZVyweltJ7adkxS9HBmthmQKs
PGE8RkQqDHHhiCSFY+rkTQ2fVcQ+qpDa5BNLqP5kARRmfzxBCZnADkwEcm8HChgQzcvBubiLMD2g
j2mC1WBK5al2K1MpFrT3njXaTBc+eTEs+GFn42WkOLwkNuZixm9Fxg0hFoRC23YEAcQbHFVfFy2A
wODOBIqdq7a9+h2swbVxCsHgFBkpklRGqODy9LWPWtiq8IxD4FYCRrGcsfP2lYPrwIa0wK+JCsYw
KnJ+gi+YcT+Sr4KDJqzAzoQ3MBgIpAYAEWkc1RmjIckGEZhdzMxj4qfi9q+t+kJ/eZ2IrOwuVbUJ
5lpgcuOSUGeKg5rxypB9qicwQVOCFwfs1fg0qY1LGNQs+MGhosSEMJ4+n3+gxORwD4BMH3/mvgfz
5HPJgPYAKruQo9WeSq1tUqrctZT+Na2Evr24r1WYPrViV6tTR6UQpc6hizE5A8E8ASKWJ2aWWmsA
uNaqeutYEYQMwHNSAmRwOMMIDAhMSrm1qUArsPUPY1tf82RlGOQ4ZjHMPgAZP3gGE+mCYzhAMs2P
MI8KIRMeAIBmBcl9cMPRag5DIPk2cKw45fJGGuzGuANbCyLrsxt1TxKWYIOfJnSj/wC5m+vLa4/3
NesBTcRPY5nJpjJInB4EaNYin3oImw5YNyn48vucGcDB4BrDD1ZHrYj0IY1FZJ1LcGl1AmQIRmeA
R9/sP5DzPIhfIDCI2JUOcOrPirgayieiuCpMJRkimoRVXEuv16jTraL0Ci/2v1KmtaKeKlYTfN5i
VGQ1N68F4MGqSYMZCYKjM2IWagBLFVnstsBVgSSJgyk4bCq2F44mPwMc5IyABkkgTIir5dwv0AwM
T7ACKIfIIzOMV0nvVYbnzbt2FfYHguAjWo9ZAMankxoeuwV1l/YOerb7ADNpnNIdQSAZ0v8A+xNy
lxt1pYAWs5e/EW+skelhc1aQNYY7vioMbF06SRVSpUKYoWDiIoAC+QmMKqmetoKmi1ICaaw3ALGR
o1eY9XOHWUg6ziNW6weGyME5gyJkgfYYzEYCUMSyEzyRynkHkIocla7WCoqznXNuo3nlbW633zn2
Vs10eqo1q0s5INjTFwPWODTQ1aKMFnsErrRi5q5ja9aMOZWq0xqcQZSG3BNyZ5Ajic7LFHDqYWzM
ggiAZLHET7keR5ljY+hHn+eJieFCiviXUQ2Jhnc/UmFop5NwOQTPMAxFsYRlSwemtxbr1eymu7Nj
/gchgZ0v/wCxNlsbLWeHrdwussSqtQDgMpiEAXEFamVXSxTDgxeAUBjFRgFCCYFY5HBc4aysj21M
AOQTyFVQOMU4gVCTWqxqwB6uJsoODr1RtQktr3CeVLMTAfC/1VNxi8jK6HMFFYKpTApE43E+uxSj
1zCiEAjYQC6tsQdxqiVbSbbeIxn9xGeyxS24BK9wwsrw0owNAhVInJIbyIXaxfi4gpURhrJG2QA9
9rEhrNXgxg5iVWOzPkQCFSSg/KMeAIJmIV8DiIDWT7F5Oq4AIH0yYMQ+ItfI16VLJZoeCjIxGSCM
mCeYtodHuZJTtER7NcnaqrJInS//ALE325XowVUdSfYGJsySxYEtklzGXM9YnIzk8yywvXg21w3g
xrnI/MjgBMCDEUuCGtEV1LFPLqMFEwQMHBDIMNWzKB4FSsBUwPq2CRqBo+hVBogsumUZKgQKlENK
mcuM9igJythpsyYqoY3JZsgi9fEJwevs4R7mLLbH2XDnYZ15K5yFjWOkr3mWV73KLtoxIpZjdQoO
4qhr3YcfJIE/mqBxWAKlWvIXE5KYy8oVwYgh/EAZAHksiS534+97ArWKA4BLEnz9MgQtObxazFBB
qvAle5by27xa4Ycn9JIbAz4BjWYHtET+mgEhQXZ0GOmP/wBzN1SdwIwUIAMHAVjFRjPWwgQQmsHm
ATY5hZ2mDMCAQZmGhKie2kQ7NYg27ATZt2H4t5ler6h5I8cMjHEEBMhUyBWog9aQEmAsR5xWsPJW
cBmKlAHaC5SDyaep8gsJlIHObL3CqiqOKGbylbfU5liOrV5rDuxYWkN7HJVgosbJ9hy1pw1nIKCE
DDiGIClyOOAWWNhgKm5ccEclFRMQM0TiGKqSSqqwLAqRE+/HMIVRtOrHVLGbLYNXEIMk+wE8swkw
wflG9zCus8+XEBrArVqSn4z7g+YDg5BHmawV7vjUuG6+oizR5o2lcjFWqfm7v02R3U20I22VMfhA
4UczOVhGGgSEeZhzCjQvSsOzWIdvEa6woKth58YgmivHqHGlVCFUyw8HnxXyoTADqkNuCS8LFgAC
uDhMwB88WUzIyH5TxkAKzIwjCtwOKwtcsNqmIlKw1IwIsQJZaz7lPI4urHF3D54tklYfMU+Sck/d
fuSMewspZhFcqWvInuZigbllhGcCF7Mi8GUsCBaM88wF3A5A8mVySSDg2vYFtV3YBBGXggFjAGj0
n+1EtJUG1otFwHC4x1s5Vs4UmHyQDMGclzxBXImczzMxTgpa6xNy5VTfLD5uu0LarjbSlZ0wB7ab
an5YxD9wCJhzDkQtWB76RDtCfKuMcX2N8a3I10ydZABwCqyYDAQ5YBSSVxFYgm44Fjzk8w8UEkox
BXICqYCwP9JIGWY49gaK2SC6l8EqnGYLjOBlUY+yciwBGfKQgsF9ZnO7lY4ErvQg16zzYQIx54ZW
UCA4g+xAwcE+YB4bycoAM4DfijEH2+Fu8PYHAIA5ANS2NQHkEdGUWqZ7fw9vhbBxPFoUDRUwWBaM
HYemwgoK05ZZWxFquIB/H7wlRAVaKCRYzqSzytmceoBqscMLyJTIhBEBmA0Hq9ThKw9leWS3LlmP
TeO3m5wXaa6pY20J8mxpWbmPptJ+OBPTUC4qVKiDFdVnIElyYXeYBgUEL+MxODziYtYIK4YAZI8B
WM85HmcgI1qGNcOTXEj2uYcmeILTA1dgDEBXIJZVh9bQlhObAutYP5MK2Zp6+B8icWYFQBnVrNm0
ImzkOtDyzSVi2pegZHBIKkHx4nkwhgPOQeMYkiZGPuMT7EHMYZbZIqqXy2TFLcvcIrg1i3iNdkaB
8QbBjWKR+CQXKAK0DXawlFYUexy73Geu6xF2Ckd/zqckvR655D8n12cAkrhGZ1V66ytdYwcTAn2h
sHGu849gBF3MMAJ0/nuJtUltz01qSlOM8ZniOYIFmDksXQiL4IzkDicZ+n8yonAzgeKhcBcRVwXX
MwFnsUD3KILiI1rE8jPOOM8T1sQEMFJxwAgsAnsUQlHFThg3JSFcBVriswJRjPWka6lYdoxthiGt
Yl67ifS7xaGi0WCYtBFkyjizVpYnRVo2hYI9VoPkQ+RMkTJMJyftBmA4i8uWpX7L9q32XAGEYgBy
2FK4xyAgdVNziBl4luMNpwRzVGKhLsxWJJt8qRlw9cakvBru5sR0dbbAr2gxieTNS8rTAsARfa7L
Y78qbfYbLK1hC8CsEsEqAjW5PSqD283ePy1LAfcsuB4MEIBPEQLmFCIVBnrOFzhQpLgCFWwqqwzW
h9gBa8tDaZzzA6wEmYEwMhSYARAmTxWEqILFhuM9xyW8pWWi1Vg8AkyGilzAnEmyoRtnAfaIBsZ4
EvY/HM+KuBR444ihIyKZxUTgsKiGtYa8EcgRYYGWeuto+pQ0brlj6VyBkdTif1QZP0GTEA1tQ/0n
7QQHzk4HHiTCfIaElieZi8iXIBSxsJaILawAXWNc4hcrBYBOVb2eoYsqJrXgUVFLGyzJZbAwIZVa
xTml7HFrJbX68gkeYwBBHH6dIf8A7ibRX5XiEDI8wqYU8cchFbBGDjJIbirNjwTnjDdmG0znmDM/
n4gPkq8VGnAT8ROYE9gz7G4glgbACXYDlawayxZWoKIQIAQcZXkgHvxGuaNcIGvaLUxldCT1hTjM
4qITFzCMxQ8JGSQYAGgSMMEgmeSOIhRYQIFeMziLcMZrYNRS0bRqYtouhtqtzrVNbZulnnkEnJz5
rpeyDRYQ6hw2vasPKcvByYPtkkDEViJyyWOG5DiDki1s8wxJVYpOFtXl7XYBk4mkGtFsICIpauu1
jpnA1Lfa9ZzxArUEQZMVsg+ZkzpAf3mbin5Q4xQwgH5MPHgziwAK55KJyJJsJhJxkmeBDkxROBnD
MFSiFUENiqFvVwXYzJhIM5qCXzOTQIwGMwErOJDessBWon45L8YXImXc+lifQmQoAAxPuRyz95gC
HzCpnICZJnnJIADNniSFU4IxCuYFQMwWccnAnERl8muemGtxM2QORZyDAqEQ645XaTMW07hKNRVG
cDkDGTMbIgAItowFyAcCeeOSIxzGAWEHPjAgbABKqPJOVLfdGLKrgBLck3ExLnENjJZ7TFsUxlrc
2atUXWClNYAPW6D+oTpR/wDcTdC/KOAFtUjmDOZzyJhInkwFhPJg8niZwzAgEwBOYE9ghZ4SRGsE
V8wWBS7lhg44+RlZ4YKGwqsJwVpgCFwsNoJDMSanM9aRQuABPOAQIFgUGFRAs4rMgT8oOUYefBnA
GcUBJOADjmJyOSpM4BguEh5GKhM4AEoDOEK+IVBjIItIYtlnw8GcO5nIQkYJwQ2YyAhvEVnEdeDL
jJPkjwfM854+CWMTU2LA+nsCPTas+08sK63sIq4xlfKieQVLCFyItzqWZvWl39pH8Ncwd7XaKzOD
RGBM6Tz283QDt2oXrSyylkbmDxWAEzi0FZnACDgIbFnshdpyJjNABCwB5vGZ3DKVlX9PHmFDCBDk
oZ4E5ATkI1oEDPAGYLWM8ACoUxV8IoBtVMjAgOYMmYxPGDkFczxDkEWznygwDyXDsqkOGjYYFChy
hgaEMYEmMAsyQWZPIQ+QAYfxPmALCFzaeI4gKzqIXJLwZEU5jAmKcFh+LVghqWhVhGR1JBnmAkHJ
BVjnRqrYq/I2VuZydBkPD11TA6ba5cZmYa1aNS2Qljr+GMRXAg5O1dOIQFbKg8sQueRfmenHHups
8xtYyb6a7VX2a7o9ZU2iC0kszcvJijJPHJOIOBGcHLtOBWcBgAxlAGMj1icAJkCFvBs8cmxi14tf
A8VniACKME4MAEUrCVBJJARZ/SSeU45mFEYqSA0CZHkQryi1wjBZDjAwqoI4zMGGquArgkiAgnzC
MEBfp95icMkIQWAnIAG38jbiM4MDeScBhmKSpJyq5L5wOfI5AjMSFdRDVTYH0KWNnXPmzWvSV1OW
1676bP5/IOHc5FrRbmllpdSBCPMBMY3pWweLVYyikgqiKA0JzPBjcRC2CG8dMM9zNpD8rKKeQc3K
GVKQFb8YvmWCYQTlxI5PGqwQqxSYfBLLhC+CMzAEL4ntBHIxQ7Q1lYE8geABAuCQvH1GccHImAIz
YIw0CNADhkgOD5gJgwIcGZgIJ8Y/KAH6Akwr4UgTkROUyBOIaAAT+mZmJ4hVoMiCxQGdxAwh2CTY
+YFWOogQmepocz8oRmL/AEVKY7F2GFDNiEmcgYljoQ7kG22HYeKbGhU4HiDwSIREPgCHIU+Ivk0a
65Jsy1RaX01M1HX2XQ9T2AD9f2Sx6N5Yy3YCB1dQsRszpf8A9ibyt8gNUAeQIJYuqYPkJ4jFsjJg
HjHEkggcscAxARYXALW+PbB7GnrJgrAg+wWeRCgiq0C5JQgA5gwJxYzxj1eQgUAqBB5OMwTxjwZw
WEYn4khFEYkFeWMNCMxV4zIhVSeLA8FMAUQ5P0JwBlieAjO05c4FYQos/wDjaGAXCLzOfYc+yC2G
0kqSYeULqJ7hOYYf0BM4JyWIhAAyTEJU+DA6whs5sifcDEdoOTBOCx7pq87GbVLuBVUovENvsFjM
JyYsnuQ0bz8V2CQ1zZ+QMb+z7dvrtZNm23gl3RWBu3m8uNxiuAHxxzGXjCsxxPEzi0CTCg8wB7cz
mccnMCHIqWCsiBDMAHEC5g8E8jApilsEPjkTFJwxGFLQEGMoJwwAAMxiHBmFYeRCAZwzAIy5io0w
AePgkgh/HIQZMAMPj6+BPMIWAgQkzhyKrxjZE9pz9wDLjzsvfLwggkGEnGZywCfDmAMYFVYHEY+F
BJMP2ziAEzBE/qPBoEUT2Ko5sxSsLLmOAORYgN8h1I2riWsdj+QAudYt9mFvi3mNYRKt62qUbS3K
WBXY13otrWwU+myf66pr7ubRA3GAMHkMPOCYEhCCckENsa0TmzQKzQVARFxOAJ44gEAg8Q/QQhcr
gfQlQA4yxGVwYDOIBVlEYjAY4wGLAwFiVAMOYMwgz7zOJ95zgZsf1AKBDADOJyfE+88gk+TgzyIS
BOXjOALFaACEkHOSrogvuhYCKCSo5M5/JhDGaZJmcwkgIohAEXyPIjD8XOBnMTwvIABhCVaEEEIW
ikJFYmHLB34gNmecgEkLD9j985+imZMBzK34E7LuLLbGHtyHtxOkuY91N3LbWGh4iewCNacm3ILF
phyFrgRcCsxayYVxMQAiBVMbxPyMwJ4E8mYImIpjKTAGBwAwhHKcDiZAmVzlYxGFKwMY2MiBiISD
DhW8zisxkGeSfEyIrZhsiuuMqYQZgEYAmBAjZwJyEwIwInJlitmOAA1VjIcg8swOFHOAZjTGZmfY
mKoIyADiAlg/9JJZgvEE+AMk14hXEVvAcgg5g8QA8HOWgGTgCDxDDFzFWBQJgQfTM5Sv05fVodOk
0qf3abljDbaxstYM4cxKeUaniBgTj544KjyFbHGY88czg2fxhOYOWWUQBc8sQDlApgC5OQeQMKmI
IPM8Qhc/cL4HriqoIIUn8oCJ/SMgzJhmcE5hLCFjDzigNOIEIYHBacBMCZEIik8RgTDReTTAgbEO
CDVmIoQXsWBrImzS6PAc/TM5T7/Rh5xBCfAJMCWGGhyPUyTznkxgyDkZYCMs5ERWIiupJYksfKKT
9cQwgkquIMTMzmAzM/nnE5eVtas9PelnbTersO2tCEFVQg5iowHHMCCBPOFn3i8gTwyWIhHgZMIA
inEJbOIMQtmE8phhAwMZnEVswKYcrDmZMIxAVEJ8/ecjPvAXhGZgCcvA5GMpmQJj8Q6rAoAdX5Lz
ihzP5FiD5MUEEFTGTMAbPFs4wARGwYoGBkAsDPzIXD3fjLqhYrqa2+n8gDAjRVeChmHoAiV1EqiA
tYAeZYllEF7KPcCS1TQpWQaXEOcj7sgMzxIxisgy5SLcgAfcCYmIAPpj6gTMLQtCwn3HSqP3mbpY
bQQY48WQFiFeHMKkwKwmFyzEFcmYMOFnLlAGzxwq8BCTCCw44nEYIxFIMwcsDkORC2BnIUAR3cMD
P5Boc5GZnjM5Ay0KAguyxTygBA55JAM42RMpOc5+Q3I+YzEHkDCGhDABzkPA/nIyc45mKxaKCDyB
jPhRZ6gd0x9l2hPMFcwAFuOG4YhKCNYTP6UqOVQcY7iM8NnjnDZPZgc8wNBaRBe051NPWGLo4i+D
ScTZxzrrYwV4mFE5AHlCxP0JBmFnGEH6GcSZxmMTpf8A9ibiBtlqsBSmMkOzvn1eFPjxj8Mnjk+Q
M5bjgYinw+GASczGJMCsZ4EJxMwGeQAAD9oGxPBAZWnA5VQB4EAaFiIGGcsCARMzOATkYacRAFE8
w4IIUEHwGMIOAMwKgmROUalTFQpMTiDOJBKqwPkcisBJl7Yr3VDVMSIDAxE+8qH5nipZ8wyqvMsY
E05ljhQzmGyF4XJ+mfAhzEQzh54gT8YLwsN9TylWBsp8/nM+c/XH0xmET7zzMkTxBBDOlH/3M2sf
J5YJRSEtwrurwlyBYSBloK2zgYycYJhXwFEAEZTBkwqDHJSB/IUkBQB+IgnKYxAwMK+VM8TBILEF
bMQNGHIKAI5fAsIJBg4qMiHnE5mHiYGwOcKHAV8+ASSRyxPvMqI7EQODDkRiJyxPywDCrCEy42E5
Z7Hra1HqZLDgSlORdF4ocFjn6V1lo7wYyrhFttjPk8vODMGYOPWSVXACgzHniZweGmwxdZJ6VEAV
WsXkChEIGSAYVOSMfTEx5xCPoZ9vpkiBgZ0wP7xL9DpTsft/RQdf0U/b+jh67ouf7d0kHXdDy/b+
kn7f0kPX9Hk9f0Wf2/pZ+39Hn9v6OfA6SN1/Swdf0c/b+kg0OjwOv6Sft/SZGh0mf2/o8jr+ln7f
0kPX9Ngdf0U/b+jh6/ooOv6Oft/S4fr+iwOv6AQaHS4HX9LP2/o4nX9LyGh02W6/pcr1/Sz4HSRd
Dps/t/UZHX9Ln4HTQ6PR5bQ6Uj9v6SfA6XHwOkjaHUGft/Sz9v6WJodMBXo9Jl9Dp8podLg6HS5r
0ekBOh08+D00Oj0sHX9Px1eu6NW+B0s2uv6Bi3Wf67lOt/1/gOt6AT9t/wBfz+2f69B1n+uAt1vQ
kHq/9e5DrP8AXJ+2/wCvcH6r/Xcjqv8AWoOq/wBan7X/AK3gdX/rc/bP9dg6z/XcDrP9cyvW/wCu
CDruhwes6TJ6zo4es/1+ftf+vY/augxV1f8ArosPXdHP27oYeu6Cftv+vz9s6CftvQY/bP8AXp+2
f6/P2z/XsnrP9en7Z/r0PWf67k9X/rsPV/65n9r/ANdn7X/rk/a/9cz1fXdEnYz/2gAIAQICBj8A
XI//2gAIAQMCBj8AXI//2gAIAQEBBj8AmBuzAEj9Y59q/Gn94/FfjT+8fivxp/ePxX40/vH4r8af
3j8V+NP7x+K/Gn94/FfjT+8fivxp/ePxX40/vH4r8af3j8V+NP7x+K/Gn94/FfjT+8fivxp/ePxX
40/vH4r8af3j8V+NP7x+K/Gn94/FfjT+8fivxp/ePxX40/vH4r8af3j8V+NP7x+K/Gn94/Ffiz+8
fivxZ/ePxX4s/vH4r8Wf3j8V+LP7x+K/Fn94r8Wf3j8V+LP7xX4s/vH4r8Wf3ivxZ/eK/Fn94r8W
f3ivxZ/eK/Fn94r8Wf3ivxZ/eK/Fn94/Ffiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP
7xX4s/vFfiz+8fivxZ/ePxX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s
/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8
V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4
s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+8V+LP7xX4s/vFfiz+
8V+LP7xX4s/vFfiz+8V+LP7xWyDuSIM4ggyOY4bn2j7/AM7dn/eR944bn2j7/wA7dn/eR944bn2j
7/zt2f8AeR944T+0ff8Anbs/7yPvHCf2j7/zt2f95H3jhP7R9/527P24+8cJ/aPv/O3Z+3H3jhP7
R9/527X24+8cJ/aPv/O3a+3H38J/aPv/ADt2vtx9/Cf2j7/zt2vtx9/Cf2j7/wA7dv7cffwn9o+/
87dv7Uffwn9o+/8AO3b+0Pfwn9o+/wDO2H2h7+E/tH3/AJ2w+0Pfwn9o+/8AO2H2h7+E/tH3/nbD
tHv4T+0ff+dse0e/hP7R9/52x7Rwn9o+/wDO2PaOE/tH3/naO0cJ/aPv/O0do4T+0ff+do7eE/tH
3/naO3hP7R9/52jt4T+0ff8AnaO3hP7R9/52jtHv4T+0ff8AnYZTLRGJWzs7YaB3YAnE9Q4T+0ff
+dLmwTQkCeXA7cC8IYDEr0+o182DD+8OE/tH3/nTKW4RYtHNAzLRJ7g6O3tPqlQk0ojYBem/3sP8
Q4T+0ff+dD3mflCMpyMiVZTjvwMtMSduQuC1O5Q24g6twnSy2N3WJCG5CUhyBB4T+0ff+c5nItGI
qjOXSBSMcWXRTmvmUurAoeq3jETAEQaACKjGMxIyIAatX4T+0ff+cVaLqnEd4XVux8U22+4eVl0N
DsqURKZMTg61FgtcaxFyEwUZTPSCCR3re9FK8Drg/L9C9NswqfNhHxkOE/tH3/m91zA5Om1GR5Bf
s4EnmjpkIA5J5bhJ7Srkp5eCaNBknJpmowP4bsStB+U2ITisTcYEIeo9MRpH4kMY/oUYguSbLZ35
yEhPpkQbgU9y2ZSk22N6Bhz6gQ3Cf2j7/wA3ZTNoglSlKpJTgUTxIVS6oFqkUIxeRJYAJyw9qIO5
TEMtcaxXkzLzhbmFpl3HEI6SxxGEgpboHXSOgXGqhIU9qT9JE49gXod+JeW3uw29zP5gx4T+0ff+
bnYpbG3LVKVyLBMDUppBnsDfwRMiCweRPNPtxDC5CdgiWcnmogYFAl0xK0GoIQnGjFxzCG5G5Fsi
iDe8ZZLXA6ZxK8yJ64u/wXpIRJ0y39uJGBBmOEvtH3/m20uqZtAfSmMtMP1Y0CrdM9RZrrpmNqja
p1NUNcjvTP1pVEW5Ibe0Gc9RxK2yS05x1EYI8BR5CiM2LDHmq43TAVHylHanSEy3Yc0+BWqPzx9q
juAPH60TYr0G/tnVGW/sktg848J9p9/5p1XzDxVCPH8hzQZo7fpyJSxncBGU5PI3epVi4zTFUHBp
mkmBJW1KcX82QG3DD7RUdwWj0rJeTvHTrpCeUsHR2nqKHtyW1sxIG588xzKEZQd6girha96JjCP1
TTVyUvVRjEbbj5cEdrdk0tsOwDkjkm29pv7UqlaxEiE/YVsem3TTzIS2+0SBbhLtPv8AzQclgMSt
O3+0l7EerSDhGicyJ705JTglDZ3JNuRs+ITbm4AcrnwCbahKZwJoE0paYvSMbKsigRjYoiRfiXtw
2Nndk8BLpfBTOTF1QunAIW3Oe2JeogG1YSaxIQ3N7aBhLExZ+9aIttbuZDp5bwDglEbm6ZnSZSHJ
R3ACBeBs8VD1Am21L34qXpQKs8JG4kF6SExplD1O0COyYfhLtPv/ADO1bkhEJtmJJ/Wl8F1ycZYJ
34sU1ym9yMSa5q9U72Wod6Y4qt0+XAgVe5K6T3ISsYlx3KG1AETLeY/LJNENzKIupes3xq0NpBxk
UHiCLSgRYrz/AEL3rtjDsXlb7mJoXuFKWrVGZcjlhFGO2P2u0H2+wYKXoN8tt7p6X+rP9PD0/rYd
G9tbkJyymIyEuEu0+/8AMwzmQIi5KMfT/eP0IynIyPPjy4ml/FNwcXGKKKY1CeK1Cr3VccEGugYg
ubBrrTN4yH1cVua5iMogGAl9ZPYihC5KMWfEra9LH6o1zHM2Q2yemdCOaJjgD20R9RtjTvGpgMf0
oxhIxINQU1I70R1w+kIeu2A0JnqbCf6VHUf2+107gzyKHCXaff8AmWdzcLAWGJKqdMBaI/KICAGF
0e1WXcuSYY0dFOVW2SpY4ZJhfFaxESlhqDqMpybT8ojQDwRlIkk3JRdUxvwO5IUjfsC3Nw/WkWHI
WUJ/qyBQIsR71gSj6jYDb0fmj+sPihu7RMZwKnszYTIacMjhIIw3X0atG4M45/SoziXjJiCMQeEu
0+/8yjubhaMUZyLRHyxyHGtVWicWTyoukM9E6J58H5JiuaaRfksymvJUqcE5NcVQd5Qf8h5Fgpyj
0y3ARHvURB5CQclUr7ltGRqRXuouSAlUjHNS9Z6aNb7sBiP1go7+yWIuMxkVt/zDZkI+oh07u2T1
LZ/l29Ey3DuRhtyf6siB7H4S7T7/AMyTORaMQ5KIiW2o/KPpV19HFsSnNZJhVAiybErs4EG6oPZw
YLM5pzimFAqLNBrIEIUqmjUpzdbXpxMiErobYLQZ2HNZDBbZsYGQ53RiaSHtVUdchPcw2wQ/epzj
EQEi+kWDpjivQtb952vbOPCXaff+ZP7rA89w/R+SUJFUoFmmNsAqJuarjxuuaL2XYq0CbDAKiZ7o
B7XTRpHPHhVbJYSE+kvzxXnSGqMABGOcjnyU/U74MNmBaTXJwjFbu2BpjCQlGNyxRlD8VzIHnh7F
+7+mOgkftJYg4xCJNTmr+KEtsEwJY7jHS+Tr0RuR6jaIPLXHhLtPv/6nt/1DPd/VFO1Gci8pFyrq
/crlVL8lWnNcvyeQ/IojzVaDFdI7yrqoBVLZKosqfLgE6p4Ky9Pr+XUH76IRocicDmo+kieiPUf1
pSOJUgzRlAtHNlKMD+3k4EC4MXxTmpN1pjUqPqPVvt7FxG0p/AI+k8sDYIbSKMvTenfUI7+0QcwZ
gjhLtPv/AOqL8L//ABw9MPmlWXYr/l08OA4Uqr4K45K48VUh+1OCCvm9ipId9E5FM+FFzlR/yHlc
2itvcNK9yjthxLSJRf6wIUfURi+qhPMWUJgHcnJ4iAIqZUupy3h+1JbRGtqMGQ39wjbhI6Y7f15F
2bIKO/6geZu3jA/LH4lU4eh9ZAVjvbcdzs1hjwl2n3/9SV/6k3ZXYsOwcXFlTGyf8jUaKpfsXSO9
XZVVlZORVXYK59ypIJrLqpzFkBFmzVMMOGSzlgMu1OanNRpYlem34fPEDT2hGen5oagMRIYeKBB0
yBDHEIw3IGUt1gPUmpJJs2HctwRA8okHUKuQ79/5AfPhLtPv/wCqLf8Ax5nGBng4sO1GWJJJJ4V8
AqUXYqgVVQycmi6I95VTw5L6Uyb2L+0vpV0wFUHHitJCeNQqWxCpQ5cHPcESak8JxyLqE8Yaok4g
Pgj5lNsTMQbs4F1KEKxPVBq9JsVCMg8IiEvMB+sLhCMQwFvyBwl2n3/mOYyqDQgrc2/qu8ew8H/I
rWWXxTk/k5HhpF8SuSYY4ISht6IG09zpHcLqXmeoJ3SOnSGiD3qW1uDqiWOKfg4umNCmn4oPlbgy
lA8n76KezuRabkgYkFCADyLy0veq29zeJMtuLRjghGIEQMAArq6urqNcRwl2n3/mRt+oH2ZfRxZf
Smhf9b4Kp/JpQ5KtGQlN4enHzTxlyio7J9PAxiGBbq8U+iQ5CRZatnaAn+seqXiVfg/y7sfln9BR
292OmQ9vYm4MbrlgmNQnFk+J9yELawYubBRhvEa9LSlhQKIEh0gAFxZMJAnkU35DSmAeZW1tg6pS
nGNLAk8Jdp9/5gNOcQRgSvxPYV879xQOt3yFQqbgHI0Tncix5hV3Y+KfzIt2hT2xuDVeN7hMqBPI
Bk1hkODrtTp04qcQqobnrHG2Kx2wKy7eSEYQOkUAAADJ9B8QqQ8SqQFOaFBEnvTiQ7gF83sCEN5p
NYsHHYV0yBjzutTiQ5KqbApxZcsQnFvcoE21AHvQgJEygBf6zD4JiqFXV1QnxTbm6QMnZfNqOQXp
BaPn7dP744S7T7/+lurq6vwurq/C6v8AlXV/yr/9FdX43V1fhb8hyWVdwe9UeXZRdEQO2qejdlk8
pkjIWTkLpumN1U8C6DlUCa6l2qvhx7ODEh011QLJDckPsj6SnJJPNVVkyqPDg8SqsWVmVC3bweUQ
ScUQIAc0HqDYpxbFOPBPGh9y2dy5YQn9oI06J9UfgqcXFzZYyJVYt2r00pScjd2zTlIcJhvrH38a
lYqhKurq6uVdX/8Ajr8L8L/k3CuqzFF8/vXzHwWK+UsumIHM1XVIn8i/imNEWKsyYVCLP2KrhOJM
huTi8CWEuaatcUZyNrYF0TZzdOXJTaV8oXSASqnjSqBlbniUWP5bgsjpqRgFWJHaq+/gJ6g3MgIM
XnjVOJW+qmkGTjwTihX7vKWkTfpNicG5rX9aBfxoeFV0B5G2C1bx1y7aJogAck5LBeljEdJ3tsOf
tDhuP+sffwoUzKBP1xqDKitVWIKseZVi3YqA1tRVHbRWVInvBVll3LFUcqoPgrHwWJ7laXgnaTdi
sW7FQrsV2REpCMhg/wAFfnwv7U9SquAmBcJqnKysQWqyYiXtTurGpbvWoOQCxAuFT2lfQqRkex11
RI5lPZ1cK6Yv2q57FQWVPYqk8bUWthphUgqUYxEZt0ysjtzPUMrL5SRgqjxDLngysaqoLC6arYMq
05lUD+1dIPNVtjRC5CqKrTA9LuQSw9qMYlzndPIucuLmgTCg/IzWo2xKcYcrK1c1UDkVaqtw6ImX
MLzJbchD9ZqcOkkPiLqpJPM/lMajJPA9xoqBpZZpxSQtgv3T1B/aAjRufrDKXP8AJJOCAj8oXpP9
/t/4xwnpgZS1F3LYoaYFz8zm3e6fSAc3LIaiGGRPwW3LzoRBFBImJNb0CMQXILag7dz1TmUQMHk3
xXyQk1Pml4oebvaNy4jEGeGKMxu6YiQBuCxRGzvzbCRcE+DqUzuEzN5fFPrZDzN6QBuYuU+1vTY4
SFX7lHaG6YymWJl8o7l5kd7U31aYXsUTubhg1oguT2Ib3p94yw0EnU/+nJAeo1Mb6T9KfZ3tUSep
6acu1aJTJJsxYP2ry9yRkWvtyce5Dy5l8RVdUyD2Ituay400LkKveDf6FoJiDjiM7h1GEAZmXy6Y
khimmNPawHvR0sGr8oNRmg5gRGwMQhKUIajcsC6GggHsHwTSIjk0RTvUh51JAghiSRlioTl6yDSl
pMZRm4IFiL2U4me2GfRKoEmNrZLREWcivuXzmL3IPvUdO6ZGQOs6flJpc8lOEIncjKLEyDeGKA3N
qUg/Udc7eKlKIMoAPpBNCeZwT+WXNi1HRmYsBj+hNrqA5OkEe0KpLwBaNCCyO5ukidek8sGwUZQm
+81YG18U52o6eyncxTShEvjUN4JwBEHL4FSfb/a2EneJ7YokHRSkQCQeSI3HiZAhgEXEqeCnN5+Z
aIAHtQjGRgIigbVXvREpHSDjFUOprkRYINuiMMTpIAOWKIka/VNEXPWLEMQ6DSriREFDyyHyYDwA
ROsg5Uf3IRBet2COkiIeuaB3DGUbmMnLjtCls+mfSTUu7cl9PCiYVPsTm/GyuyAp24LRqAagAr7l
pEi2IwXUbdy0x2jLsclavLYC4K1Dbp4+9Ax2ySb1BRG3tsCa1Xk7gHl4jFaZBjeJNSp7flvvVAkB
j2um3YDW9+S0af2uBagVGGTBkLhUknkHAwQE9oRGBYH3IiMYuBdkNQAAF7EFT3YESN6U8RgiDQhD
a9QHFozI96DiNQ9giPLiBE3IAXTKEGDkFlLbiYyMmqAmiwAvKRYL08pNubnmwOrCPUOG7qBbXL3r
TIOCKLpyszoAkv2LajXXEEMQLIGReOQohDaAjKRAESbnk68ze2tESWFRJz3FFgK4KUTmDSz1QYAE
0JW3t7WmUwcGL8itQEWJYExD88VHa3ACducYjC9CtuEADANrg5NclXY0ym/XGNItRwfgpS3tyYAF
JAGRly5JySZEvWqqdVbYt3Ixi8tRsXIPgpbR6auYyrX3oeW0WDEkE99immesYt9C0xJJqKkK16EE
e1Q292MJE7gBcASEWclxW6kdv5XOkdhQkCAKPHEdqG0H24UB0SId8z3onfkQ4JMhIu45IPsDcIJJ
lLMWUpHZ8rcBp5VIsa11c0GBB5H2LS4pWJoS5VSTIl3cX7EDEHUDycEomW2XLAmUXPaeaMtwAeZV
tq1DbkgZzG2TgSQfBlLf1AxA+YGhbtQjJwSXIIMSx7UBPZ17gjq6xU4sETu7R9OdI6IdILFnLup+
l2idEWcm4DP1EKcJyJjERAqbk3Asjt6hISGuLBmejexEf2fpWxHbIEzIsZFhbNSlE6XLHTL5+zNG
EpAS/tlm8UNMotKwcZKWrf29qEbyJcnkyEIkSg7aj29iO3GLB3Bd/gmrowIVZEgWrV0CYkgilwhL
y5EEtqDSH/ZQIjKM44fKX+hRlt6twyFXEuk5FxVbfmbcown80pEl/BS2yTLboQQ4A8QENqGuZI6n
IiHwNExlpevU7UQlpiXwoT4YIz2xpyb6FKl7mik4qOaPaumR1RGnSRRzis3TmvIJzFojF0WoMhwo
q15JgGV1aiY7sds/W1PXsa6eUToGIBD86rRCW5rPyx0gD7xUfN2zIQoQz+1GAAG8SGjY+KlL1Wkb
ey8jtiJMiM3CkYwnsSZosHHgFplsGU3cVBHe6fZgNgioMcUIeqEYh+rc0gSPej5J1RjY2f2JpdJ/
sgVRFGH1qakNEwGuDX2oR1amwxCJMexw6iYuWFcgqmIPaAVWJIAwIL960aIibOekS/8AxJ9uMRAV
BLRJ7tRXlwBO5kVMQiI7sQJci+ClCcTGUSxBXkyYx+qTXSnlKhystUu5DX8kbDNMYghsl6cgmAju
QLCr9Q4boHpwDrNROeeTqW8fTkbECBJ5SoT2omMZ7cg48yMgQMncoajqEgJAit80JGBlDs+lCEXI
54IbkYkkF4jmFq3oTm5dyDp7mDISOqOFA4+hEkkRHzMxdQ6ZSINTqYNkzfSo7vzyiXYs3IW9q1ho
AkNGPyMOd37kdwTESatK3TzUtzdYylXpPSaXBigICcyzsASyEIbM/MuSxAI7xda5end61YEnMqe5
ugjcAJgC0Y2/WBDIzmSbE2NX/skrentwfbMiROR0Rr9pNPc26X0SE259JQhGe3MMRKDGJPjdSjte
ihvx0sNILg5khlD059J5e7EvLcrqPtVWq4i96qO3tw1dgH0IxIiNRDMQ/gjOQpKJkAGPyllolTKQ
umhNmwoboSmRDVYuO3CJUY7up5YCQNM/lC0O07sZVbOkSn0QLUcAnDltoTPRCERCTAnUI4kygEZx
pRScmkifao7xm0YBiLmuS/5ndl5WkFiTbBgobsC0zaJIJLnFnXpznt/RFMZAdDFyL6hT2qW448sy
j1PlGoW/KUhQRLA82UJ7EhIRiBIuBiTiykdyBkCGBBF0BtGMZxLiEi0pPgETv+mloGkagRnkp7+z
6eTbj6DIUfnqovJ3PTgSZtW2BGT9pdNCDyIo5r2GqMN/08ZyAoJBm50Rl+7w26j9rEF/ch5UJ7j5
Cx/uojSSTdwtvzA222kXq3ahMTj1TIG2CdQxcr95I1E00kvVmUYEiMgQQSOnvUYDdgd0Vlpcjuov
3ifqRGMQIyjEEzIGThkRt72+TIUkYxFlOUJF44M1O4ogyqLAIwlGPVYl9USiYyeXaFLUXmb2+hSR
GZRR3SdO2BUn3IACyYCpstW+dMmcQ+sVQMMuMduIcyLBlo2Y6ht/PIYy/QpQ2r3FHI7CoznsndlG
5kXdSE9uEIGkoM4WjaEIxNhEP/idGOoF8JEOpaiNtxWBdmXl6YO14W73TmQjyB1JwYgjmukDcb9W
QcLSYyEvrEpnkYioEQAfajuHb1bf1dRY+xlXYMdw0jIEs6aYcHBEQcPQ1KO8aF2i3NeYIASMdLPQ
cwmjIx7EB6zbG5C2uIaY8GdD931R22cRjQHxQ3NoRif1m1FPLek5oW6fciZF5frG/emN0BuDVD2h
CQIINuarcIzNgHPcvSRiSInf2hpf+2OG4dTftJaiQ4upxlMkGsRg/KNFLbh6aQ3CK7kZY5lCG4Yy
hK2piwPcgZsYbYqxlbsWvY2YbcY2IFe0qUPV7gjMfhbkjY96lt7hO5ONNuESGI7WUvOcbZLkA1j4
qe5sONomj1XSA4ob/QtvdlFoag4kHDY4pvT7eqIAcwAIfnW6PmNK4IYiyEWBjgcnXmShrIcZe5bf
qdnZLx/EhqbPFHd9SA1oaad2K6ZShF3jGv0omG+duZbVpoSj5nqZjUNJADg/3RRaPN3GILSiAInx
kCEJbO4ftGkgfvFbZlKU93cYGMZG57wy6RKO+WOoykWGL1KMDEndf5wWHjJGEdrcLfWDyfvC1HZn
A/Zy5BETmIzi40TBp3LWZwAkSSHZnrSicAAzAOqQ1EjOPfinluu1A4mTSiiTHzJOI6j0gtbBPPcl
tbkgCdtyGBwW/Hb35GWuLQEmkQbs1VtiMhPb9RGdDUwwIryoiN6flxI6S7EFCG0+4NLy0y1dV2Lc
nQO/GYAN3IpiSEI7W3KW3tgtraQrgNSaGxpmSzQixocVDcBnCO2IxiLaWDUQeZczcl74onUZAl2F
We6iNqsj8ox8EP3jVt6g0ZViC2QRgCJGmNkNYI3wT16iAP7oRjuEESIOo6npkbow2pkR+s8nB7pI
ebGO5E4AAH2JobENuWciTRHcG9Ey/wDDYv3OE43AOUiyjPb3oS3D823QN/eREoQ3S2BZvulTjM9M
6VY/FVLg0OHuCYRi3afgtUNIkMQS/uQnKsne9bqcpTjF5NpJL1xpDBDbMhpJvUryNrd2h5hMDIki
jXWmO4JtRtqpPiCU+oRmTfcnRvBAQ9RtxL9QBlNz2CKmZESFnAI9hUkz4WQbqljEW7yiCWDvpFk0
RQXlgEBtgT3ZBxuGoY5LVM6pG5PD4rqNeSjMdO/6jpgT9WOMkNj9529en5wQS+LqYkJTiAWlEGpR
G5synOTmBMmAHOiMpAGUi9CGDoE6QQeoxUfUSiZbYlWQBN0IyrPA1iQqfIWauCjKOyZarHBao7Mn
F3/1otCQGRBP0ptMmFni/vKbcgQCKMPgiNsE/wBk0bsRhoLmg6reCYA8yKst3zDrG01TQc3UfT7A
/ZQvLMjJWunkq/LGpCAZnCMI1rU5Iuq0iKovT9Un3LRMNIXTO8MQfoWqNAjCAaAoea9HuEh/3jaA
jj844btTISmfecEIxLAVNGNULN8wBq4yZEdIi9NO2A3eurUCbsQx/uoMSInP9DrQd7ZjEV65NXvC
MjAEC5DLVuRIjdxipCLmBwJwUNBk5k1aqQiKCMCOkY3bNb0Yhgd6Qljh7FtbkpaZRfqtUnGMYlSG
ztnf2j9eMZAV+0yfyZwldjj7SoD1g3dqcQKxgS/aHwUd3Y3Z7W3IAiJBle2AZbm7DeE/KBlpMZAl
sqIax1PQEImJMTdh+latuQ3IxuGaSaroSMicgCrv2lPKermVN4mbhiQWW1KO/AAxOIBBbmpQhM7s
idXmG0nxwR3DAT0kDqAI1ZsaLzN0adqMSNsyADuX6QPetRBbabUMUAR0kgjCh5Jt3XKZDxY0pdyc
lAbA3Az6hI6nPLGyjqMtVqENfmm3JCQxsaD2KWzECNX0tEPmackS+vZPVDEtlK1VOM9EZGoBLMUY
6I+axDxJaT2OVF5U4iO5JxUYY3UjpE4hiKAFjWoAKhvD08ZzgekESZ8+5b8vUehjPdl1bBjAxEJE
116WdDdMTDbYCMBSMWHU3aVLc24mUBch0RHbmxFXiQ/iFCR9Pubm3D5HLhjkCVKR9NIEmg0lCQ2z
AcwYj/tLVHaGkhuoxkGI5uhtny9qV4vIAE80BP1e35mIBJ9wTwmZAYiq3NzchLcnKBjt0+WRxRE+
stR6L6r0IhIsa9qHnbI2om8h1Dwq6EieQGnQ4zZCsztkh4iLyY5FmUo7MJiLn5g59gXlbMZGRAIj
EVRB2dzVHAxqPFHcO3uQP1ixiCO1lSPlnCspA/ekUJbm9OMnr1fQAidneMxKug4MpJxJg1RwEXYY
yyC0xA8qQ6WzzfFVKpVfrFVLqMT8kazPIKe9MdG3DTEWuumEJ4NqDroBg99JCO4dyYEa1AKrKQBy
PxZFyNyZvq6mHYtW1M7T4OTFatncMoyLaizHs1OnnIyIoQREj/CtLBgaBCcg1WxTyGmIN0RLegCT
Z/8AUjIziA9HyQ2vTbh8yUqGIHvZHdO5LU9ZGsif7LrcnpBntkRBOUsSpx1MN0vIDFkyfJVuhr6T
LqJWgB5Pfkm1CL3QANc1pwcOtIOs4gIa4g6aArVEaeYouifaFVejH/z9o/8AbHDcMSBLXIkAAOxQ
kQ4DVREY6gC51SERTvZCW8SYSBBjEvKBzDO6Gz5ctyAf5iwD8tN0xjMPcCvvKlt+YfTwI1PIAGmG
ooafUym4ci4D/ZREfUtGjgiQiRzqo6DGeoPqg4BD9pQMKamc1BPapgbcS8IO8sitw0H7YkWp04KH
lzgCAfml5Yvgyjt+cWtPRMmHNs1MUlGMuk0JIPbVftYaxzNkNva9QNsMAISIkWZqOVIahNoTGoDF
nY1oiZ/Leq6cvlRIjpBNKLzREgxoTmDyRn5geIcgjxVyHWl0/cF1SBGIUBKUiNuOiuQLgKcKBwXP
dkiTsjRtxiNsmTE6QxNittwI7YgATGuqrvJ0Zwj5pFCW1APyxTEapkEAyFQGsH7U210zYDpYSIjE
B69igTuS79gEoDzZHL9gq7ksvwFHXuzMS4hq2gAWvVGcdRkR1ERYmlWghubemBjSUcbO6hESMzQw
lYd+S0zDOQA5bk915URGOmgkJStk3chGLQkAA+o/Qondnr3i7gCgc+1AHcjtbQ/sEhlE+n3DuH60
TExHtdTPQIwp1VupThsx3dvbqW6QCcWQEvT6t3GEm0drl0AdnajhpPU3Oi69qImL6YxH+MxQ2/SQ
8uFp7s4xLfZjF6pxuiWyflkYw9oai1fsJQDDVpBqewqW/u7m2JRDPoMQxtYIGURpj9ahfvUtMRSk
g2fJMaAYGI+CeQMy7irIkAgXAwHioShtdUKiQhX2KM9/VOQNi9FLZ3IlpC2kt7Qv2m1uiVukxFsw
huQnOMSPrAE+woThua4s1iEexCgPTb/R1SxsmFFEGxtyIWkVWmPec+IlIde7WT5YI7AIM50Z7Jwz
5hUmx7Vo1GYNw7ra3YS0yHTKlC9VojI+YTWQy70duDy1YmrJnlpd2BIQGskE1dz4Ohvz35wGEQXJ
7EdqG2ZVBE5FjT7K/ayJF9Nh4BaYkA5GiYobkIxkRhMOtO76WMgbtKX0ujtbW1oiTqI1PbuVRTJU
qtIwuoQwJqjMigFES1zVuFDRE+xMaHAhdRJGNUWfVgaouKFUuvRh6nf2/wDGOG8N31EdvcgZlpR6
pVNsFQuDXJS6Hm7xiRdSkfTz6sBEsOxCPk7kdVqEVTDY3C+JBTnY3DzIVfTzIPJER2pTbJDV6edL
Sp9Kj6ffBju9Lj/UtUp6pSjECAoKcypDpjAbnTGLWa/NNu7kds1pOHmG/sWuG4JRB+YARDdmC3D6
jdZyNIZyfBEn1AiSD0yjJ/YF+z9RaIoNoNTnNGWqUtvTKEWiInqBD0NUfMm2d/gpnY3tPqoA+XGD
tKIFHyQ2PUU0zDkVBat1ubImdIk47DVEO4NK1opbZuQwRB8UGsODv3K7leVIFsC+eYUNoyoIkSo4
rZa9tmNSxIwyNlqkQQRJwpB4kRdhK1C1SFAjb3CAKNvBm5PggTDdgMZDei4Hcnj5swMY7wHi7JzD
d0CTF9wGIsTTFbW5tbg2d7bg83OqU5ZE+xDTH9pIACAfqNrJ9+EtskUBBDgID1NdtuZI8ECGlCwc
BSAABAxAAWkwhKJHUKC+VE3lxk7sAWyzyUo7O3HT+sBgtI26HED9K3hNtsSiA4HNGEpkyZ9UT9Cl
GBE5GolMEt9CPnbgIesQRVPtT8qYyIY9q8rcnOWIiC7m1qoSjCQh9ahv2LTtSlEy6RI0oiPU70pM
GhEScN4FE7e8YTF/9KL9n6gTkcyfpdR3PWS26107cAD3yTQ3R8pnLbmC+mOUgtO11AUjUfSjunbM
YkuRqYHsdHclCYkaNHqDICUNLZjqqvLjvmEBU9nYgdskSgLEvqR7FGxcd6MiwAPzc8lyxJQ2D1B6
GzFHb2vltKWJ4clDzS0AXkeSI2z8uHJSkRKpLMCmkZA9h+C/YAzNzRqd607n7ORu7Gi8ue4NyMWk
IgCJ9iI8qQGA5I+cBtklw7OmlOLdoQEJQ1xqBdS8yW3u0DGRqG7LI6R4VVimESTyVYmnJNxsnZsu
A3No6ZDFnWmchpxYMT+Tpe6Et2YY1Ah1H2Jtj05kcJ7xb2KI3ZbcRaIFG8Aht79MRIWkOS9GQanf
2v8AGOG6CKCcmoM1rmNMTckPVCMZGRj8jDBNub0xMGpkbJhvTLWckM6Mtqc5BqAEh+xHb3Duaoms
ScV5k/M8sRINT7EPU7ZlCBNQTqBPNaN2AEqx1NSua87baUYiDtWwUcAAPbxMDEmAOohvaobW5tsx
rRpEGtVKURJo1PJaTGTtqI5LpjI6a5IQlCQ1Hv5LVtODStQhtbu5KW0BYgFjycKWqH7QhtZyDrd2
d4NMwGkHHqQG2Hc0GKBDSJvR270R0RZqkZpjtxkDQSC80QOklnQMwRKQBY5JnQjnmiNwuDgtUS8T
bNDclExJBAI7VHX5RLm8S7vimOg0L6IkGiZot/aBIUzuCB2/M6XB000/KhAaDpekRT2o7nSJwkCD
iByU475AiIE7UpNfAInpYi9F+zIBJqDnZeTMgShRi3bdGeoR01NBQGyg25qcPEECoUuthJnHYiRu
NzQDiYlBwD+rmj6r08mj8tqOMFpBEJgfMX9inOW7GJehOJKOxoEnl85FaZKEhplIHpBFSUYEdkZU
KM5QAx0vVTO3BgKEVonmNMJCvS5UZEPDA/FQ25RlKYDUR3zt1kLGotYlPP0m3HsJC1DbZs5Fu5aT
swlEfLmhr2TCM6SHuqEBukAShUVPuUR6ciMxz+KOkuwqyjY0NCjY9qJ1AtgKLmrrmqo7saTJp2BT
EayjEuBzCfRuDGhKruSjyJL+xfizPYSPpXTOQ7S6aW5Ip3r2r5m7186IjNnDFrsnJfnwoSgYyIIs
xK8v1UfMDMNwBph/eiRUPROmeqrU/kOU4L/kF7ojb3JRBuAUNe44H61So6I6pEuCbUXl+oETCrNQ
g5r0QF/3jaA+/HhulpSAMqSbNCO3HSAMU8SxnQyGAUfWCRO5tBzEl9Qdbm7PceG3AzAkXBkKAJhu
6Rg0QAtW5vbY3ol5GUXlIYA0Utvc3Y7B0l5zLAl+S3do7nnV6ZwPR4FCMX1vUmka2AUYy2wMTmQt
colgOmIwR2px0TJpMlh2OoSgwmDpntgUIGOqyn6iena2JBoj60wO3BSkZE7wm0aD5YqUTA6AOsj6
U+uw+Z1GIDxMgHfB1QsSCxyLLq9UDQua42VfV4NjfO6r6wuQ1/bdbQ1eY22BrzqVUEnABTMelqEm
l1GMg3NAA0NwjqJ0modY64fKRcfoUox3BRtII9hWiQIlzFwhEhiBe6YFnsSadiYyFHo7VCrLdhoL
GIESzucexTI3JTZtOoB27looYDqEsEdmctMtwa4SNteo077LcG9GWqIaQIAkAOxB+79CANWuclU2
5I6aclo3ImZ/WNx3oQLiFSWv7UQZEEYmjLd3Jbg3JgPGLkFCI2zuU6gQAHbvJUJbbAkOIiMiR22C
MdzakASesfLai25+ljrcETAu+a0HbmAT1EjFUBezkYKoOtqEixQmZCUg1WL07U85MMxio7oA3Ilj
IxrZdFQcEak7lxAV+8j50RqJavUjGQBAZgtcTGItGIDeKLMRgxQhOOkn3KGwNqc4xrr03OJWjaB2
NTBzEdfaUY7hG4IUMbP2FS2tnZ1ay8pUfkvLn+z0Xeql6b04G5NqUH0pp6QbM36FViD9YcK3TKWz
uSaLdBKjvxNd4AN2BNcdqxCpXvVArKypFWVRw58HVQStWGHARNHxbg+HEsHTswV6qq5qJABdUiVQ
soFydFuRTaWkcQvQkUP7xsuP/Mjw3R6rZEeqTSjXE3UpbW0PJjfSBq/7SbbifNJI0vbtCG1pmNs0
oGoVLbhtSkCGLBdQlGQwkEdJLnJVDHOxTgOpGIIMAZ44IRE5Fr4spx3YSIBpIxMQ2CcmpqxFE+9W
QbQDZSkDIN0ivuVGBzMwpbpMTCVCxcugdAeRrWwUdI+uK96Al8rV7GTRiSJOO1lIxjJ5ByLhgpNr
AlFyABYUWzH04JiYFpSHMliU4AoBK4GNkx0VlpPUGs7qMpzEAxIAOpiMFF6sn5LzBJolnCbLHkiD
SbECWOZ9yB290anYg8lLYAIkDUWDBShJzKJY1pTJRjWgo5RiCowwW0RTSI+915w6d+EXE2qQPmih
tNTb+WRo4Nlp06QbyClr3dMGYRuZdigY/KCLqQ8uBaJ0lvrZqUjGEDgYhkwkGK0yMdcqkgPJRjt7
kYTJ+aQUtue/8hYsdJK/Ekf7y3tozkTKLuWZhdGG+DOMJa2BuQEZx2xonFgCbSzQ2xEebqczcWyU
ZR2x5YHVF7lbuvbLn8JlpjCWrHLnRbm5tR8ucAJSkQxClGnmaXk5qXCn5YLxcmQsAhGBJnIfMSzE
dqi0+vmBQIa5RhE/MfqsMkdwRjOQBrYnwWk7M4yAahLVUZQ2pnTSNQzLX6jZmXp0kAArTHYnuSar
GnivN2toxLuX6qLz4ggg0DMiN6ESDJgSWIdFjqjkSHdaJjH5gqVC7FzdRgS8YUCoWKYiqyVIvzsq
RCZ27FcuqlVVVRVrw7UIC5Q1BSkQMgeZ4NhxOkmUT9UXWkggi78DA9xK1SLc0ACZckduMGar4oyl
Fhgc0C+l87JzU8l6OQjWO/tOR9uPDclPfECJyDAObo+Vu7kiP1QwROzIx3AXM5kuhtjc1RJpj70Q
SYklyY5qMfMnrNSZNMe5Hcn6jZhgTpD/APZUBubp3Zi89oFgjCEp1GoagPoKhubewQYPHTeEgTcu
VuSPp9uOphCQLY4gMj6SGrTty0EFtLilExMRIVNbvWjIj1MSZfVnEUHc6EB6uBcv1LVtbsZjOICO
3vDVC4ALVrkpSHppEvSIL+5Ce36SR3DeOXsQ29yEomvTLssmj6NmJ+r+jFBvRtTL2WWifohpIfKu
ShHciIGUTrgTatlTajIsG+ldXpoM8rDD6qOnYiCYhqC/1luwfTEEswyClF30jBANdapUYsgImpyQ
kzFmf4rWYgTGPJbeknqJie09T9iMSXkMU4J5soyu2aAyEdPddR1B90iQcinU6Z2ApyQ0lgSzj4LQ
TTU7tiFa8tTshuAgMKjNEgMwcjFWUN2LgmLkvioEyOnUGIK3I0mQRUnJkbFzdwt1iAPLkpwlIaiH
Dc6KIMzMAvQG6k0SRKjrbG3GTW6qDxRhIERlUTdageWlTi5OBdRETcB9NFojI7cZXjBh97NEwPl7
8Kgm0lOcpReFTYE9iO5EExi2qTOA+aYxB5Mm0Cl0AQe5aoi+B4PG61Gr8GWo9Q9qeILGmlOLGy1E
diJzwT4ppGibA4p8Pya8aBeZuNEZE1qgasLITiMaFPuEEqPpomtzwqHRaxTkVQO2AJYSlgpb2/vR
3JyqYxYOhvb1WqIXcpoRG3tvYX71csq+xPCZBH1TQoGcnLtzZTEZtuBhEYlHc3JHyxVsSvQ7cY+X
Eep2gRjL9pG/DeB24mEtwkiWIdA7BES7nbN+5EEP2p5bYfsTmF83UYRfbBpQv71oJMgbl6lbs47Z
1QiTEubhAf2CjEF5R+YZJ+a3eoVkZE0xVZRJHNadsh1WtCte0ZRItpLKQ9WHMCAJ5vgVq3ZxhGzy
IRlserhDbESGcFjhJR1epjvEgUGJb5l1bkAHNjhhgtU92JoXbPwRj58QWwLLY17nVokJF/mOa6t0
CkKvl8V879UqVxFu5NroYxzqAaFepYV1Y9gRlFg92ui5RILH3oiXgogHvGKJxGB8Vph0nJSjIdQv
gnFcwoyjIAEA6cuS2yYAgCsqZqQ3IjRpkQwBLAaqK7A2KiC8o4tmokT0gu5POuCBlMlnYjmv2Zt8
yYimIFF0WOajtD6oYHNAE9OABUZEanoc3CGoGIK6JEuKsSMHXUS/OqPU6iNyJ1DEG/cUGmfLgXhE
FadwmUciyIAIL0PJERDiVyUHgST8sihuiQArexQlGAnEhnicluzMGEWLDMqUYB4S+aMqgshuaow3
In5AG9qOBxZCBgTOwLstOmQIpcIziZAC/Jaoyq9kQRfJdW2XzDobcIkSOMrKUJROqOVR7FSEiDix
uhNpD9YG62yLVcLJMq0QchxcIgWwQwTJsMOFk3ASIdi7I7k7E+CYlGMvlwGKbbgA1iUd3cLyKBIZ
+OgYXTBQl6lztvVi3evM2QGAHTn2J9uhxBR3gBOA+Zi5HciGqq0BWYQMYa4xrIO3tRENyUI/q2Ze
hkS4HqNouf8AeRPDd1FwNyV+0rVtvEjEHHkhD1HU3yzxUYyYRl9Y4KTMdN+zNCbdIIfJBrkOt/7E
vcq4QkvL9OYz3SCJNaL0rzQ2N2WmBo5wdGZMpVIdrh08IGtLISEGnI9JerKJ3JtGgkblk+1vTIq+
ClCO/IDc+Z2uE+6DPF3JWvagDHNveh5cRGe1VuXJa971E46S5iU0N2e5IAiJLMXU2kZSgLDIr00a
kaJaeyrp+rSIwpyei6InVql4tVCZjKPSCHyewW7I1kSPchVjchUo9VKMKg3fJRjLpAPUTc5J3cOA
M3TYIxBAkcV5oIlJhEjEsiCGIf2LbOJgPcF6cwgJDcnok72TRp0S8dJTFUwVVplUHuTRpgU4Kd1J
7ysVAi4I8FSh5IQHUQXc5FHpltvRxWifVGQpi10AB2lClRR00iw9qpKK+cIapdOIZaRQAMEIwuLg
1ddIMT9YPR82Q1zlCP1v9Ami8CLTqX7UW3AWtdUkCMSHQkCIkVB7FKQlFheRLKQ3t2MHkwIPvX7H
c1NUTwfvQmdJ/WIo/inBeZrdDclF4iRo6E47QgcRqd+1GUJGIPzHBCUN/VM3FyhIy1EG5Wa5ckGw
ToexdVI5qgL54JwajBUKqTwpdOQ4CGmLAKwHYmVQ4GCePSclpkajJOLYJpGuQXVSBLnsRG0+kUcr
zoReDtRM7x/VlUIziGB9qO3szMYmkmxVQ74phfBaH1yLEnmjGJ0xNwEcALL0Q/8A7G1/jjw3jJwD
uSrnUqhdTETGPljU8ywUf3j1IEgGaBf3owhu+Z6dwJTapitza9JMD0+4B8weVE+objt82DZBbsAT
CcokaWcHsKJhIwlalCiSXKGq304KMt0mEjAnSGPUjN2ELOMeNaIvIBqGJXlTk8YikwHKJ2HEJfMD
SudEHiwH1hSi3SIGkRpGZxKiDttFxWPMqcfTNHSWJqXHN0BugHSCICNLrc34xLwkI6SHojKMyJPR
lOMpCYlEFpSsX7VKQLhgx5LI5ojc3PLiAOoglzjZSnHdgwJYG5AUN6FNZIlSxjZu1be+5lEuJECo
aioWehHJOCyYyIp7US7Gb1N3W3GxEQ47gtva3iTolqiIlqowFQxA7wi+bMqrV4J5VRBlese9NI0H
tRMogDBAahpJcuhINy4GqaQoE+kHNS6SLW5ovKQbsUdG4SHqDkixmZ4BHzISjMWaV0ZRjIiIeXVZ
Btsn+8vOJ/aD/uS7+KIjtaSK6nrwgYljGUh4qp0vcmyAjPUT8xCfUv2ftVSVqgSQKCqaUJN2Ji8D
4Ih31XdB4iNeoxpRTAEqnOhCjKJMgRTku/hRW705NPajpHig5fgAKng+HD+0bKqpdVoUTmiVgBwI
JOn6q1V1m6lumgYgPmURjmpbT0lUdqYjCy0RpOVTyCv3rVLwTCg4FyhLNehBufUbQH348JDcAMYT
kI+KFByomHsQAuFIxtH5s1q1Of1bOnjGUuToeRtHmCmnARGBoCPBGW3IFg+mqEDcSAPigAKIbYj+
IaNVygfLIejmiec4QAqXkpbZ3PNcP0iniUwqO1PKIMi7vhkgNuEYRasqP3LTGQlPLFapEtizra0y
juRIcCUajwqhvbhEZSJHSGJbErzDE7pj9QmhXqDrEPMmCBKjMGIR3gRKcizRDx9qYzgDlol8EWnE
t9URkPoUdJAMokmOLhAkfNUNVCQYmJcA8k27tiROMaexHb0R0Elhk6jPY6JEHXGRHzVPSg+3Iuxo
H9yaECC5YsUPMGk5G9VAnJbe/GAlLXpa1C5UZYyqe0rcjlIj2oNUoLsTYCgQB+sHHYn8AuQWkHpO
BRfqi+FU4N00gulmK6aH2KhDpwHOYqtcyTIvyZlpBrkyNwMUCRqAwNk846Ik10YDkCvL2quceVkB
ukgGzVKjA7jNJ6hk8N6j2Z0xm/cyEgXawwRJg3OK6J9xTCWkiuYKlG7AFwUNe1qk+INu1a9ZjR2Z
0D5wY2oqdcf1hZRjGBIHJapjpFmDeKdUVSgBgiFnw83cLSl8seAyCrU5IykXPGo70xOrkEAKRyVq
oaj3KMRQZBNFR2twkTl1E9uacVJNCOaYD9K0wI80gAY9qeRcnFBxRAAdypA+5EEgEYZr9vEye1bd
oQMY/shSWkUXoRERHl721pMRU9cb8N3QwBkSy29qUdMiSBMVweqiYy0TNJG4Pch50jE4MtUpAnM/
oTmUWGUSUBqkSagCLJxCZZwebKJjsmpxZb0tv5DAgjEZqFx1B/FVBPaUd4iMIxrryUtvZJHp4AkZ
yNnVyRz4arVYqocLpdrKHqYx6o4Wd0QC2nqbtKlKVcsUBF4jvC+Y+K+Y+K+Y95XzF18xL3W2ZS0z
IdwWso+WRuQAIETT5skdW0dvcYtkSyfamJUevxRhiHB7lzzQ0yIAdg9FISGtwwcsAVtR0xO6dVYS
GqLYSfNQjqLgMQQXBtUKO2d0RGrUCGJcP8Vtx3dyMS4FStyQNZbkmHImhUNQEtx3gcxKilPdixMg
dOFh7FOMPlc6SMQqWIB8Qok/qiiAfsTeKdFxQU8USHdiG+qSoTiXDDpsxNEdYLirokzZmYlMN2ua
lGU3oGPeqxHbGmKMmEjYA8l8p0mpBTSGZPdZEiLAc0ZHAMO0ob0Z1ZtMgzIjQZEF9QqGUtJMQA4N
Q6EtbizSDpt3bf7J+K63jyIT7LjdNHeiNXPyptOokMWwWnd2dvTG5Zijs7O1GG2ZatX1v9SbUWCl
F9QmGKYmUofquvL3Zx1Y6wz94CeDGJsRwYU4ZKlEBMtCHVI8gniWiAwTCpTAVyTjsLcKJvmlysqm
mQ4OT4IAJyaBeXCNcZEpxRdUn7kIUIFjigBDTJmkTiUSvMY+WLyQkN+IZpQLZ4FDb3CIyhQOKS7C
FpIbChcJxQjFGwmKnMry9t9yQJJjgO1eiIpI7+1Qfbjw3CS/VIe1PE1wVf8AQrUSSSQ55ImXS4o6
MXer0QkxIs6IDB7DmvLjCURaUmZSG3OcYyoWJYrXtykREas2ZPu7mvBjb2Lb9PuTbbhQRGPOWakd
zcFmIALjFCUS8cCq3RAJbELkgRe7960yNRUhbkgHEiPB0AKjmrUuyZmGaIBTkgMmBpd1pNiVt7E9
uJ8pwJChLly+as8RYHBCLUyvitQeHMUqidVS5dsSmmTIWbG1ENMj2Ecl82HeoF26gS2QKO/GIMZV
BIriPoR3d6MIwjUlkDGMdMgCCMiENqAAhGEZBqfNeqEtTkMQ+CMyQQbDsCGmpCiTGumNO5QpYV8S
nMaJ9LJgEXGSHOpC1ajEhkTr6SxdaZSowYgLqqApbgmWwiMDgFHbfqxJzUnoTYmwZROp5UchViC/
cQF0nSSSADyUd6cgAS6EYl2k796MmaJBLqOqMSJYEfSm0mBZyxp7UJ7e4JHASGCl0ansY1UXBEvr
BmZCEZOcMURPbhJrlmKJ0aQcAsVmqhuPI3CcFwceFKKq1Gy3cDIgdyYB3RluD9p9WKO4TU5YImRu
ETI+CaNI/kWXT8xTlMeFON6rRGR0/q4I64nuoqSrkURsg62dn+leXuxIyJTxk0hZVpO1KO69DqAk
DvbVQf7ceG7RxrkD2ugS42jJ5VsDkonaDgi5KB1CJuCA/vUtz1T7hJYWYDuQgSDCP6vudERLxwdS
86BluQrt6ajvQgJ6YzoZydo9q07cvMgwkZgUqhAkRctI4Lb3doAhgJmD3zKiCHiCxUpjplIAFq0F
l5W4BrhJhziy1eojM7eE9ti3aCiT5pOAJZ1KPpvTvI0jKUj+hbcIDQdsEE5uXUvL2ROQoZVLEqW0
JagIxMjkThX8iiMXZ7ErTmPYUH7lrIqLoAXPudCQwRANqjmVXBWTrUhI2W7C4gQB3uVP0YoTQyyI
aS2ozDEREK0cii2zbXtCnYSEwqgVVAkxLBr5K8V80Qe1Hy4ibXMUdycNMRclaRKFUBGUdw3aNUTE
gmpMbGnaniCY54IReiBBcLRE9+ZUS5BAOogYuiJTJif9KoQ3JHyzmm25uSXIOSqKksOQzQ2N3b1x
FH7eSlokHBeiv0saKMpRB+go6S50v3KHLFTL9ihGcRISu9VIiGgxxjRSltbj6hUSCjEgGjEcwidO
ljgqSqRQFPf2rqg8h3KxBV3OSnEhy9OSMZhis1aqc3UgBg6/eNyoBaIOJR3CKm2QCqWGQ4aRYfkV
RIwR4ObfkNw7VWnCWln5p9wkkJ1zXo9RqN7a7+scN0CgM5HvdB6jFSxi1HwVUasXZEkiRPzAIHMs
eS80VBDHkVIwLxlfuQ9OX0lz3m3gi5tVCEDphL5hmyBFMGsp7JyIHfZASFRSR7EdicgxtqsXNluy
2ajbZ4/ayRM8LDmhumETPUYmmS3ZQFJgEiwCnKdIHqpaiEoV1YdiBZgcTmgRF3tZdUSBzGKImLhg
EI8m70IyrIBwRliiAbYmizIDd6zJNSMcUGFBbJxRGJALfWIqgHLpjIXpmjEjuzW4d5g8f2faMF6g
b7vMxMAA7kUKntx1y3iNIMSBFpMfcvST3CYu0ZRiagszl1tP1NtsDmRJDMp4OTiyqGWmAfEnAI+o
mwhFwSZEVCMvJEq0up+S23rwwdNKcZwxiBQphswHMBiiYkg8k4kxZvFAiIMf1cC6OmVSSQ6eXxTx
afYa+C6gQxqCnKAyqiR0yAdxmo9TyJLvdk98WUy5MJ1HJEnqFmRBFygJRcadNFtaZXlRbpegDhbU
ravYtwAv+socsUwFC9bFSqQAWL1Qdjl/qKgbRarrQQ8WdSeIB5JwdI51RGpq3Do6ZA7saxrfkjEh
iCx4yMQ5kGQ1GgsMOL4myY8GxVTUqv5YAuq/kV4OLFcs0KOV6CI+b952Xb/eR4bn25e9NigHqEEI
D5J/MOaNy9QRgtyMg7D5nQiZAiVwU+2QImpC1DAIE1Mac0InA0WTITBMVr26iQrHmhOJLihayJjM
GRYB3ehqCtQP7YlzRqKMTPRJyJNzsVoBcM36VKIJL4ZIP1AYLb2dvp0XajrbnDc1bxcGX6uS8rfJ
MSGJxd7oAnUBks2tyUZvV78lfqY/3ltyiHcDVzTiQ1dMgMnCMx8oPYgC4BDhkACQmuc0+OQQ11q7
ISDECxZ6FQlF4kHUTzQO8JbcJAyMgLAAAdq9IdkER0THVdxLmgZ0AwVKDJVDrTGIa68ibiDkgYOV
KEZsTYoiRJbFe/i3GhQLVBopbsjpnIuWqFEEBxEB40NEIQlrgQDq7cEQclExLRdwqyftWl0+HJOh
ggxZsERqLFQjMkyBpSi3RdxYLaFmNVpa5LKYJvfBQerIESMT4hAUY42kpM8c3qjEES5uyYEg4p3J
g1ua/etqFD+I2BzVbqqrxc2F1WmXEHNMKlOU3CidqLALNYLlwsrKoTOhElyfBUDBFl6GRIY+p2af
+ZHhvAi05e8pgvmYZhDbJlKTM4u625QJlugtOLN2VR07ZBwUhMdMqGqAuCHftUjOscM0JYlyiFqx
BoFkinOGCPAC45oAVBxQcuMQqOeSOWCcglkCS4FgqlgRTtREfFOT8wsgBnTvrVCWABHJ1oBOrCXJ
Zxch1IR6oNqPdyTEeOC1Q+Z6hRlcmr5KMmo6Y9QsMloD8xzTCpDOgCTKMYkRD0D3UBMD9iCAcSDn
4cGe6ZUuqqib3p5RpmCqHxVA/ZwHtXLg2KzJV1mnMWlmKKXq9mLxFokdUhiQmI6sUXWlu/hyXJRi
1EDKXSKxAzU9YbUMFAPUGqMTgaFSYgg3UfLdnYsqDUB4qIkGJFMVJi/JSDMEAFEVrzTSDg0Iwqjv
enBnsmpAvFVDMrJj4KlAu/jRE/qhOb/kOfyGC6iys/amEQmAVr4rmcVmU8ywyTRpH2r0BOPqNn/H
Hhvl/wDvJ07yqoJg/enFe0omQ7UNb6PrGIBPtR3SCdsfLqkAS3JfsYmEDTT8ykNvbkYCoJDBu9eb
t7EtFwaMyZowvqEjQMpQG2SYljpBIojEguswnjEsMcFpPeeS8zbkJQtWhB5ryd/ai0GjX5uZdSAl
0v0nAhARjp34ktK2sHNEM0hcJmvcKl8FUVGat8CgLHBESLc74oAnHqAy7UABURJBfEmhIwYYLZpO
ezuxPmC7SaRcHlkjKBAjF4vHENRudVI+n3NREDOY0loNVifpQ1dEgwlEYUcJpFpQr2ogUexyQDVP
ipE9IdtSLB6NqNlJvk228yQD6Xt4qtBj72TNd08K7pDObQHJWT5oCQYkO2I7VVPcojAI071Qqocc
0zFzktrd3R0bocHInAqi5ozcMMMVRahAmObLzPVS0bG3WXPkvLD7e3GkS1GHYpep/l04HevKESOv
uzUtubxnGhBu6OqgVUCaKijImuOKzDvRESNCKArUa4Fqe9E54phIOe5M7jnVRdg9wokhmxUiGciq
mxL4OgSxKB11uzLVIgjsqtRk8GqLqUoR0SOWPB+AHfwYB1WXVgAiyZFc1Z5caBZKnitRqVUMqAkr
LsWS6yXwCkIDS9jzWiTdvD0GX7zs/wCOPDfI/Xn7zw5LV4K7HMKUZdTY4omDguhCcROOOaHWdmbA
MaCnYtR3DvCgYF3Q2564xBYAEsOTKEfL29vbcORFyOaEDPd3ZyaIEYiESpgenhHVHTE7sw+o40KE
jvDfkXeO3g2afYaOcRKvtTb0B2ikvYjCI0mVycRkUZM71JBdeWQDG9UNUjEYEYKW4BqluXQkR1ux
OYT4LVi6qbK/UKupRIpPJASuBQ8kdZLFnPcoQE5bkZV+ZhEh7BQEYnUCXkeYoiIyI1dMmo8eaJlV
7jmnLiINxdeZAEbZZnqa4oSNdZaJzaqjMxeAN+XJREYgSel5C1jHILVubgG3MtOMXqRV5DJ1qcEE
0ARJABwAogN15RyFD/qWo7QlMZkkKmnbpRgHR3H1OXL5lDV4Lzt6mr8OGJ59i+nhfh506bez1yJ5
KR3JkwPywNmHJW8KKntWfYgfLkY8hdRaJ242IdgyjCEo7e3Hm79q/absR2BON2TjGNFq3HnI4k1R
OxOuEZfFaJx0y5qprgETKXUPqpwKqjpzIUzNV1OATVkNM9PJGO5F4vQrVGJpRsFDddnDhlE62OWa
YsARfFSjHvdNcjJCUSYt3IQl1Ae1MSByC0wwuU5kwVCVmnMSufNfQtRLDBE+xGPghmU8k0QqldMV
cBPUpyFSiuVijIytUsjoNsTdapy1E2CaI0jFVd1yXoSf/qdlvvx4bx/+ZP3lVTpnpwnXBdTsTVkD
F2we54PtyMTyUYbjFyK2N1pnUc+xGW4WleIBsBgjDcEokS+YYqWz6bdoXawLH+0jubsXi61EV5h1
SERLMU9ieO4Iy/VPSm3BEn2+xOCYcrhAawWGDoGJkRgBEpmrnJoqIOIehcOhE0XMGqnHB0ADUBF8
rdl0XFq/pRiLEZc0wAJFwtVw9loNAKtanatvbiZuHEIGoEew0upbg0y1S8vS4cEjCNWxUtqRlt7A
EdJmGlqxpcshuatYBBlPamNWk2oMQokAmDMZ6n1HPktepnfpxQ6nMmoLt9CEiCDVy9eSA25dDVIu
jqc80RcH/RlEs2lnxsjuCEtNovQAK8Scnqoie2YCRpI2Xk7J1CHzT/WkgIhyaKPpIfMerePP9XhX
i8Jkd5WmR1jmE29AnmCg0gCcJLXFiMQOBIDkYJtyIORxXmR69vCQw7VZUxXUSgdRfJadbxyN0P2e
PzDFSANXritLu5qLLy5TAAoBkvKnEkjk4QYkHEJogAJgW5BPIhk5Dc1QumTcGZuGCoaZJpDvC1RL
tcITdhinEX5lMqKqxT7e0Tzai1loRXzEhMHKDmIDX+KMXfsXVKowCuz3OKCJJoE3D0P+Y2f8ceG+
/wCvP3lH3cCBnwmOSkGcGxxUXyTsmKg+Y966avbFQ29yETq6YyFwebqfKR96FSCMUNsy1wlQuvLi
dLuXODVQntzj1R1ASNT4KW3INKFJBA4ppRjI5gMfYtEIR26CrB3TndJhgxp9CLFk0qkWPJB7s3gi
W7X5o0RIoXvyCEiWEqd2KDlxUNyKFLUJROa1RNDQn3I6jUhgexQ39sebphPzRI9QGkA6AcjLvXp9
n0+zr2osSY33IsWOT1UJTOkgfsQZiXTcg80J7U9O5AtHQWNC5fNSMAwu5qXUK1i4btQEXkHdjULS
TpGIwRABL0Y18ExaL/pFk0Yy3OwME+mG0LOalEzn5hiWIensRjARBBGlhX4qB9OY7ev5pTOnSEQ3
mRwnCoKPqZ/MKbUTjLPuRNZSlU4rqAgOfwTbZ1HsKG9vREYOBeteNRXg2K6SR3qpcc004d4TCWl8
00ZBjRjZEHbFavGhXVqHevrD+8qA/eTh/vIVr9pGQLE80Pirko+WNL44oBOeDZYqneqA9qeRA7U5
k66QSqj2r9Kv7V8xCaM37k5lTkukK7K6qhCAqSwClHdi+42qOXNb4OQIjhWiO2D0i/apQO2JvjLB
U6TzWlhq/Wf2LpcCxkpN1OATI0MexSiXjOIMiaMw71d8kxQ0ioF80BUEL0Dmv7xsn/tx4b5AprlX
vTp0e3hP7KI5YqJlbJMLK7EINZ1Bi1Y40Z1tne+aO9JpYMR9Cmf7R4Q7VHBn9y24mbtAD/Uy9YOR
94UaVADkXfTIqUYmgLBOaoABcl7l7kzdo4VqCovhZGP14inNaZYI6rW8EBFzX2oixHeC1kJVyObs
pCMiXFnIbsZDfk+k1MXYdSI2NRiPdzQFCDV8Yoyg/aUC7ZqMdogzJYGVKoH1e8IEByAQBiyjoj5s
meRvhmVp9JsAAGhZ/wBCP7xv6YkuYA/RFGMZTkCXIdgT3I+WRtA3YV8alatyRmeZQ0QAlyD+0o7k
doye0fqhdWnbAuI3TzJmWeqbbgBRCWII9/ChqiSeD3WkIOKFUTm60bsXBLhRjskQAFXz71Ug5FGW
3CO5t45hR3R5YErg5ICEYuz9AKecQBkbpowD80wLDkmepWQGacVTEv2LpiDzKrEKkQE3sCeZEVV5
FMLZK3F5UGSoKKtB+Q8jQKMwRHQXBOahv7ZBEgxbB7o7bgkdIOJ5lPw6alOarScEYxkQJUOS+ar2
QDE+1VBAxKYB3sqleh/zOz/jjw3qP+0n/iKy4OjdEuQSESZaaOKKPU+oOFpPtRkR8uKABZ6oPkFG
ESSBJ2OZUu3hE81DIkBbIudF8l6sZxP0LbIDvGrZtIIk41Wl+DIIPlVAoNYpzn71Eciic05DvZVO
lq9wdasc+RwQYVFCBmjKcdT18V0lxiEduDtdsFqNDJRgW1G5daWYi6oR2L5mlgQow3dyRFHldDyY
CRwMjqK6pMMh+hV8SmAfsTbe2QMzRA7u4z4BO2qQLVqpRhFo4Ikm4Q5oPgpQhIGcWJA7VQ8GhEnm
Aqx0/aov2m4B2VTjXLdfH5SE0YsGxVeLAOShHe/F3Q5/s5JpTOrEMFLbhAvCjmx7k8Y1zTlNbkEQ
g9O1fNQXWmNuNVemJTQHenf8hzQJ7lVXPhVfQnKG2JaSS3Kq0b8DEvQ4HsQltTIGMTZDcNCBWPau
eSeVBkhkhgE4XPhqjQoxEWGYUpE1FAM0dXzHLBeiGP7xssf78eG//vJ/4ijig1U5oSmKopAUehVD
ULTeRxR2yAZANEpmYIVwUSRYqWkF3vgoibxiSxIDoafl1Aajapo6HljUQWgRmMlsmcAZbeoSET90
reluDTKQqO4KIPyijoc6ITiBIFmD1ryRBoRQhE4grUMLhRL1N01KIA2RjIUFXT2wCIsVKNyRXvQo
4I+lGIZAxN78lpLPLBEAaWvmtTeGKjRs+xUq1jmhEgNniqB44IPQ25KhbDwQMjWPghKONuGuUQdw
uQUGGFUK0BR5orSHlLKNUNEBtxzlfwT7+4dzcwjYJpQBicGojr2hE8g3uX7HbhKWcqn2owERE/2R
RETBD1D4hcwuxHbdpfV5qg1diAlEiuScgWUNwh2ciJzQ11MjcJitwHuVOGrcoMsU0A3PFGcz0haY
hh+Q8qRWmApyVPaqkOrgK7oFqnNMFVWQHDTAOcgoH1M9B3X0dozW7tj5QQ05UaQt2uhHcHWasht7
raAAAAMuADs2KcVOaqgtUZUxCImWlLsZl+8elOqH14O7LVGjXVKqqlKRLXAGaEI1kzUzXoAb/vGz
T/zI8N4s41y95TeUWzTgGI5oWITgNyQTMTKRcZLSKclqDGRNFqIqUBOJD1D5KJl8xCEcyhIB4RIM
hia4IQmQDB9IgBGvNbcI7kYHbBDAXLKEoyIlthhRw1kDuTj5k4vtudLAXK3/ADtxmGnbMSCDJseS
OxJv2dCxohJixscCtuEZDcIiIgxpVrURhujTMNJr0K1YG/eotSifhRPcImwjUdqGTKTC7klRFwRZ
ESFTYHBCneEDiMMUZNVq9oXUCDfuV+5MbOECBW6qCUCae5an5kIACpsoghjiOEI4iIfvqg9lpB1H
KNV0Q8uJ+tK/gh5+4ZyP1XYeCaEBFTlGhAoi5c8D28AIzPlzjQXvTFA7pcgXJWraaUeRRlKEgMaK
EgGkWlFWaWIxBRExqbNNgoznSAvRdMo+PxQlI0NtIdT39uBiMJSoKdqIlIRY5q+qSI01zNVgALll
pJ6exEAL5SnI09q6pauSYRAayuyrxBPyhUtwoqpwhqPUbBQG2fK2/UMHxY4qEtuRnOHzEm62YyAE
YEiZzIstzUdQgTEHkFzyQEscFHZ3o+VvRpHdjb+8EdmUhIXEgaEFaNwIbkDqg98k2GKDG1l5UZCM
yK1ZQDhjiLURlsSaYvFHUHwWS83bDbmLYr0MJP8A/wAjZZ/tx4bp/ty95TqqsmZMCtVyPFarHJaZ
lgtsM7W5sv18gBZbkydJgzDA5o6OoRIMiMEYiTzNA+A5LrsMEJAs1QexE3EgRJ8VCIJOEY4La9RL
bpEtOLM4bFR39sg+YHMR9TkVUQEYReFdIEY0oOaEttxMaaSs4rJav+8IAOLqf7y+1uCOraykQfl5
IYhlGXinTEUzQEbALT4rtsqYoEWwKchzc1e6IfmP0o/rHMYKWvkQead2yfkiTcYogMRdOWJsU5FH
RlQCzBHdJcVAHcoylJ5tQEe1OC4UBmQiduGiOBll2IH1W6Zf2XYeC07MAWxZMDpHJR3JOQDVCMAw
JubqZONEQ+o5BdTx7UQJsXxQYgrbmPmiWTa9L4LVAkBnoVL03qNoaZOBufWqo7g3AWZhIMaIzhNz
MPJhTVZS1N1BqBOL80wL5Bat/TtwzkK+CA9NCMjjOdu6K/bbkpD9UUj4BSYNEFgnFlWqYlnwCoO8
p3TOr04VPC6EQbrMqh7lQ05p2Y48MhmtG0HOaDlpEE6jYAL0kJl9GmvII7eyRHaFMyVuep3NwSIc
Q2n+seSlIUMi5OFVzzRmT1Wj3pwi9TgtErhHbl8sqKQjUD3JpdMgtW3EyGYR9P6zaMtol4yI6onM
Ib/p56o31R+lHegz/XiM80KWTFei39thubfqNqR5gTjw3fty96ewFyVGRIlDcDxkFZU4UDqoQYMU
TCX7RmgBi68raJlIfjSOByR2NoNCIJYYjNR2ZR0bEZjzZWc3YlR3IRkCKxjfU+HcoeUG3GJ3Hvf4
IF8vaogs7swuob2g7emsTHFs0duerzJAxiBYm+Kn6TyQNTASJqJDFNQM4IwWrYJ0kC93xQ3XI34E
ENktsDcMDIftN2dn7AiHfSWdVqycMXyTYIB+Go1wZXugBWIZClzUjJSixzBZYkOxe9U8bijIEhMZ
UFLd6cMeaGkO1ymFhdVpyRO01xfB8FCTOLEXZsuSiSXBFAzMoFnYiiaPSOSeRfi0pjVkonyyYPUl
SluEyiGkwoGRl5dMwrEKpZdO5KJOL1QEfUPyLFCoJxszqsjH2ppbgIxLLqOqJxBVOntCoQewqoKp
TmgZTMjzquod6E9yXn7pDiAsO1T3YxA1EkRFgmXYnkU0alX/ACzM2Ap2pwU5tw0p2pmoRhJolaYD
VORZ81Ld3I6ZyBAHIKQjN4wAAPYFiSvU+s9T1bezHpg7dRWWSLqqPSC+apRasQnFWuF5kfnjcckZ
Q8E8JEAXCHmM4+rghPZaL/Ptn5ZLz/SyEJH5oGxXmxFqSjjEp7ZL0oBvvbf+McN0f25e9RlufKSx
F1tQ2Bp24Ro93KvVXWSuqhPdCTWrRTO7IjdlYRqCScVLW8dzHCnco7EYayCTKJPzk49ylvjYG3ub
Zi0TYg3ZNGPliRYDIFS2/m00J5qJhDWJEgEgXxqpws/zdyEIbbbokP2r1ohEnSZlnWq4HzD3qEtq
0hWJwKaAJfJNY4ug2CYLn+RJi6YpkGqMQqnUckWAbJMS1FZ+1B6cgmfuR0xdUDHFAmsXWiJ0ihPc
gRIEV51TTI1ckOVeD7sxHlij5UDLImgTatIyiqrpkexeX5h0kM2HG6rZPKQDp4kr5iO9UndDXJxz
VOFJFdUQedkxNk0FU8Q3zYphZFy54clRZEIjhyxQAFAqpk5WqRDZptuLiyG5vnTE2jiy8zcOvdFY
uav2Ix2X242ocE8jwMDIxEw5jmiFQ3XUHLOOFRRNodaog6cU4sURhgtW2K4hVeMghGZQnAuCOqPw
U3i0pkdJxGJR3NmsT9XJekwPn7dP744b+5Sk5U5unkXkbosbKM8ZBckW8UHVCVmFojWUrDsQMRpa
2alHcJMszchD1MQIboaUe46WUNvS24DVgwjFefOZhUxY4jkjOA6AXaR+bChxRgZHbMC4gLOur5ib
o1DXZcwKHJGMqarkXW5t+smwOnRMVID1ZS2tqLESMhum5jkoy1iZlUth2oe1U4sfEcOabE3dOCrK
y5Jz3q7CyYG1HQe66kclUl8Ucyy0gswchESnFgHJFwOxGO3uGJzIowRLiQzHzJ5yPmY6lTqHJVvl
+RXhRMgwAAxN0BjjxzQBpwc2XSKLEpyuo1yCoK8GCMpXNIp8c0wHUjNsfYnwTStgUwrkUzVTZpxf
hqBjqyJUtoRBbKyGv5pBHd9QBFpEAyvRGOxt6pfrGgR3d0vI+A4MtU6RC1DC3Yh4LknuwYJ1ks1p
NIYlCF4S+UrUaSigY0ODLqAMvArVDqGWITYICa6J1/0uvSE9MxvbcqWLTB4T2b6twk8qqcI/KCQm
nY3W0YxYSBA7Qq0ZFkydqLJ1uGTttDobNSkABJ3BxUYz+eYBBzDKfpdwPEUIxzUYbddwmgUtogCQ
lUyLMo7cvwvTyYkhwNVlKchrlJxXA3cKJEdBEQCBmMVWua6aOomdYP1jkpeW52zKh5Ik3Fu1F8Uw
rz4dqfhS3FgSq17VWLqyLG+S6ZOmDO908hZFzV7ckdVGFAmF8SgAGKD35oAVegOSoxbOqvjZEmhq
+NFBji5IoaqnUP8A5gp4hde0RzgXHgU0dwPlLpKpFxmKr5SqxNLtU/kZ8KL9eWQTyOgZC68vZjKZ
NgKrTux0SOBXVLuCaIYfkgeJVLCgHC6lElzgUY2ITLUL4LqrLJUDBAbZaWJuurbhM4SFD3p47GmA
vNyAidGqUiz9uCMqAQDxfFEzL5RwHBgueKouSbSJdq1ANWyCY2RGS5og/KBVACwRlEfLXktL1GCq
OHSWliqsXxWoVCvbBejhKolv7Ua85x4epmQ4E5CL9qk5xWkQiJi0o3PahtgU25SbvWklje60u3uR
BuqFswmxKIFkyG2SYyqIkYAWTbj6sQbobkXBHyyFCCo7gmRpsTfvW/teWNO9EAkn6w+stRNCtOIo
/JGqoUwNVDaget33G5I1TqvBvyu26DDtKoyJIZFx34IYEoiUXC0mFUzMmBq61fM4oCjLcj1E+ARB
BIwUnwZkAC5q6eMWJDOV1mxqUw8VpB6aEhVLjwZGUYDcAD1yVH2S9SJUREdwTBqIyoURLXB76eoM
umQPsPtVRwyTQiZnPBftJf3Y2Xl+n2zI5RC1et3K/wDhQqe8ry/TbY2o5j5j2lGIczvFGMwQRcFN
xvw/tS9ydV4A4Yob0Kg3TixqgB2o3LIyIQjEapHBebvjzNxnbAKXpRDRKFQBaTZJttxB3kczkFDZ
l0iOdyiUwqUN54Mzs9U2K0xDnILUNiTDFNKJicQaJk2XDULhWRjg780ZQ6tw0jE/StEpnTfS+a1W
3ds/eBXbdOPlKnpPyh6LSSQYlqoDdiJZEXX4ZjzC9BIVj+87Nf8AzI8N2INdcn8Ue1wtyci+gllp
BIxKJemCrWQRlgFq9yDEjmbovUZIabjBeZAsRcdqMi78y9UdWBcDB0XF1UJx3J2pimCNFTDFVqrB
sVTBOap7J3qE5KqmWnxTtdAjxQYuSWARjHqEak2utRkDHEZhaYuQCASbZoSeqYHUXTgmOaYOcH7k
IvpmcU2IoSqsRzQox+hGJABOKBjfAovIsmjLsWq5CcigTSoLgptuRGDAogls0Hk/MrQGkQbkVonn
GM6oSgdJNZB6KJ1Bvrv9C1SJmcjQJo0AwCG96iUpy/8ACAYd5Xl7ERtbf6saeJWZQlMaIZlOI6pf
rFfvuzFpRpugYjNH8ithUlPG3CvGcDgHCiM+BP1ioghoYnEoy2QNsxHznPm6bcOrcxELFedCMYzw
NyxTenOgYyZkZ70zOZxPASFMk2st2p8cUJQOk4FadertTbsYnmBVETDdqbPgyMcQq0TiyfFGMqPY
p36TdMcURcSxR3oAaZ0mPpTiyYEr0MZEF/U7Ua3rMcN0yHVrkPaoSHzNVNAV+aRKJZnNUXuVqdyq
F07viyAArdV8AgSexZ4ugSASnZsSEB4FXTYKi5rqrzT5p8ywVE5LELPNkBIMTbm6MSKxDlPEF6P3
qJMTKUiXHJFnFyAclQEG7L9UgWzCYzpgMWVHc2IRMYguPmJwUYzLOXpYIMXjm6eR6bAZomLaiwYJ
qcxyTRFRZ0ROLlFyRLI8lIkM5ogQHZAk3utIJDGpTEv2qoVaKhDK1ETGmSo7ZI6qSwdAvyTWqiGe
N3TgMmi7FVNRYICY6clWgzR3YgSmJMTdlRM9RgpQmHjIESHJbu0PlBeJ5cXTYzqez8jkqWxVMaLs
VUDKwQjs/tJcwwCJ3JHThEWCbFAKnFrckxsmum8AE+6Rtx53TbMNUv15/BftC7YWToHgdwfWQL0w
KLHWcSE8h4KvtQyWkmmCujA9oWgUIunX8vB/+p2T/wD9I8N2tdcveiLgUUou1Fd1VV8AgMcVpema
+lOs4/SgAbKkqmi0v2lErLIpjVOWAZBqN708ZXwOCIAcWDpiHOAVQ5zRIRar5rQ9iwdAK1JUCBHy
kjtCIiwDFkCJWqQa2RaRBKBIqX5l1KUSRpDgBEhwfrPayJl1aqNy5IUo10zMW7mXTVwA6OYWmR5o
zHy0tyRkRf8A1oC2QTzwuM0TC5RMr2LKLExDXR6noml3BAxqqFlmqx8FUVXT4Jo2d0SzqsU2V3RY
VzRBuMSruwsiPTkgyI1g2kyMt+Op5PGOQUdwP5JiNYbqdafTgziRU2IR9QI/tIhxnxjHDFFrCg4u
eIKJWo9wRA+Yql12LlxZMqcNRTQaPPFPIkng2Kqmx4aTR8UfMFCOk4EclQd6+hdUu5aYl2RTZKqe
IoRVM69BLVb1OzQ/7yPDdxAnJ/FSlIdGCIwwTmP3V0y8VUVzCc3TokJyvc6dFZJ8PeifAYIMGPOy
oKWKNGonNE5oMCjmF234VPfyUSzdq1Ph4LSCH+laCepnpar/AATEvE0BWrANTkMFKUKSHuyVneoK
cR1UYjtTkEizIA0DV5IMBpJfVk6cGgDMLoOwAeuLIvERlCpOKaUXIucWTioNQ6MhggJgHB8UBBzI
C+SYVDNXNE8qoRBpYLQTUozmQckSPlu3NCQNFpka4cTKRatkTCVRgq1VQsjmumQZZgIy0EHkurBU
7irsWRY9pCclwRpLoygXB4T3Dkw7/wAoAYrSMAmyQCAyVAqjhVHgCzuH4hMVThzZExqB+R+6+qGr
bsDiF5u2fM9OTSQw7UTgqxfsTxiARdVogQnzQ58PQf5nZ/4keG/AT0nXKoGLlCEnAlJiTijLzR2J
yfenrXkqAnvVACqDScQqF0xFlde5NcpzUJ+HcgzsVVZhCRIfIJxcipXSbXIWZFQc0KMCnYEjFUkw
vROKlBjb3IMQYRqaoyZv1URJg570KiOm/NHRJnqK2ZOxpQHNGDAg3aq0hgBbJgi5ADnw5LUflIYD
uRJDlr4gc1OQLACg5piCYh7ck7F5Fg/JasBnihISFXCDtyZRdgbHApoFxYIG5ATyo6MmLG+SEck6
Y3T2ZeTtFtwPU0stBlpqxPYgSRVEAAyYs6BxN1pcg4KWmdYsK80TOLgYppRomIHuVD7Uwl4hNQhd
ITSjXNRjG0iSfyjLAWRJR5rkKpyaJohufHJapd3C9k0g4yVAQuk9x4UHAFdOK1CnL8gwbXA0MTZE
gaQS7BOKLUR2onbDhnJyCZ1okb2WqhIXVFehnEkCPqNkt/5keG8TbzJ+8oPEHTYrVgaEKjgWa6GK
cZKqNLZIluSbFB4sVdinHV2I6gQmKdW70AbZcAGrclUquxckKMBYKmKJJZ8EXN7BM5cKlTmqiuSB
NGe1kHquXJAGnMIaZF8HxTAOak9vJOaVYhaZXqFTAO3anFNR0nsspF7yYIaagCvaUxDwBfsTQZsk
JaWAwF1QOJewLSSNJqQteApRaoBpBPrLhNJpAptLFVFMENA7U0pCPMoThKWr9cUTAuCa80DK4VL4
HgIAs9ipiUfMjuV6bhuS3Ixk02rE3QL1KzT1CpJVDpyCgQznNQjgA9EZRqMvyGF0I+JTC3Ak2Txr
BMTVMVXhl+TRUKzTGhTiyun/ACWRFTuYEJnBiK1WoAx1UOAZZlOcEXuyovQxe/qNr/HHhuiw82f+
I8NMg4NWRIoMrMnJbki48E2JxCpU8kAQgMs12LkMQESAe2yqARkVWLHkVSVBmjRxgyrThdkyrgi+
CpdHPBAJr8yqVa5VLK/yiiGYqgxb6UwLnFM/VgVTAOhM2QL9yOmQap71qEqWQkcTfkhk7+KJjZ3I
QGpi5B+KDBwCDTIhahJn+aJ5UREm6rEKgqyLVe6pRxUIhi+QXykdqB3d2IJwFSnI1tgcQjDa24bc
ZBiBGvis3uusYMChMWdYFZJ7jMKMwH01C6xoOZU2sSUyumft4ODVMSzJ41q6jeJB704mJHGN6LVG
krrmnWs1OATYce0plZPGvaqpjTjXiUx40qMlSnDt4Fk+Gazy4sS44g4YogFxcHkvQf5nZ/4keG8D
EfiToe0ohPyQd2Qa2RVQzLssU5n2Lqk6LB10xATAs/JVJ8VVkBE+IumTCnMJg0u0JpQY8k8S3anZ
+YVQxWefBym4PjkicceFEXqcE/cq+1MCnehoyeJrdj7UaMwqpiZ0xIcE1R2xKDc/ltggdUSAGoao
6TE9OJAdATERVgYkHuRiDWNi7XT0L83XXKMcnQeWs5xBTy25yPgE42tPbVARIHMAOiSXJXNXVCmZ
0ZENUKINA4dfMZdipAnmuk6RldMQ4TEMVQ1Vn5qiqGCdiQuuY2wM0BAy3DiSGC0wYDNqqTnqu6d7
rUC7qt8CgD38HxPBslFV4UTCss8k5/Kv+Q4cSxay1M4FyOARKdnTM3JUTXKZ9OTqtAn2qyxB+hNO
Bjz4+g/zOz/xI8PUSkS0dybhv7RVLYIzwAtiXTCPislUnhdWKwBxqqyHcsT7EBEUftVa8rL36inA
BGYqnw5InUD2q3sdOzHwVwGX6/cqwbvXRJjzVGKrE80yLVeiDYqteAOa9qzQZVDoG5RlfMItQSiQ
qyV6c0+VkDpqrAKs3zZVD9qoAArrTuXKM4TE90AUFok8im1QIyIZRntzGtnO2bnPStVZJtvb+8qG
IUYSkz3e1E4wUZmLCQuE4qE7Mc015DBVp7E0ankm8XumlIk5RFPagRUDOoWnV0m0RQLPkmCchNga
FEEOmBHYvgn4UwVeAHKybBUK0wrmVZdv/QEPUKgeWCoWGI5po0e6qGlgVUVQYVx4OC+QR25RIIqU
GlUB5ahjgEXsTRqKQfri1Dl8VWhCEbgF0el+1WXoP8zs/wDEjw9Q0j1bk3Bt8xzQBAIdynAYZWCa
Zj4ur+xPU+z3LpiO9ynD8mCqjEmhBBRx7SvlAI5OnZ+YoizA/wBo1RzxYOEdIJBrkjQd9+9EGVeS
NzzsiNLjMogkRyReZL4CifSS+JKcQAGd05NOSeMqpxDVngn+Vs10kF7MqgqyrdfQqUTG2CFE7MLo
2RIwFuFae1NbBUNDdNVUiSmIYDNDVJuxUBkgYx0sGINHRYmMhRwgRIkumbcN2oQht7o0zFweadqp
2dO7HAr5wyETUCycBuxVtzutctwRf6rVRg5YZ3WiEhGIo8WBTx1EmrppADJOQug9yGqPbimNQbUQ
YP2JwGODoSaksk5NU1Cy04I8RmeDBaY3xK7eFONVq1AjJBUBdGtONeDO2Tq9k2PHSagrSGBPtKGo
CBiWJButQJiZB49oxXUaO2o1ClAMcE0gxGHD0H+Z2f8AiR4bxIAHmTw/tFHRJicQnlZfMqkunLgZ
pwxGDlUnfBlV9T3TkOMR3K4GSNwcWRIjq5qkQBzTSk3ILqOolUie0pxEB0+vusrl0zMc1h2ppyqM
ERUhFmbmiNQHIVTFyc7OnAcZnBVbTkF0E1wNlqlAEYkXWMX7wiYyBC+UkckHiacGTZql8exUB8FW
nMrqnVMImR7F07YHMpnERyTk6+2iaQY87JxZVCmMiUDkxQLzejxZSnEGIFADdXqiJFommaMNvqiA
4kbKrVy/SrGtkIgiL1Mmcp5bhJtUqkj2unlHVzTMOwrpPSPq3CfSe5dI0g4i6pjmusj6U5Gor9nA
DtVU5qYlnVUGJ0hCjohEpl2cKfMfZx5qvernwTRq13XUHOWCYUCr+RddRYLpaRzwTwIMhgyIIqMF
kVpN8+JRhpF3OaiCS45oCRL58kz3sSKjMFHQdQwOKqvQf5nZ/wCJHhvh/wDvZU/vFObJ2JjzKoBF
k0rXBCBDyFiEwixCYs3JVNVdzyTNdZBAvTHBfNXxXNMAmdgmenMqpCzKrdDSCUXAA5oaZOcYhO3e
sKZXWqvYgRGqwCck9qOI96agKND3I6XpZ0xiJDMp9zbiOwp4yL8qsqyHYaIHQ8R3odXdZVqgR0kZ
JpNWxVT3LoYNfUnJdsGouqFM7roOnsKpMdhClqqTVNkipOaZJyXrQItRszVaq/6003pYovQM1bJh
UZoSB6cUxPYE+oDkgJXxCBBaOKp1JoQ0qpYck5kaqpVahPYYI7VwKqzMjKjZYpo43ZVuBQsmN+Dp
8TZV4VNchVCUQ0QauiYMJC/NCUQBKV1q+ufYnN/yKI07WuF1GrO+acgmJFeSIE2jhyTUMR7QnAZl
1hxiv2ZLHA4LTKuR4kgViajNEjOxqtQjpdTkSzVQMZMTcjFGRNcl6D/M7P8Ajjw3wQw8ybH+8UwH
iv0qyoqFuS+ldRosfBPEeKsG7FdV4WdcsliPYqyiO0uvmfsHxVIk9pTRhEPYs601rbBPMsMXK169
ROCcCVM1WIKujQ9xWR5oxk4OaIkxGbsjpNLtf2rVGDv3KwiV1HwVACMzdMWiMMXQEg/ZRPEEe5OW
kse6q6JjsNE8nPNMwPaqxb/TkumZYK7g0dVqTiVWI7kGqCE4FKpiGJr4okuHC7FqCGo0NU5JJegQ
wBuCgxwYIAXxIQgQxGKdgztW6FGyKNerJ/cqx6TgveODGgTCVBhwH1UQcinFxfsTkuckSQG9qIFC
qkZJimTrqLPmmjJ4hHEC0jgVZtV1F4sRaeCeLSGBFlQKyuqlEQZxg6JESCzMtW6ORCMY/Lg6iSRp
OAqUSDVAi9iieNVdCMqRFS6qImOQXyiMeSEIlogYpoBw1wmfqy5qp1UX8vfH1Oy3/wC5HhvHDzJv
4lUlXK6zTN4qgATOmJVTVXdWQDADNdRpzKrId1VQGQ8PcjogB7SomJOqrgJy9cymnID2otIybuQI
AvXsQdgM0wJlyVIkZFYHNXITFmzTEvkqRLHGwThgidRpkq7bo6WA5ogEvysmNeZTg0yTgl+WKcAu
mmwKYkyPJDpaOZWqIHNkOqqADg9qcgTHtQBjfEVWkXudV1ZuwqlRkVpMdPNQMjnVPAiUbAXvdapC
oAi3IJwHkLpyi9lyCCL1ThWdlaoxQGKZ75LNAOSgDY2TOwTkuLLmnCafeVOQLgC6DFkdJvdNq7cE
xLNibd6A/TVHVSRsODwououUIzgXFyiNt64KrkRqyLlhLBDRPUDkiQOrIKhEQ10ZSPYqkRCeLJpl
zwpTiz1TxV64j8h8VSTIGRftDoSMNQdiqnSETt6ZEXe6B246ZktLJl6AG49TskH+/Hhvk/8AiT/x
FAuhi6t/p7Fkqyb2JzMd1VRyfBdEPEumpEcgn2wTHNftJCPaXQGrV2J9JLZphGIVSjinCIJrkmZ+
aDX9ia6Z0xoE4NMrKniVYL5qpmTEBs070yCMohzi+KcNE5ogmuLJgHIzTHpjmahOZeF/YtW25Jzs
iKD3oRma55rpOoH6tz3LVCBBGP6EXk2dEInqOBVAdOWSdwyo4IRAIIH6wYp9yJH9oJtQmMiraexd
LkZrTEAA3K0nFECyawzRZUwvzRwGSfALUT3LJBq58NIuq1ayqa5BO/a6Z25IapVwIRk9GqFKTNqL
AcgpVrgEGeMo35rpNkKODinagQlQgpx3p9RBwXVV7FSd+mzoB3EahGQLg3qycgHEA1CcgVwCMdsa
ZE17FWXTdAPbhUqlWRbCl+ESS4xZM1c0YihqxPuTFixWpmzTDhTgxURIVGV6IHbk4NwckAQLJxHA
Fxinkar0D/8A1Oy3348N4yI/Enj/AGiqOewLpj4lUAHYF1aiMF1UPNDVMc2QAeRRAgxzQGTou/PJ
aogd9UAWBGS+YsrOVUJndOAVZVpmnIdMA3JB04Jf2LBVLcliW50WXaqG2VUKeJYeCZg3IP8ABCpP
+nJVIH+nN1Uk+76F2WL/AOyjEmVsaJgHGBsr6U8pauRoniC4sWTyjpB+sy65ERwIt4oyjJzi9V8v
tWmUxEjBqouNQvq/Qnh4YIExYrrmOaeI6uS0xAdNOON0xi2RC6J1yKZgRmE0geSrThz4DLBOsyUM
AKKqoiceFExqU8RyW3tOxAr2lEixTgsE4Y5pmbmEz1BsqGhuFIg1lRkNJP8AaBTM4F094+xkDEOB
UFFmBOaBj1R+sHdGe27DBeZIEn3Ix0sI1KIi/tohMSvghGcXMaAozA0k4BaSzG61FyEGrgQeaIMQ
RIVBqtYNzTktcZdQNQ2KiZjUZB6ZLWHiCcVQuqp+DSvgQtMhqyJwKcxBKEbNYgpz3r0BNB+87Lff
jw3w7ftJv94oai45IiMa5oBgOaYSbkqhymAYKpbJPcZJ2oqp88FZWVaKlSqjvVw5TVRDdhVaJ9Sw
Hey+Z+wOreJCoAO74q5L/wCmCYMPD9Kq58fpZU/08Fh7PpdPhz/SyYMO/wD2U9T3N73TUHaXTEnu
YJwHOZK6qE5LSYgtim+WJsbqkn5ELS2kjA4Jmcck8Qy/aFUeRTRACLytcBOP0rXF3AWqVZG5KMSe
nBs08ZM2BVWKYgj3JixWpiOxdJZOC4QePhZHULWT4rkg+Fk5VBw+lXwonFShKTaY9RHYjWiyCz4B
F3bkjpFOaEgGIYgLUB0y9hRkaGzIAHSMle2LKsg4qCqMD20KBkGHiFLSWGSEiKOxKkBUZ4sukPEh
iy0yn2UUok6SLHArQQTJaXPMFA7fVpDuaVUZ7pvWl0ZR6WuM+xapAgSwKi0gSKXwWhhpBcL5NLZL
RKlLogOSMCEC4Y/Sn4PndaiQGwVn5L0Ehb952ek4ftI24b7/APiT/wARVlWh5JxXhVZKvCtAs18v
bwqHKBiG9yelMEC9OaZxyKcP3BM3ifgqkBuX+0qufd7FRn7v0qrtzHxZfp/2VfwA/Svlcc3+llgO
Q/Qnr7B73VW7ySuk2wFF8tedVQMvmV35qt00rp4hxiMUDE94TEWR6mBwV3KYBu1OZdy6QZEKvSmk
aZ4hAwNeeKqA6Yq3inZuzhZZcLlUDqoomNkdQDJwGXTNOKpiCuxZMmCL0KyZGR+fc9yBOJ4ZLmnK
OSD3JQbxKId05vgnNyq2CpRc8ytNxdiiAKN1LMHHJUoD4FVsUCKunixa4dDctJrqW5GpJxqCtQA5
xQ29wVwkFIRdvqkqGoGgsWFk+4ATgBQhW7lEa3GIOCeJBa/NOwiQFpmLFwUdFhZ1WhzTX58HXoP8
zs/8SPDff/xJ/wCIphbDhRNYZp9VVZdqrjkqXFlZVNEavzFU9fd70zA+J9y/1D4r/Wfgr+DD/Cur
2/pX+v8AQqfR9Dr5aZn/APMqybkP0Jy550HxVW9pVCe6it41RLrNEEsRmnEXVKc0QaEhgVEdleaa
SaJLZHBAEpiXTQCrJlRyU7NyTykSck5D810jT7lU0VAmwVS6p4KtuDgKzK7qoblwZOa8k7KjhUPY
CuqL9iqewFCgqmMWXSdJTwLkJ9N7qMJUAqexDSCYigA5KqrTg0R2p5SHYm1B1YSHIowkPFMBXFWX
NMzZlECuRT4izp5KowTEd606uwoOA1iEaAxC6Hc2QBAfsQox9iYBiKLRMAo+UXe607rSHO61Rsbg
JiHLXxDJhICN35oQkw/VlcFkYzGmUXY4FdAEwankUJOGr/qVLFaTfAqt02IXoP8AM7P+OPDfY/8A
eT/xFVNVbsKJsVUvy4VI71dzy/QvjRO3gD+hfpb3L9H+0mdzy/8AysrAHu/Sv9PpZN/p/wBlOR3/
AP6l83cHPuZWPsCq3fX3p3LDKnuWqApzV1dM6a6YAvggCGfFdMubYJiK4rTKoOK1QpnknLNwtVXZ
YnJUGkc0NUqYMgQXPNfLws/YuWKurBUTkeCpRPdVorV5q3cnFORVSmHgVUPzF1WRZO3eqBlUsn4M
z81kUGPaqB1Xp7VWyoGlOg7F0kgjFaompunZ+xat4OcIpgAALDg4TGi6hq7UZQtiMk4707uUy5rt
Q8Qrum8FzTs7p2umJ7Feouup2uiHYCykSDRMKBFnGnxVeoYOhKxuxWq4IqBZMekioBQMo1FS3xTx
Mg9TF3RMZOPaERI9b0ayeQpmmxFOHoP8zs/8SPDeY/8AeSp3lOaDNUJPYHVnPMj6E9B3fFMZE9n6
FX2t+lOA/j+hfpA9ycn/AE/vKjnsf6FgO0gfFfN3AE+9Y+IHuVWHc/vV39nuVAFQ9yeVEwKLigQA
DjJMI0zV1VyU4qE4TEOqFuSfFNisln2JgG7U5PaFX2qhXZZXVrJwW5LqFc1Sid2X0JqNgqRVaJwF
9KvVVDrpqOSJNFTg4o2Jouo1wwVmKo55Lrp71SoX0Kl+DNRMq+1WYYlahQCg7E9CnkGCaw/I5KoV
1amSMm6ZYcL0s6ddicIMgMk8YeNEI6DTJl1QIblwJxCYY4lGtQrFkWujqxVCgdTkXVCyEqSNg9VK
lCbpxWV6J2+KeHzDBM5D3BzVJOeSchpL0H+Z2f8AHHhvGI/7yb+JRgKEii0TsME4N8A59yrTwCoH
8T8AsvAe5VL9z+9V9pb3JqDsHxVXPeumLK6u/JOQWtVOmPcVQWsbIgPW7q1EVkcCmkK8lQMFWq5P
wv2KnUeSLBVPgnbvQNnTcKhxgU4BiT4K9UQ1ckAvertzVKqp8F8UHCLjvWSc17VVMCrOVp9yrKmB
XPHB0xAbDkqVV6ZJhUc1QArqDLNUos1Uqy6lQIQHzS9yAFQrcHLlWYcOm3NMgWdVo66TVaZgo0Lf
kuLCqlMjqBeK02ZUsMVyzTGAJ7Kp/lJFdK169UQKUqtQ7+AJDE4hOKhMI0GKrfEp2otIDx9qaLl0
CTUJ2qrMnC1m4yQYMV6A5+p2XH/mR4b9m8yf+IrqKYDqwK0zHTfShKLB+SxPaUwACLGizXYmF1dO
3inh3pqAK5Zc800r+9dJrkrKgum4ZK7qg7VcAcl1VBxKeyorMnCs6c2XLNExPaE1GfFfQqVzVu9V
KqK5rpNcsFgF8yYpx4oOfBW71S2KaVRkukd6oUxYjmnJLZBMB2EqqYXyVQruqxZUHdxon1UxCsnN
AEZ3Jt2LPgxrwumNuAo3Ndi5poiufBiAyHSF0nSU8ZOmMXbELSxANCWVfksS61OmZ04DdiyVm5rR
mtOCIy4uNsyBFC1kxiR3JjQc6Ksg2LVXSO9NwqrshWhwVg69Af8A+zs/8SPDec/95P8AxFNK6Bhg
mmAxTOGCYF+ANhkOBAqEMBniuokhMy04hOL4qxPJMQw5qqp4Kid+5OASq0CzCcUXNUoq1CY2TPTB
e5V8Qrq3Yc01imNEz1Ti6ZMr1TmvaqJnVLqiZVVCyrXmi9FTxXI48LpxRNdUYLNZ8kypVEytmuea
oKJyCOeCYIQfnLhZyvcnNFTBOx4mUjRaRdMLr3rUTRFyuk0xCcSVCmBXJMnuck5ucFVUTYp7lEtX
ALmapsTggZyY4AVTbZJ7mCfdLjIKMtsWupDYiJNViW96/CrykF//AB5ewrq2Zj+6UNUJRIvQoA0z
dEGpwPJcwvQf5nZ/xx4b51F9cqf3inxOaeNFpmWHJARNe1ZcGd0xXvXIpqkJmQfhZO6xKyBXUa5K
zKoqqJjZXqrd6rQ8lRMRVEDwTE6hlwZ6ZKnVyKwVKhVDNYLnzRp2oEF/eua+lcs1WnNUNcExx4V4
UNFzVKH2KrDMXVParKn6FUpwrUzXzJ2DZr9U+xVontyC0imSOouEdw3tFPndMq8K1VKBXVZOmAdM
WCfEoyldAJk7K3BwmKeM+4qpBzqg7J+FqImcr5JoUGCk5cc0TYBUA7SF0iKdgDmmxwOCYXxTxky/
almxQIk4Kd08mbF1OUQNIoGRO+W2dsPJT8o9IkdJ5L0A+t+87P8Ajjw3iSSDuSv9opvcgGTm+SBA
piuSbBMODkpyU4RFlmqBl1YqvcmwXLhnwp4LPknimZ1hTBV4PjghZVTxvmE/Dlwqswqi2OSqUxXT
Q5KtAmNX8FTuTFMPDhT2p3V+FeH0qvsTChzV1QsfYqntC6Qw5LqqnHtVUICwutItGyrxDcaqioql
ymNlRPgqIFOnWSqe9M7c11S8FQJs08i5yQagyQZMTQI+WWZMZllclVKYSI71UvyKyKrZUsVpvksp
ZIxJpIMmmHi9xkpmJIM2DclbxXojKLg722OwmYY8N8SqDuSY/wB4pvBVuE471RVdVVOGJVqc0xN0
2Ksnt+RyXLgxVV7uFTXkqBVHesis81S2SYhiqFiLcHtyQa4VAQqXxVPBWugXZN701inF8lW+RXTb
mmKpw5JuFOFmX0cWIpnwY15p3qFQNzVbomdTgiYtU3T3dPLBVtimAV+AT8KJ5FUqStJ7lp/IAPBy
KYqlk2CyC6Y96LohU+Yrmu1MyfHjTjeyBN0JC4TM/JNKoFnXZw/l4z9Tsg//ALkeG8AAP2k/8RV+
xVNU10RZsVUv2KgdXbsVanmrKlllxrwcWVBRXqqV7U3uVkXtksGTx8FW2OSxIwVmT2IT5Yq604YF
VCvTknuPaFRVHeEC7HNcwqqqbOyaQpgsVa1uFFW4zVPBOBRXovo4UNEx8Va+K6VWnaqXWSZXdcuS
ojJ6CpGCcBzIuIi4TGhHCi7U+CYWWRy4dvBjXlweqaxQCYKqZNw5pivdwojzX0J1kFTvXLi3DP8A
ICrVdcU+2K5L0UxIvH1G0dJwInE8N4AD8SfvK6iyaLnNP8qclyU44UPCnBwWKr4pkxICYOSMVQMU
4BV2zCo/esk+OauO5OycVHChDK7qg/Tw6bYqneFyK6ZUyKqXNkQAyf2cHFlSqCY1Tiofg0vHDhQf
QEzu3sV2VaqobmrpwmusFWPddUWaYh+eKa/sWSqF0lhkVfVyso7YprNRyTRqMsU5cA1Cz/Joq/ks
A6cRI7U4Y8gQ6rEjtCqqBk6qVQ8GuFeiARjjgiDdPhw58Wv/ANC6eJtgvQ/V3P3jaqMeuNOG+XoN
ydB9ooG6eNjcJgLq6q6qmcK1c1QeCy5lULrSuaaga70Vb8lSmRVXdOaHB1UqwcJn6hbmnIZYvimF
BgyZ3dV8VzTumN09ifBXKOWC54FNiq0OCZlWhzCv2uqVWAGVynFeSyTiyaVjnZOPYiSHBsypGiqw
7E2PNc8lSnLBOVS+KcX9iYAgoF2OLLNOVbvsnk/iqWCrdUD81KRtEMO1WY5oxl3FGJwVE3CgVlh4
pwQcw6czD5CqYiRIxdlSIHtVKBXpmmFSqy7kdQBGNFZuxNGfa6e45VVeFFWye6fEKmKACf8AIf8A
6DJX4eg/zOz/AMSPDeyO5P3lY+KMT3LpBbMqrKicVOSyTqlBhw+lMalZOmZNO+DLFuar4LssqnwX
0p8EypZASLjBlmFXHFV+VZ9qIJpyWbp7hWcKlRzVastQ7wqAn2K7NcBU+bmmIYomx9hVKDIWTHvQ
ahwTaSWVSRkF0luarcYr6VdMbYFVVA4wIWshuxUtkq1BTBXXTfJPiqd6vpPJdmaMsAENUqy6mVFd
V+YKqZnK0gKvBo0Tm5TIk4pgUwTI8L8aFdTEc07GPYuiT8k0hTNckckCMqKqr71dW8VQJv8AoBlx
9B/mdn/iR4bwP/iS/wARTgktcJwwTwGoG7ZrSzdqrIpiGITGyqSexe4lMKJimIqqCqdVoVUqzMr1
HtVQw5qhTjhRNIUPBnos04sLqtQqGnO6L1dZJwPFMmNOeKZ0SaHNM7diD9yZULg4ppVyZWA5q9M0
0rJtThWTYZJxUJjbJOy6fBVTwpywT/MVlyWSsnNTmqUTGir4pnrMgISF404VT4iyfOyPYnx4VWo2
CZ6DhROufBvyKcL8LphJ+V1WLHMLpLgrUbCyoCBy/wCgp+RavFl6D/M7P/Ejw3nNfMl/iPCzFdVD
iLJ40IxVwv8AR0YmvPJVIQF2TBnVk7sclU8HHgnTp4/KqV5KlAc0XOrNMFywKY1BuqWwXLFPH5U1
jw+KY3CqnVac0x6nQkPlFKYKleSoaGtU5OoHDBMnAcDJORTmsUwtjmmZwnhUDDFAkgHJWVPBMaKt
OapfnZAkUwOCrfNVtmq1yIX0p3cZhWVG9yabUwQ8uTRNj9ChH5jBzJs0YWfFaZUIVkSRQItcKXFz
SK0xoAqoyT/k5rlwfhZWKZl1HuVCAgI1OCCoePL/AKF06zVaL0H+Z2f+JHhumX820yM5Ex/dtwsX
s7qv82/htz4qn82/htz4r+r1/wAtufFf1erV/wCW3Piqfzj+F3P9pf1jqx/5bc+Kr/N/4bcb3r+r
fw258VX+bfw258UG/m4Bej+m3Piv6sf/AG258VT+b/w258VT+b/w258V/Vf4fc+Kr/Ny3+W3Piv6
v2/8tufFf1f+G3Piq/zV+30+58V0/wA2Hd6bc+K/q38NufFU/m3b/wAtufFU/m38NufFf1an+W3P
iv6v/DbnxVf5vT/LbnxVP5t/DbnxX9X/AIbc+Kr/ADcvy9Nuv7Cqfzb+G3Piv6t/DbnxXV/Nuz/l
tx/eq/zd/wD0258V0/zanL0258UX/mzjF/Tbje9f1f8Ahtz4qn82fl+7bje9dP8ANo93pp/7SP8A
93PZ+7bnxX9XcYv6bct4qn82p/ltz4o6f5tXH/ltz4qv82L/AOW3PiqfzamH/LbnxVP5rXl6fc+K
/qlf8vufFf1Zv/TbnxX9W/htz4pj/NXyP7vuP71X+av/AOn3Piv6sw/y0396L/zamP8Ay258V/Vv
4bc+KL/zV44g+nm3tK6P5oP/AG+58V1fzY/+23Pii381eOL+n3G96/q1P8tufFHT/NHOL+n3Piv6
q3Z6fc+Kr/NP4fc+Kr/NB/7efxUm/m/7Mm37tuUPKqnp/m+ub9T+m3A3iV/VP4efxQ1/zfRLP923
DTmxX9b/AITd+KaP857T+67vxXV/OXGP/K7o+lFv503/AKXd+K/rdf8AKbvxVf50/L913fiv6y0e
Xpd34r+uVx/5Td+Kp/O/4Td+Kb//AGh2/uu7/tLr/n1f8nu/7S/r38Hu/FH/AO+/wm78V/XKf5Td
+Kp/PP4Td+K/rf8ACbvxX9b/AITd+K/rf8Ju/FD/AO8v/wCl3fiuj+cAdnpdz4r+tt/6Td/2lX+d
/wAJu/7SP/3s/wDtN34r+ufwm7/tKn89/g93/aQP/wDuGRwH7puj6V/V2/8ATbnxX9Y/htz4r+s/
wu78V/Wf4Xd+K/rX8Lu/Ff1qn+V3fiv61/C7vxX9a/hd34r+t/wm78V/W/4Td+K/rf8ACbvxX9b/
AITd+K/rn8Ju/FV/nn8Ju/Ff1z+E3fiv65/CbvxX9cr/AJTd+K9HPa/nHnbsd7bO3t/uu5HXITi0
dRNHOPD/2Q==" transform="matrix(0.2808 0 0 0.2808 718.4155 182.0371)">
							</image>
						</g>
					</g>
				</g>
			</g>
		</g>
	</g>
	<rect x="342.841" y="335.012" fill="#211E1E" width="2.64" height="21.407"/>
	<polygon fill="#211E1E" points="350.453,356.417 353.091,356.417 353.091,339.092 353.165,339.092 360.032,356.417 
		363.647,356.417 363.647,335.012 361.009,335.012 361.009,352.874 360.959,352.874 353.873,335.012 350.453,335.012 	"/>
	<polygon fill="#211E1E" points="375.127,337.359 380.527,337.359 380.527,335.012 367.136,335.012 367.136,337.359 
		372.488,337.359 372.488,356.417 375.127,356.417 	"/>
	<polygon fill="#211E1E" points="384.016,356.417 395.452,356.417 395.452,354.071 386.655,354.071 386.655,346.4 394.597,346.4 
		394.597,344.054 386.655,344.054 386.655,337.359 395.085,337.359 395.085,335.012 384.016,335.012 	"/>
	<path fill="#211E1E" d="M402.037,337.163h3.153c2.467,0,3.859,1.318,3.859,3.42c0,4.13-2.833,4.13-4.398,4.13h-2.615V337.163z
		 M399.399,356.417h2.638v-9.531h3.567c1.346,0,2.983,0.173,3.179,3.105l0.195,3.346c0.048,1.052,0.267,2.641,0.708,3.08h3.176
		c-0.634-0.392-1.026-0.978-1.148-3.349l-0.172-3.664c-0.123-1.833-1.173-3.374-3.029-3.569v-0.072
		c2.491-0.586,3.298-3.057,3.298-5.378c0-3.396-2.125-5.374-5.473-5.374h-6.939V356.417z"/>
	<path fill="#211E1E" d="M428.191,340.558c-0.074-3.933-1.933-5.962-5.987-5.962c-5.45,0-6.281,3.91-6.281,5.89
		c0,7.869,10.117,4.35,10.117,10.411c0,2.271-1.638,3.788-3.739,3.788c-3.862,0-4.007-2.593-4.007-4.987h-2.64
		c0,4.57,1.467,7.137,6.476,7.137c3.029,0,6.647-1.394,6.647-6.354c0-7.82-10.092-4.229-10.092-10.262
		c0-2.272,1.318-3.472,3.542-3.472c2.518,0,3.323,1.564,3.323,3.813H428.191z"/>
	<path fill="#211E1E" d="M435.595,337.163h2.981c2.59,0,3.715,1.538,3.715,3.884c0,2.834-1.613,4.154-3.543,4.154h-3.152V337.163z
		 M432.955,356.417h2.64v-9.066h3.274c3.617,0,6.182-2.199,6.182-6.207c0-6.132-4.765-6.132-6.646-6.132h-5.45V356.417z"/>
	<polygon fill="#211E1E" points="448.882,356.417 460.318,356.417 460.318,354.071 451.521,354.071 451.521,346.4 459.462,346.4 
		459.462,344.054 451.521,344.054 451.521,337.359 459.951,337.359 459.951,335.012 448.882,335.012 	"/>
	<polygon fill="#211E1E" points="464.265,356.417 475.701,356.417 475.701,354.071 466.903,354.071 466.903,346.4 474.846,346.4 
		474.846,344.054 466.903,344.054 466.903,337.359 475.334,337.359 475.334,335.012 464.265,335.012 	"/>
	<path fill="#211E1E" d="M489.984,349.062c-0.074,2.616-0.538,5.623-3.739,5.623c-3.666,0-4.13-4.032-4.13-8.97
		c0-4.936,0.464-8.968,4.13-8.968c2.737,0,3.519,2.078,3.568,4.497h2.761c-0.122-4.203-1.955-6.646-6.329-6.646
		c-5.913,0-6.891,5.718-6.891,11.118s0.806,11.12,6.891,11.12c4.692,0,6.28-3.714,6.5-7.772H489.984z"/>
	<polygon fill="#211E1E" points="509.343,335.012 506.705,335.012 506.705,343.759 499.349,343.759 499.349,335.012 496.71,335.012 
		496.71,356.417 499.349,356.417 499.349,346.104 506.705,346.104 506.705,356.417 509.343,356.417 	"/>
	<path fill="#211E1E" d="M462.126,385.613h-9.115c0.318-1.393,1.149-2.638,2.322-3.615l3.006-2.615
		c2.37-2.027,3.713-3.74,3.713-6.99c0-1.857-0.781-5.67-5.693-5.67c-4.35,0-6.036,2.737-6.036,6.745h2.64
		c0-1.195,0.098-4.568,3.347-4.568c2.297,0,3.104,1.978,3.104,3.787c0,2.345-1.124,3.642-2.834,5.107l-1.735,1.467
		c-2.37,2.051-4.569,4.742-4.569,8.699h11.851V385.613z"/>
	<path fill="#211E1E" d="M468.682,377.646c0-4.692,0.171-8.942,3.396-8.942c3.25,0,3.421,4.25,3.421,8.942
		c0,4.473-0.171,8.748-3.421,8.748C468.853,386.394,468.682,382.12,468.682,377.646z M472.078,388.375
		c5.743,0,6.061-6.233,6.061-10.729c0-5.228-0.318-10.924-6.061-10.924c-5.718,0-6.035,6.231-6.035,10.924
		C466.042,382.681,466.36,388.375,472.078,388.375z"/>
	<path fill="#211E1E" d="M482.793,372.001h4.521v15.958h2.541v-21.236h-1.808c-0.123,3.007-2.591,3.324-5.254,3.495V372.001z"/>
	<path fill="#211E1E" d="M507.096,373.614c0,2.272-0.538,4.937-3.373,4.937c-2.81,0-3.347-2.664-3.347-4.937
		c0-2.247,0.708-4.91,3.347-4.91C506.363,368.704,507.096,371.368,507.096,373.614z M498.225,383.292
		c0,0.978,0.416,5.083,5.353,5.083c6.158,0,6.451-6.72,6.451-11.56c0-4.397-0.416-10.093-6.305-10.093
		c-3.958,0-5.987,3.154-5.987,7.26c0,3.981,1.857,6.719,5.548,6.719c2.346,0,3.616-1.587,4.057-2.369h0.048v1.685
		c0,1.075-0.219,6.377-3.738,6.377c-1.808,0-2.785-1.368-2.785-3.102H498.225z"/>
	<path fill="#211E1E" d="M440.738,219.06h0.709v-4.301h-2.761v0.832h1.881c0,0.146,0,0.269,0,0.39c0,1.588-0.586,2.42-1.662,2.42
		c-1.197,0-1.76-1.051-1.76-3.25c0-2.541,0.416-3.738,1.785-3.738c0.928,0,1.49,0.636,1.515,1.759h0.904
		c0-1.588-0.88-2.541-2.37-2.541c-1.98,0-2.787,1.344-2.787,4.252c0,2.957,0.783,4.375,2.591,4.375c0.929,0,1.588-0.489,1.906-1.344
		L440.738,219.06z"/>
	<path fill="#211E1E" d="M443.982,214.612v-2.981h1.295c1.148,0,1.687,0.417,1.687,1.441c0,1.075-0.611,1.54-1.882,1.54H443.982z
		 M443.078,219.06h0.904v-3.666h1.173c1.222,0,1.637,0.318,1.735,1.491c0.097,1.05,0.048,1.783,0.293,2.174h1.124
		c-0.464-0.391-0.366-1.197-0.488-2.468c-0.098-1.075-0.538-1.564-1.442-1.637c1.026-0.269,1.539-0.905,1.539-2.004
		c0-1.442-0.831-2.125-2.517-2.125h-2.322V219.06z"/>
	<path fill="#211E1E" d="M450.66,215.858l1.1-4.325l1.099,4.325H450.66z M448.924,219.06h0.929l0.586-2.37h2.615l0.635,2.37h0.929
		l-2.297-8.234h-1.148L448.924,219.06z"/>
	<polygon fill="#211E1E" points="455.235,219.06 459.584,219.06 459.584,218.18 456.237,218.18 459.537,211.68 459.537,210.825 
		455.431,210.825 455.431,211.68 458.559,211.68 455.235,218.204 	"/>
	<rect x="462.994" y="215.59" fill="#211E1E" width="5.474" height="0.782"/>
	<path fill="#211E1E" d="M473.538,215.858l1.1-4.325l1.1,4.325H473.538z M471.803,219.06h0.93l0.586-2.37h2.615l0.635,2.37h0.929
		l-2.297-8.234h-1.148L471.803,219.06z"/>
	<path fill="#211E1E" d="M478.401,210.825v5.376c0,1.001,0.024,1.711,0.391,2.199c0.391,0.537,1.1,0.856,1.979,0.856
		c0.831,0,1.516-0.294,1.955-0.832c0.513-0.611,0.513-1.319,0.513-2.223v-5.376h-0.904v5.546c0,1.393-0.464,2.052-1.564,2.052
		c-1.149,0-1.441-0.659-1.441-2.052v-5.546H478.401z"/>
	<path fill="#211E1E" d="M484.754,216.445c0,0.099-0.024,0.195-0.024,0.268c0,1.662,0.806,2.542,2.321,2.542
		c1.564,0,2.467-0.929,2.467-2.493c0-0.733-0.194-1.271-0.611-1.662c-0.292-0.244-0.928-0.465-1.833-0.807
		c-0.879-0.342-1.344-0.586-1.344-1.539c0-0.831,0.513-1.344,1.368-1.344c0.831,0,1.296,0.514,1.296,1.442c0,0.024,0,0.073,0,0.098
		h0.855c0-0.073,0-0.147,0-0.195c0-1.344-0.807-2.126-2.126-2.126c-1.441,0-2.249,0.855-2.249,2.297
		c0,0.733,0.221,1.296,0.685,1.687c0.27,0.245,0.88,0.439,1.759,0.759c0.88,0.341,1.295,0.586,1.295,1.514
		c0,0.977-0.538,1.539-1.466,1.539c-1.002,0-1.515-0.562-1.515-1.684c0-0.05,0-0.149,0-0.295H484.754z"/>
	<polygon fill="#211E1E" points="492.525,219.06 493.479,219.06 493.479,211.68 495.434,211.68 495.434,210.825 490.424,210.825 
		490.424,211.68 492.525,211.68 	"/>
	<path fill="#211E1E" d="M497.456,214.612v-2.981h1.295c1.148,0,1.686,0.417,1.686,1.441c0,1.075-0.611,1.54-1.882,1.54H497.456z
		 M496.552,219.06h0.904v-3.666h1.173c1.222,0,1.637,0.318,1.735,1.491c0.099,1.05,0.049,1.783,0.293,2.174h1.124
		c-0.464-0.391-0.366-1.197-0.489-2.468c-0.098-1.075-0.538-1.564-1.441-1.637c1.026-0.269,1.539-0.905,1.539-2.004
		c0-1.442-0.831-2.125-2.517-2.125h-2.321V219.06z"/>
	<rect x="503.167" y="210.825" fill="#211E1E" width="0.953" height="8.234"/>
	<path fill="#211E1E" d="M506.968,215.858l1.1-4.325l1.1,4.325H506.968z M505.232,219.06h0.928l0.586-2.37h2.615l0.636,2.37h0.928
		l-2.297-8.234h-1.149L505.232,219.06z"/>
	<path fill="#211E1E" d="M383.93,231.51c0,0.097-0.025,0.195-0.025,0.269c0,1.662,0.807,2.541,2.323,2.541
		c1.564,0,2.467-0.929,2.467-2.493c0-0.733-0.195-1.27-0.611-1.662c-0.292-0.244-0.929-0.463-1.833-0.807
		c-0.878-0.342-1.343-0.586-1.343-1.539c0-0.831,0.514-1.344,1.369-1.344c0.831,0,1.295,0.514,1.295,1.442c0,0.026,0,0.074,0,0.097
		h0.855c0-0.071,0-0.146,0-0.195c0-1.344-0.808-2.127-2.125-2.127c-1.441,0-2.249,0.856-2.249,2.298
		c0,0.734,0.221,1.294,0.686,1.686c0.267,0.245,0.878,0.439,1.759,0.758c0.879,0.342,1.296,0.586,1.296,1.514
		c0,0.978-0.539,1.541-1.467,1.541c-1.001,0-1.516-0.563-1.516-1.688c0-0.048,0-0.145,0-0.292H383.93z"/>
	<polygon fill="#211E1E" points="390.186,234.124 394.291,234.124 394.291,233.269 391.09,233.269 391.09,230.239 394.07,230.239 
		394.07,229.384 391.09,229.384 391.09,226.745 394.266,226.745 394.266,225.89 390.186,225.89 	"/>
	<path fill="#211E1E" d="M396.697,229.824v-3.128h1.148c1.149,0,1.662,0.416,1.662,1.491c0,1.125-0.489,1.638-1.612,1.638H396.697z
		 M395.77,234.124h0.928v-3.469h1.222c1.662,0,2.542-0.831,2.542-2.468c0-1.611-0.806-2.297-2.591-2.297h-2.101V234.124z"/>
	<polygon fill="#211E1E" points="403.309,234.124 404.261,234.124 404.261,226.745 406.216,226.745 406.216,225.89 401.207,225.89 
		401.207,226.745 403.309,226.745 	"/>
	<polygon fill="#211E1E" points="407.334,234.124 411.439,234.124 411.439,233.269 408.238,233.269 408.238,230.239 
		411.219,230.239 411.219,229.384 408.238,229.384 408.238,226.745 411.415,226.745 411.415,225.89 407.334,225.89 	"/>
	<polygon fill="#211E1E" points="418.612,234.124 419.516,234.124 419.516,225.89 418,225.89 416.192,232.903 414.457,225.89 
		412.917,225.89 412.917,234.124 413.798,234.124 413.798,226.354 415.728,234.124 416.607,234.124 418.612,226.354 	"/>
	<path fill="#211E1E" d="M422.093,229.384v-2.761h0.904c1.173,0,1.637,0.317,1.637,1.344c0,1.075-0.464,1.417-1.563,1.417H422.093z
		 M422.093,233.318v-3.177h1.028c1.245,0,1.734,0.439,1.734,1.661c0,1.076-0.464,1.516-1.589,1.516H422.093z M421.214,234.124h2.225
		c1.513,0,2.394-0.855,2.394-2.37c0-1.149-0.439-1.808-1.417-2.004c0.782-0.22,1.147-0.806,1.147-1.806
		c0-1.443-0.683-2.055-2.199-2.055h-2.15V234.124z"/>
	<polygon fill="#211E1E" points="427.354,234.124 431.458,234.124 431.458,233.269 428.258,233.269 428.258,230.239 
		431.239,230.239 431.239,229.384 428.258,229.384 428.258,226.745 431.434,226.745 431.434,225.89 427.354,225.89 	"/>
	<path fill="#211E1E" d="M433.841,229.677v-2.981h1.296c1.148,0,1.686,0.416,1.686,1.443c0,1.074-0.611,1.538-1.882,1.538H433.841z
		 M432.937,234.124h0.904v-3.665h1.173c1.222,0,1.637,0.318,1.735,1.491c0.098,1.052,0.049,1.785,0.293,2.174h1.124
		c-0.464-0.39-0.366-1.197-0.489-2.466c-0.098-1.077-0.538-1.563-1.442-1.638c1.026-0.269,1.54-0.903,1.54-2.004
		c0-1.439-0.831-2.125-2.517-2.125h-2.321V234.124z"/>
	<path fill="#211E1E" d="M444.331,234.124h0.904v-8.284h-0.733c-0.048,1.027-0.635,1.541-1.808,1.541c-0.024,0-0.073,0-0.098,0
		v0.684h1.735V234.124z"/>
	<path fill="#211E1E" d="M447.898,231.95c0.024,1.516,0.758,2.346,2.15,2.346c1.418,0,2.249-1.074,2.249-2.933
		c0-1.71-0.831-2.688-2.077-2.688c-0.636,0-1.125,0.269-1.296,0.611l0.294-2.468h2.761v-0.829h-3.469l-0.44,4.225l0.782,0.075
		c0.147-0.49,0.562-0.781,1.149-0.781c0.904,0,1.344,0.658,1.344,1.978c0,1.294-0.465,2.004-1.32,2.004
		c-0.781,0-1.222-0.538-1.222-1.466c0-0.025,0-0.049,0-0.074H447.898z"/>
	<path fill="#211E1E" d="M454.05,229.745v-2.444h0.586v-0.416h-0.586v-0.953h-0.489v0.953h-0.489v0.416h0.489v2.688
		c0,0.368,0.219,0.563,0.708,0.563c0.074,0,0.196,0,0.367-0.026v-0.44c-0.048,0.024-0.073,0.024-0.122,0.024
		C454.148,230.111,454.05,230.062,454.05,229.745z"/>
	<path fill="#211E1E" d="M455.187,230.478h0.488v-2.419c0-0.513,0.318-0.807,0.733-0.807c0.367,0,0.514,0.196,0.514,0.611v2.615
		h0.513v-2.857c0-0.514-0.342-0.808-0.88-0.808c-0.415,0-0.684,0.147-0.88,0.464v-1.588h-0.488V230.478z"/>
	<rect x="460.868" y="230.655" fill="#211E1E" width="5.475" height="0.782"/>
	<path fill="#211E1E" d="M472.072,234.124h0.905v-8.284h-0.733c-0.049,1.027-0.636,1.541-1.809,1.541c-0.024,0-0.074,0-0.099,0
		v0.684h1.735V234.124z"/>
	<path fill="#211E1E" d="M479.134,228.675c0,1.247-0.39,1.833-1.27,1.833c-0.855,0-1.319-0.636-1.319-1.858
		c0-1.293,0.439-2.002,1.245-2.002C478.645,226.648,479.134,227.357,479.134,228.675z M479.256,230.264
		c-0.122,2.224-0.39,3.227-1.49,3.227c-0.611,0-1.027-0.417-1.027-1.125c0-0.025,0-0.025,0-0.049h-0.928
		c0,1.197,0.757,1.979,1.93,1.979c1.759,0,2.37-1.441,2.37-4.521c0-2.663-0.634-3.935-2.322-3.935c-1.319,0-2.199,1.052-2.199,2.835
		c0,1.662,0.832,2.615,2.101,2.615C478.45,231.291,478.988,230.948,479.256,230.264z"/>
	<path fill="#211E1E" d="M481.798,229.745v-2.444h0.586v-0.416h-0.586v-0.953h-0.489v0.953h-0.489v0.416h0.489v2.688
		c0,0.368,0.219,0.563,0.708,0.563c0.073,0,0.195,0,0.367-0.026v-0.44c-0.049,0.024-0.074,0.024-0.123,0.024
		C481.896,230.111,481.798,230.062,481.798,229.745z"/>
	<path fill="#211E1E" d="M482.935,230.478h0.489v-2.419c0-0.513,0.317-0.807,0.733-0.807c0.366,0,0.513,0.196,0.513,0.611v2.615
		h0.512v-2.857c0-0.514-0.342-0.808-0.878-0.808c-0.416,0-0.685,0.147-0.88,0.464v-1.588h-0.489V230.478z"/>
	<path fill="#211E1E" d="M489.153,234.124h4.375v-0.83h-3.446c0-0.758,0.562-1.393,1.539-2.222c0.563-0.466,1.1-0.833,1.393-1.297
		c0.342-0.489,0.439-1.027,0.439-1.71c0-1.417-0.806-2.249-2.102-2.249c-1.344,0-2.125,0.88-2.125,2.519c0,0.097,0,0.196,0,0.292
		h0.879c0-0.17,0-0.244,0-0.292c0-1.125,0.39-1.687,1.197-1.687c0.758,0,1.197,0.514,1.197,1.466c0,0.612-0.098,1.05-0.416,1.395
		c-0.342,0.414-0.879,0.853-1.515,1.416c-0.953,0.878-1.418,1.808-1.418,2.885C489.153,233.905,489.153,234.027,489.153,234.124z"/>
	<path fill="#211E1E" d="M495.75,230.02c0-2.322,0.171-3.372,1.271-3.372c1.05,0,1.271,1.075,1.271,3.396
		c0,2.321-0.147,3.446-1.271,3.446C495.971,233.49,495.75,232.414,495.75,230.02z M494.798,230.044c0,2.908,0.489,4.251,2.249,4.251
		c1.661,0,2.174-1.344,2.174-4.227c0-1.466-0.073-2.493-0.439-3.128c-0.391-0.733-0.954-1.1-1.759-1.1
		C495.287,225.84,494.798,227.185,494.798,230.044z"/>
	<path fill="#211E1E" d="M502.525,234.124h0.905v-8.284h-0.733c-0.049,1.027-0.636,1.541-1.808,1.541c-0.024,0-0.074,0-0.099,0
		v0.684h1.735V234.124z"/>
	<path fill="#211E1E" d="M509.588,228.675c0,1.247-0.39,1.833-1.27,1.833c-0.855,0-1.32-0.636-1.32-1.858
		c0-1.293,0.439-2.002,1.246-2.002C509.099,226.648,509.588,227.357,509.588,228.675z M509.71,230.264
		c-0.122,2.224-0.39,3.227-1.49,3.227c-0.611,0-1.027-0.417-1.027-1.125c0-0.025,0-0.025,0-0.049h-0.928
		c0,1.197,0.757,1.979,1.931,1.979c1.758,0,2.37-1.441,2.37-4.521c0-2.663-0.635-3.935-2.321-3.935c-1.32,0-2.2,1.052-2.2,2.835
		c0,1.662,0.832,2.615,2.102,2.615C508.904,231.291,509.442,230.948,509.71,230.264z"/>
	<polygon fill="#211E1E" points="375.462,249.183 376.465,249.183 377.931,242.463 379.373,249.183 380.35,249.183 382.109,240.949 
		381.205,240.949 379.886,247.864 378.468,240.949 377.417,240.949 375.95,247.913 374.656,240.949 373.704,240.949 	"/>
	<polygon fill="#211E1E" points="384.34,249.183 385.34,249.183 386.808,242.463 388.25,249.183 389.228,249.183 390.986,240.949 
		390.083,240.949 388.762,247.864 387.346,240.949 386.295,240.949 384.828,247.913 383.532,240.949 382.581,240.949 	"/>
	<polygon fill="#211E1E" points="393.215,249.183 394.217,249.183 395.685,242.463 397.125,249.183 398.103,249.183 
		399.862,240.949 398.958,240.949 397.64,247.864 396.221,240.949 395.17,240.949 393.705,247.913 392.409,240.949 391.456,240.949 
			"/>
	<rect x="400.381" y="248.06" fill="#211E1E" width="0.978" height="1.123"/>
	<rect x="403.191" y="240.949" fill="#211E1E" width="0.953" height="8.234"/>
	<polygon fill="#211E1E" points="405.819,249.183 406.699,249.183 406.626,241.926 409.533,249.183 410.707,249.183 
		410.707,240.949 409.827,240.949 409.898,248.011 407.04,240.949 405.819,240.949 	"/>
	<polygon fill="#211E1E" points="413.901,249.183 414.854,249.183 414.854,241.804 416.809,241.804 416.809,240.949 
		411.799,240.949 411.799,241.804 413.901,241.804 	"/>
	<polygon fill="#211E1E" points="417.927,249.183 422.032,249.183 422.032,248.328 418.831,248.328 418.831,245.298 
		421.813,245.298 421.813,244.443 418.831,244.443 418.831,241.804 422.009,241.804 422.009,240.949 417.927,240.949 	"/>
	<path fill="#211E1E" d="M424.416,244.736v-2.981h1.294c1.149,0,1.686,0.416,1.686,1.444c0,1.073-0.611,1.538-1.882,1.538H424.416z
		 M423.512,249.183h0.904v-3.665h1.173c1.221,0,1.637,0.318,1.734,1.491c0.098,1.051,0.048,1.784,0.293,2.174h1.123
		c-0.465-0.39-0.367-1.197-0.488-2.466c-0.099-1.077-0.538-1.563-1.441-1.638c1.026-0.27,1.539-0.902,1.539-2.004
		c0-1.439-0.831-2.125-2.516-2.125h-2.321V249.183z"/>
	<path fill="#211E1E" d="M429.95,246.569c0,0.097-0.024,0.195-0.024,0.269c0,1.661,0.807,2.541,2.322,2.541
		c1.563,0,2.468-0.929,2.468-2.493c0-0.733-0.195-1.27-0.61-1.662c-0.294-0.244-0.929-0.463-1.833-0.807
		c-0.879-0.342-1.344-0.586-1.344-1.539c0-0.831,0.513-1.344,1.368-1.344c0.832,0,1.295,0.513,1.295,1.441c0,0.027,0,0.074,0,0.098
		h0.855c0-0.071,0-0.146,0-0.195c0-1.344-0.807-2.126-2.126-2.126c-1.442,0-2.248,0.855-2.248,2.298c0,0.734,0.22,1.294,0.684,1.686
		c0.269,0.245,0.88,0.44,1.759,0.758c0.879,0.342,1.295,0.586,1.295,1.515c0,0.978-0.538,1.54-1.466,1.54
		c-1.001,0-1.515-0.563-1.515-1.687c0-0.048,0-0.145,0-0.292H429.95z"/>
	<path fill="#211E1E" d="M437.14,244.883v-3.128h1.149c1.148,0,1.661,0.416,1.661,1.491c0,1.125-0.489,1.637-1.612,1.637H437.14z
		 M436.211,249.183h0.928v-3.469h1.222c1.662,0,2.542-0.831,2.542-2.468c0-1.611-0.807-2.297-2.591-2.297h-2.102V249.183z"/>
	<polygon fill="#211E1E" points="442.229,249.183 446.334,249.183 446.334,248.328 443.133,248.328 443.133,245.298 
		446.114,245.298 446.114,244.443 443.133,244.443 443.133,241.804 446.31,241.804 446.31,240.949 442.229,240.949 	"/>
	<polygon fill="#211E1E" points="447.813,249.183 451.917,249.183 451.917,248.328 448.717,248.328 448.717,245.298 
		451.698,245.298 451.698,244.443 448.717,244.443 448.717,241.804 451.894,241.804 451.894,240.949 447.813,240.949 	"/>
	<path fill="#211E1E" d="M457.307,246.154c0,0.049,0,0.097,0,0.146c0,1.417-0.538,2.199-1.466,2.199
		c-1.222,0-1.637-1.075-1.637-3.421c0-2.395,0.416-3.519,1.588-3.519c0.88,0,1.466,0.662,1.466,1.808h0.879c0-0.024,0-0.073,0-0.098
		c0-1.613-0.83-2.517-2.346-2.517c-1.833,0-2.565,1.369-2.565,4.326c0,2.908,0.782,4.301,2.492,4.301
		c1.589,0,2.468-1.125,2.492-3.226H457.307z"/>
	<polygon fill="#211E1E" points="459.798,249.183 460.752,249.183 460.752,245.176 463.807,245.176 463.807,249.183 464.76,249.183 
		464.76,240.949 463.807,240.949 463.807,244.345 460.752,244.345 460.752,240.949 459.798,240.949 	"/>
	<path fill="#211E1E" d="M466.238,249.183h4.374v-0.831h-3.445c0-0.757,0.562-1.392,1.539-2.222c0.562-0.466,1.1-0.833,1.393-1.297
		c0.342-0.489,0.439-1.027,0.439-1.71c0-1.417-0.806-2.249-2.102-2.249c-1.344,0-2.125,0.88-2.125,2.519c0,0.097,0,0.196,0,0.292
		h0.879c0-0.171,0-0.245,0-0.292c0-1.125,0.391-1.687,1.198-1.687c0.757,0,1.197,0.515,1.197,1.466c0,0.612-0.098,1.05-0.416,1.394
		c-0.342,0.414-0.879,0.854-1.514,1.416c-0.954,0.879-1.418,1.808-1.418,2.885C466.238,248.964,466.238,249.086,466.238,249.183z"/>
	<path fill="#211E1E" d="M472.841,245.079c0-2.321,0.171-3.372,1.271-3.372c1.05,0,1.271,1.075,1.271,3.396
		c0,2.321-0.147,3.446-1.271,3.446C473.062,248.549,472.841,247.473,472.841,245.079z M471.889,245.103
		c0,2.908,0.489,4.252,2.249,4.252c1.661,0,2.174-1.344,2.174-4.227c0-1.466-0.073-2.493-0.439-3.129
		c-0.391-0.733-0.954-1.1-1.759-1.1C472.377,240.899,471.889,242.244,471.889,245.103z"/>
	<path fill="#211E1E" d="M479.61,249.183h0.905v-8.284h-0.733c-0.049,1.027-0.636,1.54-1.809,1.54c-0.023,0-0.073,0-0.098,0v0.684
		h1.735V249.183z"/>
	<path fill="#211E1E" d="M486.673,243.735c0,1.246-0.391,1.833-1.27,1.833c-0.855,0-1.321-0.636-1.321-1.857
		c0-1.294,0.44-2.003,1.247-2.003C486.184,241.707,486.673,242.416,486.673,243.735z M486.795,245.323
		c-0.122,2.224-0.391,3.226-1.491,3.226c-0.611,0-1.026-0.416-1.026-1.125c0-0.024,0-0.024,0-0.049h-0.929
		c0,1.198,0.758,1.98,1.931,1.98c1.759,0,2.37-1.442,2.37-4.521c0-2.663-0.636-3.935-2.322-3.935c-1.319,0-2.199,1.051-2.199,2.835
		c0,1.661,0.831,2.615,2.102,2.615C485.989,246.35,486.526,246.007,486.795,245.323z"/>
	<rect x="489.263" y="248.06" fill="#211E1E" width="0.978" height="1.123"/>
	<path fill="#211E1E" d="M492.855,245.079c0-2.419,0.342-3.494,1.686-3.494c1.369,0,1.686,1.05,1.686,3.494
		c0,2.443-0.317,3.494-1.686,3.494C493.223,248.573,492.855,247.5,492.855,245.079z M491.902,245.079
		c0,2.957,0.733,4.301,2.64,4.301c1.955,0,2.639-1.319,2.639-4.301c0-2.982-0.684-4.326-2.639-4.326
		C492.611,240.753,491.902,242.122,491.902,245.079z"/>
	<path fill="#211E1E" d="M499.612,244.736v-2.981h1.295c1.148,0,1.686,0.416,1.686,1.444c0,1.073-0.611,1.538-1.881,1.538H499.612z
		 M498.708,249.183h0.904v-3.665h1.173c1.222,0,1.638,0.318,1.735,1.491c0.098,1.051,0.049,1.784,0.294,2.174h1.124
		c-0.464-0.39-0.367-1.197-0.489-2.466c-0.098-1.077-0.537-1.563-1.441-1.638c1.026-0.27,1.54-0.902,1.54-2.004
		c0-1.439-0.832-2.125-2.517-2.125h-2.322V249.183z"/>
	<path fill="#211E1E" d="M509.698,249.183h0.708v-4.3h-2.761v0.831h1.881c0,0.147,0,0.269,0,0.392c0,1.589-0.586,2.419-1.661,2.419
		c-1.197,0-1.76-1.052-1.76-3.251c0-2.541,0.416-3.738,1.784-3.738c0.929,0,1.491,0.636,1.515,1.76h0.904
		c0-1.588-0.879-2.542-2.371-2.542c-1.979,0-2.785,1.344-2.785,4.252c0,2.956,0.782,4.374,2.59,4.374
		c0.928,0,1.589-0.489,1.906-1.344L509.698,249.183z"/>
	<rect x="856.769" y="226.8" fill="#EE1D52" width="12.103" height="12.102"/>
	<rect x="870.081" y="226.8" fill="#EE1D52" width="12.107" height="12.102"/>
	<rect x="883.398" y="226.8" fill="#EE1D52" width="12.103" height="12.102"/>
	<rect x="862.823" y="232.849" fill="#EE1D52" width="12.103" height="12.102"/>
	<rect x="877.345" y="220.746" fill="#EE1D52" width="12.101" height="12.103"/>
	<polygon fill="#211E1E" points="900.59,228.736 896.71,228.736 896.71,226.806 906.699,226.806 906.699,228.736 902.825,228.736 
		902.825,238.877 900.59,238.877 	"/>
	<path fill="#211E1E" d="M917.928,234.443c0,0.727-0.13,1.38-0.392,1.967c-0.263,0.586-0.617,1.082-1.069,1.49
		c-0.452,0.411-0.99,0.728-1.607,0.947c-0.616,0.22-1.289,0.33-2.016,0.33c-0.72,0-1.392-0.11-2.017-0.33
		c-0.616-0.22-1.154-0.536-1.612-0.947c-0.459-0.409-0.812-0.904-1.069-1.49c-0.256-0.586-0.385-1.24-0.385-1.967V226.8h2.242v7.563
		c0,0.294,0.044,0.605,0.142,0.929c0.098,0.323,0.25,0.623,0.475,0.892c0.222,0.275,0.515,0.497,0.875,0.672
		c0.366,0.177,0.813,0.262,1.35,0.262c0.538,0,0.99-0.084,1.351-0.262c0.365-0.176,0.659-0.397,0.88-0.672
		c0.22-0.269,0.378-0.568,0.47-0.892c0.098-0.324,0.146-0.635,0.146-0.929V226.8h2.237V234.443z"/>
	<path fill="#211E1E" d="M905.343,240.539c-0.441-0.189-1.015-0.279-1.479-0.279c-1.338,0-2.077,0.835-2.077,1.935
		c0,1.081,0.728,1.9,1.949,1.9c0.312,0,0.556-0.031,0.752-0.074v-1.392h-1.21v-0.844h2.31v2.877
		c-0.605,0.152-1.24,0.275-1.852,0.275c-1.857,0-3.146-0.867-3.146-2.669c0-1.827,1.196-2.852,3.146-2.852
		c0.666,0,1.191,0.084,1.681,0.219L905.343,240.539z"/>
	<path fill="#211E1E" d="M906.723,240.905h0.978v0.893h0.018c0.049-0.365,0.502-0.984,1.155-0.984c0.11,0,0.227,0,0.336,0.033v1.031
		c-0.098-0.053-0.293-0.086-0.489-0.086c-0.897,0-0.897,1.045-0.897,1.607v1.447h-1.1V240.905z"/>
	<path fill="#211E1E" d="M911.373,244.187c0.335,0,0.599-0.135,0.77-0.349c0.184-0.226,0.232-0.507,0.232-0.812h-0.478
		c-0.494,0-1.228,0.073-1.228,0.678C910.669,244.042,910.975,244.187,911.373,244.187 M910.083,241.113
		c0.428-0.183,1.002-0.3,1.466-0.3c1.289,0,1.827,0.496,1.827,1.664v0.5c0,0.397,0,0.697,0.019,0.99
		c0.013,0.298,0.024,0.574,0.055,0.879h-0.972c-0.042-0.207-0.042-0.47-0.048-0.592h-0.019c-0.257,0.442-0.808,0.684-1.309,0.684
		c-0.744,0-1.479-0.421-1.479-1.173c0-0.586,0.306-0.928,0.729-1.124c0.414-0.189,0.958-0.225,1.416-0.225h0.611
		c0-0.636-0.312-0.848-0.952-0.848c-0.472,0-0.936,0.163-1.314,0.425L910.083,241.113z"/>
	<polygon fill="#211E1E" points="914.281,240.905 917.628,240.905 917.628,241.749 915.526,244.052 917.701,244.052 
		917.701,244.846 914.189,244.846 914.189,244.009 916.327,241.7 914.281,241.7 	"/>
	<rect x="918.592" y="240.924" fill="#EE1D52" width="4.032" height="4.026"/>
	<rect x="532.657" y="210.696" fill="#211E1E" width="6.396" height="30.742"/>
	<rect x="563.714" y="266.155" fill="#211E1E" width="30.747" height="6.395"/>
	<rect x="433.499" y="314.508" fill="#211E1E" width="30.741" height="6.396"/>
	<polyline fill="#211E1E" points="604.072,351.354 604.072,344.957 573.324,344.957 573.324,351.354 	"/>
	<polygon fill="#EE1D52" points="566.512,351.354 566.512,344.957 539.827,344.957 539.827,297.165 532.657,297.165 
		532.657,351.354 	"/>
	<polygon fill="#EE1D52" points="496.57,272.65 507.987,272.65 507.987,266.26 480.63,266.26 484.034,272.65 	"/>
	<polygon fill="#EE1D52" points="491.151,296.548 496.252,296.548 496.252,290.152 484.034,290.152 484.034,296.548 	"/>
	<polygon fill="#EE1D52" points="484.834,320.44 496.252,320.44 496.252,314.05 472.298,314.05 472.298,320.44 	"/>
	<rect x="514.804" y="266.1" fill="#211E1E" width="42.099" height="6.396"/>
	<rect x="532.657" y="248.249" fill="#211E1E" width="6.396" height="42.098"/>
	<polyline fill="#211E1E" points="539.051,358.684 532.657,358.684 532.657,389.426 539.051,389.426 	"/>
	<path fill="#EE1D52" d="M484.034,266.26c-8.363,0-15.145,6.781-15.145,15.143c0,8.364,6.781,15.145,15.145,15.145v-6.396
		c-4.832,0-8.749-3.917-8.749-8.749s3.917-8.753,8.749-8.753V266.26z"/>
	<path fill="#EE1D52" d="M496.252,290.152c8.364,0,15.145,6.781,15.145,15.146c0,8.362-6.781,15.143-15.145,15.143v-6.39
		c4.832,0,8.748-3.922,8.748-8.753c0-4.834-3.916-8.75-8.748-8.75V290.152z"/>
	<polyline fill="#D1D1D2" points="724.703,351.354 724.703,344.957 610.883,344.957 610.883,351.354 	"/>
	<rect x="314.432" y="314.508" fill="#D1D1D2" width="112.249" height="6.396"/>
	<rect x="601.274" y="266.155" fill="#D1D1D2" width="61.237" height="6.395"/>
	<rect x="420.291" y="276.956" fill="#D1D1D2" width="6.391" height="30.741"/>
	<rect x="610.883" y="358.164" fill="#D1D1D2" width="6.396" height="31.267"/>
	<rect x="601.274" y="228.603" fill="#D1D1D2" width="6.397" height="30.741"/>
</g>
</svg>
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 width="100%" height="100%" viewBox="0 0 52.866 51.739" enable-background="new 0 0 52.866 51.739" xml:space="preserve">
<polygon fill-rule="evenodd" clip-rule="evenodd" fill="#B2B1B1" points="44.415,3.704 14.633,3.704 14.633,51.739 52.866,51.739 
	52.866,11.997 44.415,3.704 44.415,3.704 "/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E20917" d="M42.481,1.85c0,2.811,0,5.655,0,6.226c0.576,0,3.471,0,6.308,0
	L42.481,1.85L42.481,1.85L42.481,1.85z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#F5F5F5" d="M41.175,1.307c-10.689,0-27.428,0-28.284,0
	c0,1.255,0,46.237,0,47.492c1.24,0,35.794,0,37.034,0c0-0.935,0-26.096,0-39.417h-8.75V1.307L41.175,1.307L41.175,1.307z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M42.481,1.85l6.308,6.226c-2.837,0-5.731,0-6.308,0
	C42.481,7.505,42.481,4.66,42.481,1.85L42.481,1.85L42.481,1.85z M49.925,48.799c-1.24,0-35.794,0-37.034,0
	c0-1.255,0-46.236,0-47.492c0.856,0,17.595,0,28.284,0v8.075h8.75C49.925,22.703,49.925,47.864,49.925,48.799L49.925,48.799
	L49.925,48.799L49.925,48.799z M11.583,0v50.105h39.649V8.65L42.467,0H11.583L11.583,0L11.583,0L11.583,0z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#B2B1B1" d="M39.015,19.902V5.337H12.891c0,3.47,0,8.805,0,14.565H39.015
	L39.015,19.902L39.015,19.902z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E20917" d="M1.307,16.936c1.238,0,33.62,0,34.857,0c0-1.12,0-10.861,0-11.981
	c-1.237,0-33.619,0-34.857,0C1.307,6.075,1.307,15.816,1.307,16.936L1.307,16.936L1.307,16.936z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M36.164,16.936c-1.237,0-33.619,0-34.857,0
	c0-1.12,0-10.861,0-11.981c1.238,0,33.62,0,34.857,0C36.164,6.075,36.164,15.816,36.164,16.936L36.164,16.936L36.164,16.936z
	 M0,3.647v14.596h37.471V3.647h-0.653H0L0,3.647L0,3.647L0,3.647z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#FFFFFF" d="M9.905,8.311v2.267h0.881c0.635,0,1.059-0.042,1.272-0.125
	c0.214-0.083,0.382-0.214,0.503-0.392c0.122-0.178,0.183-0.385,0.183-0.621c0-0.291-0.086-0.53-0.256-0.72
	c-0.17-0.188-0.386-0.307-0.647-0.354c-0.191-0.037-0.578-0.055-1.158-0.055H9.905L9.905,8.311L9.905,8.311z M8.292,14.928V6.963
	h2.583c0.979,0,1.616,0.04,1.914,0.12c0.456,0.12,0.839,0.38,1.146,0.78c0.309,0.401,0.463,0.918,0.463,1.552
	c0,0.49-0.089,0.901-0.267,1.234c-0.177,0.333-0.402,0.595-0.676,0.786c-0.273,0.19-0.552,0.316-0.834,0.377
	c-0.385,0.077-0.94,0.114-1.668,0.114H9.905v3.002H8.292L8.292,14.928L8.292,14.928L8.292,14.928z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#FFFFFF" d="M17.315,8.311v5.27h1.195c0.447,0,0.77-0.025,0.968-0.076
	c0.26-0.065,0.475-0.175,0.646-0.331c0.171-0.156,0.311-0.412,0.419-0.769c0.107-0.356,0.162-0.842,0.162-1.457
	s-0.055-1.087-0.162-1.416c-0.108-0.33-0.26-0.586-0.454-0.771c-0.195-0.185-0.441-0.31-0.741-0.375
	c-0.224-0.05-0.661-0.076-1.313-0.076H17.315L17.315,8.311L17.315,8.311z M15.702,6.963h2.931c0.661,0,1.165,0.05,1.512,0.152
	c0.467,0.138,0.865,0.382,1.197,0.733c0.332,0.352,0.585,0.782,0.759,1.29c0.173,0.509,0.26,1.137,0.26,1.883
	c0,0.656-0.081,1.221-0.244,1.695c-0.198,0.58-0.481,1.049-0.851,1.408c-0.277,0.271-0.653,0.483-1.126,0.635
	c-0.354,0.113-0.827,0.169-1.42,0.169h-3.018V6.963L15.702,6.963L15.702,6.963L15.702,6.963z"/>
<polygon fill-rule="evenodd" clip-rule="evenodd" fill="#FFFFFF" points="23.727,14.928 23.727,6.963 29.18,6.963 29.18,8.311 
	25.34,8.311 25.34,10.19 28.648,10.19 28.648,11.538 25.34,11.538 25.34,14.928 23.727,14.928 23.727,14.928 "/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E30921" d="M25.983,35.519c-2.812,2.1-4.745,5.082-3.982,5.547l-0.666-0.335
	C20.948,40.259,21.825,37.729,25.983,35.519L25.983,35.519L25.983,35.519L25.983,35.519L25.983,35.519z"/>
<path fill="none" stroke="#E30921" stroke-width="0.5197" stroke-miterlimit="2.6131" d="M25.983,35.519
	c-2.812,2.1-4.745,5.082-3.982,5.547l-0.666-0.335C20.948,40.259,21.825,37.729,25.983,35.519L25.983,35.519L25.983,35.519
	L25.983,35.519L25.983,35.519z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E30921" d="M47.193,34.952l-0.023-0.368c-0.004-0.047,0-0.105-0.002-0.166
	h-0.005c-0.015,0.057-0.033,0.122-0.052,0.175l-0.127,0.351h-0.071l-0.124-0.36c-0.015-0.049-0.029-0.108-0.044-0.166H46.74
	c-0.001,0.058,0,0.11-0.003,0.166l-0.022,0.368h-0.089l0.047-0.61h0.121l0.119,0.331c0.016,0.046,0.028,0.097,0.043,0.153h0.003
	c0.014-0.056,0.028-0.11,0.043-0.155l0.12-0.329h0.119l0.046,0.61H47.193L47.193,34.952L47.193,34.952L47.193,34.952L47.193,34.952z
	 M46.604,34.342v0.078h-0.187v0.532h-0.091V34.42h-0.186v-0.078H46.604L46.604,34.342L46.604,34.342L46.604,34.342L46.604,34.342
	L46.604,34.342L46.604,34.342z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#E30921" d="M28.566,34.342c0.568-0.985,1.172-2.088,1.814-3.325
	c1.3-2.505,2.067-4.538,2.526-6.316c0.834,2.165,2.059,4.225,3.702,5.639c0.511,0.44,1.075,0.845,1.667,1.215
	C35.172,32.035,31.688,32.963,28.566,34.342L28.566,34.342L28.566,34.342z M47.102,33.165c0.821-1.749-2.684-2.349-7.452-1.796
	c-0.838-0.472-1.652-1.007-2.389-1.593c-1.836-1.507-3.187-4.034-4.027-6.566c0.383-2.121,0.359-3.924,0.401-5.872
	c-0.182,0.888-0.312,2.372-0.811,4.482c-0.643-2.466-0.783-4.757-0.394-5.904c0.086-0.251,0.293-0.545,0.385-0.61
	c0.358,0.179,0.792,0.619,0.889,1.541c0.323-1.702-0.509-1.642-0.742-1.642l-0.523-0.004c-0.29,0-0.551,0.232-0.677,0.705
	c-0.431,1.605-0.225,4.505,0.669,7.419c-0.556,1.942-1.416,4.301-2.806,7.101c-3.741,7.533-6.472,11.047-8.29,10.306l0.649,0.333
	c1.21,0.617,3.286-1.02,6.551-6.667c3.069-1.107,7.154-1.921,10.714-2.278c3.505,1.878,7.53,2.523,7.734,1.313
	c-0.907,0.436-3.514-0.17-6.149-1.445C44.442,31.758,47.17,32.083,47.102,33.165L47.102,33.165L47.102,33.165L47.102,33.165z"/>
<path fill="none" stroke="#E30921" stroke-width="0.5197" stroke-miterlimit="2.6131" d="M28.566,34.342
	c0.568-0.985,1.172-2.088,1.814-3.325c1.3-2.505,2.067-4.538,2.526-6.316c0.834,2.165,2.059,4.225,3.702,5.639
	c0.511,0.44,1.075,0.845,1.667,1.215C35.172,32.035,31.688,32.963,28.566,34.342L28.566,34.342L28.566,34.342z M47.102,33.165
	c0.821-1.749-2.684-2.349-7.452-1.796c-0.838-0.472-1.652-1.007-2.389-1.593c-1.836-1.507-3.187-4.034-4.027-6.566
	c0.383-2.121,0.359-3.924,0.401-5.872c-0.182,0.888-0.312,2.372-0.811,4.482c-0.643-2.466-0.783-4.757-0.394-5.904
	c0.086-0.251,0.293-0.545,0.385-0.61c0.358,0.179,0.792,0.619,0.889,1.541c0.323-1.702-0.509-1.642-0.742-1.642l-0.523-0.004
	c-0.29,0-0.551,0.232-0.677,0.705c-0.431,1.605-0.225,4.505,0.669,7.419c-0.556,1.942-1.416,4.301-2.806,7.101
	c-3.741,7.533-6.472,11.047-8.29,10.306l0.649,0.333c1.21,0.617,3.286-1.02,6.551-6.667c3.069-1.107,7.154-1.921,10.714-2.278
	c3.505,1.878,7.53,2.523,7.734,1.313c-0.907,0.436-3.514-0.17-6.149-1.445C44.442,31.758,47.17,32.083,47.102,33.165"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M19.435,42.372l-0.528-2.746c-0.148-0.775-0.302-1.79-0.431-2.613
	h-0.053c-0.129,0.834-0.298,1.882-0.446,2.623l-0.542,2.736H19.435L19.435,42.372L19.435,42.372L19.435,42.372L19.435,42.372z
	 M17.233,43.649l-0.675,3.17h-1.566l2.582-11.478h1.856l2.442,11.478h-1.585l-0.667-3.17H17.233L17.233,43.649L17.233,43.649
	L17.233,43.649L17.233,43.649L17.233,43.649L17.233,43.649z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M26.4,41.704c0-0.164,0-0.352-0.025-0.524
	c-0.076-0.741-0.504-1.392-1.079-1.392c-0.985,0-1.331,1.391-1.331,2.936c0,1.689,0.442,2.89,1.275,2.89
	c0.367,0,0.846-0.192,1.103-1.175c0.041-0.146,0.058-0.334,0.058-0.539V41.704L26.4,41.704L26.4,41.704L26.4,41.704L26.4,41.704z
	 M28.008,35.036v9.649c0,0.631,0.043,1.56,0.067,2.135h-1.387l-0.1-1.004h-0.053c-0.277,0.586-0.894,1.14-1.728,1.14
	c-1.521,0-2.463-1.661-2.463-4.243c0-2.914,1.239-4.297,2.549-4.297c0.653,0,1.183,0.307,1.472,0.93H26.4v-4.309H28.008
	L28.008,35.036L28.008,35.036L28.008,35.036L28.008,35.036L28.008,35.036L28.008,35.036z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M31.874,45.692c0.992,0,1.207-1.75,1.207-3.016
	c0-1.225-0.215-3-1.242-3c-1.047,0-1.255,1.775-1.255,3c0,1.383,0.239,3.016,1.272,3.016H31.874L31.874,45.692L31.874,45.692
	L31.874,45.692L31.874,45.692z M31.831,46.955c-1.647,0-2.849-1.423-2.849-4.255c0-2.998,1.422-4.285,2.92-4.285
	c1.632,0,2.814,1.469,2.814,4.254c0,3.282-1.626,4.286-2.869,4.286H31.831L31.831,46.955L31.831,46.955L31.831,46.955L31.831,46.955
	L31.831,46.955L31.831,46.955z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M37.293,43.887c0,0.194,0.024,0.38,0.066,0.519
	c0.264,1.01,0.743,1.208,1.073,1.208c0.951,0,1.305-1.263,1.305-2.96c0-1.582-0.371-2.865-1.323-2.865
	c-0.521,0-0.955,0.625-1.064,1.235c-0.032,0.165-0.057,0.376-0.057,0.548V43.887L37.293,43.887L37.293,43.887L37.293,43.887
	L37.293,43.887z M35.686,35.036h1.607v4.444h0.034c0.419-0.75,1.005-1.064,1.737-1.064c1.397,0,2.291,1.59,2.291,4.135
	c0,2.959-1.206,4.405-2.571,4.405c-0.815,0-1.27-0.433-1.635-1.183h-0.053l-0.101,1.047h-1.379c0.025-0.56,0.068-1.504,0.068-2.135
	V35.036L35.686,35.036L35.686,35.036L35.686,35.036L35.686,35.036L35.686,35.036L35.686,35.036z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#1D1D1B" d="M45.958,41.853c0.019-1.456-0.493-2.223-1.129-2.223
	c-0.819,0-1.203,1.188-1.249,2.223H45.958L45.958,41.853L45.958,41.853L45.958,41.853L45.958,41.853z M43.571,43.017
	c0.016,2.119,0.928,2.635,1.887,2.635c0.591,0,1.088-0.138,1.439-0.301l0.24,1.17c-0.494,0.248-1.256,0.393-1.973,0.393
	c-2.073,0-3.172-1.575-3.172-4.123c0-2.715,1.246-4.384,2.963-4.384c1.721,0,2.52,1.653,2.52,3.731c0,0.414-0.016,0.67-0.04,0.887
	L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017L43.571,43.017z"/>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#B2B1B1" d="M49.925,10.912c0-0.524,0-1.036,0-1.529h-7.589v1.529H49.925
	L49.925,10.912L49.925,10.912z"/>
</svg>
\rules except wikilink

<$button class="cpfadeable">{{$:/core/images/preview-open}}&nbsp;MultiMedia</$button>
<$button tooltip="View the next paper">
Next {{$:/core/images/right-arrow}}
<$action-navigate $to={{!!next_paper}}/>
</$button>
<$button tooltip="View the next session">
Next {{$:/core/images/right-arrow}}
<$action-navigate $to={{!!next_session_title}}/>
</$button>
@@.cppinktext ''NO&nbsp;PDF''@@
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<span class="cpicon cpfadeable cpw25px">{{$:/causal/images/pdficon02}}</span>
<$button tooltip="View the preceding paper">
{{$:/core/images/left-arrow}} Prev
<$action-navigate $to={{!!prev_paper}}/>
</$button>
<$button tooltip="View the preceding session">
{{$:/core/images/left-arrow}} Prev
<$action-navigate $to={{!!prev_session_title}}/>
</$button>
!!!Count of Tiddlers with subscript/superscript in title: <$count filter="[regexp[,,]] [regexp[\^\^]]"/>

List of Tiddlers with subscript/superscript in title:

<$list filter="[regexp[,,]] [regexp[\^\^]]"/>

!!!Count of Missing Tiddlers: <$count filter="[all[missing]sort[title]]"/>

List of Missing Tiddlers:

<$list filter="[all[missing]sort[title]]"/>

\rules except wikilink
Proceedings of the 20th Annual Conference of the International Speech Communication Association (INTERSPEECH 2019). ISSN 2308-457X. © 2019 International Speech Communication Association. All rights reserved.  For technical support please contact Causal Productions (info@causalproductions.com).
{{Session List}}
<$button tooltip="View the Session List">
{{$:/core/images/up-arrow}} Sessions
<$action-navigate $to="Session List"/>
</$button>
/*
 * CONFERENCE Paper abstract card
 */

.cpabstractcardauthorheading { font-size:1em; }

/* the following style is for the <div> that contains the author names (possibly multiline) and the
affiliation names (possibly multiline). The 0.75em bottom margin separates it slightly from the button row that follows */
.cpabstractcardauthorarea { font-size:1em; line-height:1.15; margin-top:0.5em; margin-bottom:0.75em; }
/* the following style is for the <p> that contains the author names only */
p.cpabstractcardauthornames { font-style:normal; margin-top:0em; margin-bottom:0em; }
/* the following style is for the <p> that contains the affiliations only; the 0.25em top margin separates it from the author names */
p.cpabstractcardaffiliationlist { font-style:italic; margin-top:0.25em; margin-bottom:0em; }
/* the abstract paragraph is the last thing in the tiddler so make the <p> bottom margin zero */
.cpabstractcardabstract { font-size:1em; line-height:1.15; }
.cpabstractcardabstract > p { margin-top:0.75em; margin-bottom:0em; }
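
/* a minimal sketch (assumption, not copied from a generated abstract card tiddler) of the markup
the classes above describe:
   <div class="cpabstractcardauthorarea">
     <p class="cpabstractcardauthornames">Author One, Author Two</p>
     <p class="cpabstractcardaffiliationlist">University A; Institute B</p>
   </div>
   <div class="cpabstractcardabstract"><p>Abstract text ...</p></div>
*/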

/* the following style is for the <p> that contains the buttons in a single row.  The 0.5em margins keep successive button rows close together. */
.cpbuttonrow > p { margin-top:0.5em; margin-bottom:0.5em; }

/* the following styles are for the VIEW PDF button, which may have a MULTIMEDIA button next to it.
A separate <p> style "lineheightforbuttons" is needed to avoid extra vertical space caused by the line-height,
and the <span> is needed to keep the hover area confined to the buttons rather than the full width.
The hover tooltip is vertically sized by the line-height of the span. */
.lineheightforbuttons { line-height:1em; }
.cpabscardpdfandmediabutton { display:inline-flex;align-items:flex-start;line-height:1.5em; }
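
/* a minimal usage sketch (assembled from button and link fragments found elsewhere in this file, not
copied from an actual abstract card tiddler): the <p> suppresses the extra line-height and the <span>
keeps the hover/tooltip area confined to the buttons:
   <p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
     <a href={{!!pdf_file_full_name}} class="externallinkbutton" target="_blank">{{$:/causal/pdf icon in abscard view}}</a>
     <$button class="cpfadeable">{{$:/core/images/preview-open}}&nbsp;MultiMedia</$button>
   </span></p>
*/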
.cpaidxlinkrowstyle { width:30px;text-align:left;padding-left:0;margin-left:0; }
/* the following style is based on the normal table top margin of 1em, with margin-top
reduced to 0.5em because the link row table is borderless and needs to sit a bit closer
to the surrounding elements. The bottom margin is zero because that is the end of the tiddler. */
.cpaidxlinkrowtable { margin-top:0.5em; margin-bottom:0em; }
.cpaidxlinkrowtable td { padding-left:0em; padding-right:1em; }
/*
 * CONFERENCE Author Index List tiddler styles
 */

/* the author list is a borderless table so reduce margin-top to 0.5em to make the vertical whitespace appear
consistent with bordered tables.  Bottom margin is set to zero because that is the end of the tiddler. */
.cpauthorindexlisttable { margin-top:0.5em; margin-bottom:0em; }
/* the next line ensures all td elements within a .cpauthorindexlisttable have zero left-right padding;
the font and line-height definitions are included here to avoid adding more structural elements */
.cpauthorindexlisttable td { padding-left:0em; padding-right:0em; font-size:1em; line-height:1.5; }
/*
 * CONFERENCE Author Index Person Card
 */

/* the following style is for the author paper table.  The 1em top margin spaces it from the
preceding content, and the bottom margin is 0em because it is the end of the tiddler */
.cpaidxauthortable { margin-top:1em; margin-bottom:0em; }

/* the following styles are used within the table */
.cpauthorindexpersoncardauthorname { font-size:1em; font-weight:bold; }
.cpauthorindexpersoncardconferencename { font-size:1em; font-weight:bold; }
.cpauthorindexpersoncardpapercode { font-size:1em; line-height:1.15; white-space:nowrap; }
.cpauthorindexpersoncardpapertitle { font-size:1em; line-height:1.15; }
/*
 * Global change to TIDDLYWIKI built-in styles
 */

/* make the titlebar smaller.  This affects the tiddler title and the 3 control buttons at top right
 */
.tc-titlebar { font-size:1.2em; }

/* the margin-bottom spec in the next class allows vertical space between tiddler title and body to close
 */
.tc-titlebar h2 { font-weight: bold; margin-bottom:0.5em; }

/* the tiddler body begins with a <p> so the top margin contributes to the space between title and body.
The following selector selects the first child <p> of the tiddler-body and sets the top/bottom margin to
a minimum value, which can be extended in cases such as the abstract card author list.
 */
.tc-tiddler-body > p { margin-top:0.5em; margin-bottom:0.5em; }

/* the following makes the tags wrapper disappear, allowing the vertical space between tiddler title and
tiddler body to close.
 */
.tc-tags-wrapper { display: none; }

\rules except wikilink
.cpwelcomepagespaceaboveiconwithconferencename { padding-top:0.75em; }
.cpwelcomepagespaceaboveiconwithoutconferencename { padding-top:0.0em; }

/* the following styles force the conference logos to lose their descender padding due
to the line-height of the parent */
.cpwelcomepagespaceaboveiconwithconferencename > img { display:block; }
.cpwelcomepagespaceaboveiconwithoutconferencename > img { display:block; }

.icon_size_on_welcome_page { width:250px; }
/* the confinfo page table is borderless so reduce the top margin a bit to make it consistent
with other tiddlers.  Bottom margin is set to zero because that is the end of the tiddler. */
.cpconfinfotable { margin-top:1em; margin-bottom:0em; }
.cpconfinfotable td { padding-left:0em; padding-bottom:0.5em; }
.cpconfinfotable tr:last-child td { padding-bottom:0em; }
/* the following style is used for <a> elements surrounding buttons, to ensure that
the text inside the button does not cause a spurious underline to appear between
buttons on the same line, and to force the text colour to black instead of the normal link blue.
Note that the TW text colour is not pure black but rgb(51,51,51). */
a.externallinkbutton { color: rgb(51,51,51); text-decoration: none; } 

/* the following reveals and styles allow buttons and table cells with class
"cpfadeable" to be faded when turned off.  Specifically, PDF and MEDIA link
buttons can be switched off, resulting in links that are not clickable (they can
still be tabbed to and activated, but ignore this) and that appear faded */

<$reveal type="match" state="$:/causal/config/hidePDFandMEDIA" text="hide">
a.externallinkbutton {
pointer-events: none;
cursor: default;
}
.cpfadeable {
opacity: 0.33;
}
.cpabscardpdfandmediabutton:hover::after, .cpaidxauthortable td:first-child:hover::after, .cpconfinfotable td:first-child:hover::after, .cpsessionviewtable td:first-child:hover::after {
display: inline;
position: absolute;
border: 1px solid #ccc;
border-radius: 4px;
box-shadow: 1px 1px 4px #000;
background-color: #fff;
margin-left: 5px;
margin-top: -25px;
padding: 3px;
opacity: 1;
}
.cpabscardpdfandmediabutton::after, .cpaidxauthortable td:first-child::after, .cpconfinfotable td:first-child::after, .cpsessionviewtable td:first-child::after {
content: "PDF+MEDIA files are only available in the final proceedings";
opacity: 1;
}
.cpabscardpdfandmediabutton::after, .cpaidxauthortable td:first-child::after, .cpconfinfotable td:first-child::after, .cpsessionviewtable td:first-child::after {
display: none;
}
</$reveal>
<$reveal type="match" state="$:/causal/config/hidePDFandMEDIA" text="show">
.cpfadeable {
opacity: 1;
}
</$reveal>
.cpconferencedisambiguator { font-size:1.12em; font-weight:bold; }
.cpprevnextanchortext { font-size:1.12em; font-weight:bold; }
.cpredtext { color:red; }
.cppinktext { color:#FFB0B0; }
.cpcenter { text-align:center; }
.cpmailingaddress { padding-left:2em; }

.cptightlineheight { line-height:1.15; }
.cpemabovezerobelow { margin-top:1em; margin-bottom:0em; }

.cpcopyrightpage { line-height:1.15; margin-top:0.75em; margin-bottom:0em; }
.cpsupportpage   { line-height:1.15; margin-top:0.75em; margin-bottom:0em; }
.cpsupportpagetable { margin-top:1em; margin-bottom:0em; }

/* the following makes cpicon images block-level; otherwise the icons
get a descender margin below them caused by the font metrics of the parent */
.cpicon > img { display: block; }

.cpw25px > img { width:25px; }

/* the following is used in the session view to force a minimum width for the pdf icon column, using @@ ... @@ syntax */
.pdficonintable { display:block;width:30px; }
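
/* usage sketch (assumption; the session view rows themselves are generated elsewhere in this file),
applying the class with TiddlyWiki's inline @@ syntax so the icon cell keeps a minimum 30px width:
   @@.pdficonintable {{$:/causal/images/pdficon02}}@@
*/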
/*
 * CONFERENCE Session List tiddler styles
 */

/* the session list is a borderless table so reduce the margin-top to 0.5em to make it consistent
with bordered tables.  Bottom margin is set to zero because that is the end of the tiddler. */
.cpsessionlisttable { margin-top:0.5em; margin-bottom:0em; }
/* the next line ensures all td elements within a .cpsesslisttable have zero left-right padding */
.cpsessionlisttable td { padding-left:0em; padding-right:0.5em; }

/* note that in the session list table, vertical alignment of table cells must be done
using TW5 markup rather than CSS.  Properties such as display:flex and align-content:flex-start do not seem to work here. */
.cpsessionlistsessioncode { font-size:1em; line-height:1.15; white-space:nowrap; }
.cpsessionlistsessionname { font-size:1em; line-height:1.15; }
/*
 * CONFERENCE Session View tiddler styles
 */

/* the following style adds a bit of space above and below each table row to separate the cell text from the ruled lines */
table.cpsessionviewtable { margin-top:0.75em; margin-bottom:0em; }

/* the following styles are for entries within the session view table */
.cpsessionviewpapercode  { font-size:1em; line-height:1.15; white-space:nowrap; }
.cpsessionviewpapertitle { font-size:1em; line-height:1.15; }
.cpsessionviewpaperauthor { font-size:1em;font-style:italic;line-height:1.15; }

.cpsessionviewmetadata { font-size:1em; line-height:1.15; }
.cpsessionviewmetadata table { margin-top:0.6em; margin-bottom:0.75em; }
.cpsessionviewmetadata tr:first-child td:first-child { padding-bottom:0.2em; } /* make the padding 0.2em on the bottom of top left cell, to space this row a bit more from subsequent rows */
.cpsessionviewmetadata td { padding-left:0px; padding-right:0px; }
.cpsessionviewmetadata td:first-child { width:1px; white-space: nowrap; } /* ensure that 'chairs:' column is just wide enough for the word */
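
/* usage sketch (assumption, with hypothetical chair names) of the metadata table these rules style:
   @@.cpsessionviewmetadata
   |Chairs:|Chair One, Chair Two|
   @@
*/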
/* the following class is used to make borderless tables */
.cpborderless,
.cpborderless table,
.cpborderless td,
.cpborderless tr,
.cpborderless th,
.cpborderless tbody { border:0 !important; }
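
/* usage sketch (assumption): wrap a wikitext table in a block-level @@ span to suppress its borders:
   @@.cpborderless
   |cell 1|cell 2|
   |cell 3|cell 4|
   @@
*/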

/* the following class essentially defines the visual appearance of H2 headers, for use
in tables where tiddler !! syntax does not work.  For all header style definitions see w3schools
or t287/00_gv.txt */
.cph2 { display: block; font-size: 1.5em; margin-top: 0.83em; margin-bottom: 0.83em; margin-left: 0; margin-right: 0; font-weight: bold; }
.cph3 { display: block; font-size: 1.0em; margin-top: 0.83em; margin-bottom: 0.83em; margin-left: 0; margin-right: 0; font-weight: bold; }
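
/* usage sketch (assumption): inside a table cell, where the !! heading syntax is unavailable, a heading
can be written as
   <span class="cph2">Heading text</span>
*/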

/* the following allows tables to have extra space between content and row divider rules */
.cptablecelltopbottomspace1 td { padding-top:0.1em; padding-bottom:0.1em; }
.cptablecelltopbottomspace2 td { padding-top:0.2em; padding-bottom:0.2em; }
.cptablecelltopbottomspace3 td { padding-top:0.3em; padding-bottom:0.3em; }
/*
 * Welcome Page tiddler styles
 */
/* width of svg logo for the whole publication */
.cppublicationsvg { width:TODO_publication_welcomeartwork_displaywidth; }
.cppublicationname { font-weight:bold;font-size:1.3em; }
.cppublicationdatevenue {
font-size:1.1em;
display:flex;
justify-content:space-between;
}

/* each individual conference in the publication is named in the following style */
.cpwelcomepageconferencename { font-weight:bold;line-height:1.2; }

/* the following style is for the publication header which is a table with icon in left cell
 and conference name and date/venue in right cell.  We need to have a small top margin to separate
 from the tiddler title.
*/
.cpwelcomepagepublicationtable,
.cpwelcomepagepublicationtable td { margin-top:1em; margin-bottom:0px; padding-top:0px; padding-bottom:0px; }

/* the following style is for a table which contains a per-conference row with icon in left cell, and major
headings in right cell such as preface, session list, author index.  We want all margins to be zero so it
can butt up to its vertical neighbours efficiently.
*/
.cpwelcomepageconferencetable,
.cpwelcomepageconferencetable td { margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px; }

/* the copyright message is displayed in tiny font on the welcome page.  To make it readable the user can click on the COPYRIGHT STATEMENT heading to see the text in a readable tiddler */
.cpwelcomepagecopyright { display: block; font-size: 0.5em; margin-top: 0.1em; margin-bottom: 0.1em; margin-left: 0; margin-right: 0; font-weight: bold; line-height:1.5em; }

/* the following style is applied to the conference information, session list, and author index links.
TW mandates that the links be blue and not bold, so specifying those properties in the following style
would have no effect.  Font size, italics, and other parameters can be controlled here and will work correctly. */
.cpwelcomepageconferencelinks {}
\rules except wikilink

<$button>{{$:/core/images/preview-open}}&nbsp;View&nbsp;Folder</$button>
\rules except wikilink

<$checkbox tiddler="$:/state/causal" field="view multimedia list" checked="yes" unchecked="no" default="no"> View MultiMedia list</$checkbox>
<a href={{!!pdf_file_full_name}} class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in abscard view}}</a>
<$button tooltip="View the top level Welcome Page">
{{$:/core/images/up-arrow}} Welcome
<$action-navigate $to="Welcome Page"/>
</$button>
hide
show
hide
hide
hide
hide
{
    "tiddlers": {
        "$:/Acknowledgements": {
            "title": "$:/Acknowledgements",
            "type": "text/vnd.tiddlywiki",
            "text": "TiddlyWiki incorporates code from these fine OpenSource projects:\n\n* [[The Stanford Javascript Crypto Library|http://bitwiseshiftleft.github.io/sjcl/]]\n* [[The Jasmine JavaScript Test Framework|http://pivotal.github.io/jasmine/]]\n* [[Normalize.css by Nicolas Gallagher|http://necolas.github.io/normalize.css/]]\n\nAnd media from these projects:\n\n* World flag icons from [[Wikipedia|http://commons.wikimedia.org/wiki/Category:SVG_flags_by_country]]\n"
        },
        "$:/core/copyright.txt": {
            "title": "$:/core/copyright.txt",
            "type": "text/plain",
            "text": "TiddlyWiki created by Jeremy Ruston, (jeremy [at] jermolene [dot] com)\n\nCopyright © Jeremy Ruston 2004-2007\nCopyright © UnaMesa Association 2007-2016\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this\nlist of conditions and the following disclaimer in the documentation and/or other\nmaterials provided with the distribution.\n\nNeither the name of the UnaMesa Association nor the names of its contributors may be\nused to endorse or promote products derived from this software without specific\nprior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT\nSHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\nANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"
        },
        "$:/core/icon": {
            "title": "$:/core/icon",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\"><path d=\"M64 0l54.56 32v64L64 128 9.44 96V32L64 0zm21.127 95.408c-3.578-.103-5.15-.094-6.974-3.152l-1.42.042c-1.653-.075-.964-.04-2.067-.097-1.844-.07-1.548-1.86-1.873-2.8-.52-3.202.687-6.43.65-9.632-.014-1.14-1.593-5.17-2.157-6.61-1.768.34-3.546.406-5.34.497-4.134-.01-8.24-.527-12.317-1.183-.8 3.35-3.16 8.036-1.21 11.44 2.37 3.52 4.03 4.495 6.61 4.707 2.572.212 3.16 3.18 2.53 4.242-.55.73-1.52.864-2.346 1.04l-1.65.08c-1.296-.046-2.455-.404-3.61-.955-1.93-1.097-3.925-3.383-5.406-5.024.345.658.55 1.938.24 2.53-.878 1.27-4.665 1.26-6.4.47-1.97-.89-6.73-7.162-7.468-11.86 1.96-3.78 4.812-7.07 6.255-11.186-3.146-2.05-4.83-5.384-4.61-9.16l.08-.44c-3.097.59-1.49.37-4.82.628-10.608-.032-19.935-7.37-14.68-18.774.34-.673.664-1.287 1.243-.994.466.237.4 1.18.166 2.227-3.005 13.627 11.67 13.732 20.69 11.21.89-.25 2.67-1.936 3.905-2.495 2.016-.91 4.205-1.282 6.376-1.55 5.4-.63 11.893 2.276 15.19 2.37 3.3.096 7.99-.805 10.87-.615 2.09.098 4.143.483 6.16 1.03 1.306-6.49 1.4-11.27 4.492-12.38 1.814.293 3.213 2.818 4.25 4.167 2.112-.086 4.12.46 6.115 1.066 3.61-.522 6.642-2.593 9.833-4.203-3.234 2.69-3.673 7.075-3.303 11.127.138 2.103-.444 4.386-1.164 6.54-1.348 3.507-3.95 7.204-6.97 7.014-1.14-.036-1.805-.695-2.653-1.4-.164 1.427-.81 2.7-1.434 3.96-1.44 2.797-5.203 4.03-8.687 7.016-3.484 2.985 1.114 13.65 2.23 15.594 1.114 1.94 4.226 2.652 3.02 4.406-.37.58-.936.785-1.54 1.01l-.82.11zm-40.097-8.85l.553.14c.694-.27 2.09.15 2.83.353-1.363-1.31-3.417-3.24-4.897-4.46-.485-1.47-.278-2.96-.174-4.46l.02-.123c-.582 1.205-1.322 2.376-1.72 3.645-.465 1.71 2.07 3.557 3.052 4.615l.336.3z\" fill-rule=\"evenodd\"/></svg>"
        },
        "$:/core/images/advanced-search-button": {
            "title": "$:/core/images/advanced-search-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-advanced-search-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M74.5651535,87.9848361 C66.9581537,93.0488876 57.8237115,96 48,96 C21.490332,96 0,74.509668 0,48 C0,21.490332 21.490332,0 48,0 C74.509668,0 96,21.490332 96,48 C96,57.8541369 93.0305793,67.0147285 87.9377231,74.6357895 L122.284919,108.982985 C125.978897,112.676963 125.973757,118.65366 122.284271,122.343146 C118.593975,126.033442 112.613238,126.032921 108.92411,122.343793 L74.5651535,87.9848361 Z M48,80 C65.673112,80 80,65.673112 80,48 C80,30.326888 65.673112,16 48,16 C30.326888,16 16,30.326888 16,48 C16,65.673112 30.326888,80 48,80 Z\"></path>\n        <circle cx=\"48\" cy=\"48\" r=\"8\"></circle>\n        <circle cx=\"28\" cy=\"48\" r=\"8\"></circle>\n        <circle cx=\"68\" cy=\"48\" r=\"8\"></circle>\n    </g>\n</svg>"
        },
        "$:/core/images/auto-height": {
            "title": "$:/core/images/auto-height",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-auto-height tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path d=\"M67.9867828,114.356363 L67.9579626,99.8785426 C67.9550688,98.4248183 67.1636987,97.087107 65.8909901,96.3845863 L49.9251455,87.5716209 L47.992126,95.0735397 L79.8995411,95.0735397 C84.1215894,95.0735397 85.4638131,89.3810359 81.686497,87.4948823 L49.7971476,71.5713518 L48.0101917,79.1500092 L79.992126,79.1500092 C84.2093753,79.1500092 85.5558421,73.4676733 81.7869993,71.5753162 L49.805065,55.517008 L48.0101916,63.0917009 L79.9921259,63.0917015 C84.2035118,63.0917016 85.5551434,57.4217887 81.7966702,55.5218807 L65.7625147,47.4166161 L67.9579705,50.9864368 L67.9579705,35.6148245 L77.1715737,44.8284272 C78.7336709,46.3905243 81.2663308,46.3905243 82.8284279,44.8284271 C84.390525,43.2663299 84.390525,40.7336699 82.8284278,39.1715728 L66.8284271,23.1715728 C65.2663299,21.6094757 62.73367,21.6094757 61.1715729,23.1715729 L45.1715729,39.1715729 C43.6094757,40.73367 43.6094757,43.26633 45.1715729,44.8284271 C46.73367,46.3905243 49.26633,46.3905243 50.8284271,44.8284271 L59.9579705,35.6988837 L59.9579705,50.9864368 C59.9579705,52.495201 60.806922,53.8755997 62.1534263,54.5562576 L78.1875818,62.6615223 L79.9921261,55.0917015 L48.0101917,55.0917009 C43.7929424,55.0917008 42.4464755,60.7740368 46.2153183,62.6663939 L78.1972526,78.7247021 L79.992126,71.1500092 L48.0101917,71.1500092 C43.7881433,71.1500092 42.4459197,76.842513 46.2232358,78.7286665 L78.1125852,94.6521971 L79.8995411,87.0735397 L47.992126,87.0735397 C43.8588276,87.0735397 42.4404876,92.5780219 46.0591064,94.5754586 L62.024951,103.388424 L59.9579785,99.8944677 L59.9867142,114.32986 L50.8284271,105.171573 C49.26633,103.609476 46.73367,103.609476 45.1715729,105.171573 C43.6094757,106.73367 43.6094757,109.26633 45.1715729,110.828427 L61.1715729,126.828427 C62.73367,128.390524 65.2663299,128.390524 66.8284271,126.828427 L82.8284278,110.828427 C84.390525,109.26633 84.390525,106.73367 82.8284279,105.171573 C81.2663308,103.609476 78.7336709,103.609476 77.1715737,105.171573 L67.9867828,114.356363 L67.9867828,114.356363 Z M16,20 L112,20 C114.209139,20 116,18.209139 116,16 C116,13.790861 114.209139,12 112,12 L16,12 C13.790861,12 12,13.790861 12,16 C12,18.209139 13.790861,20 16,20 L16,20 Z\"></path>\n</svg>"
        },
        "$:/core/images/blank": {
            "title": "$:/core/images/blank",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-blank tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\"></svg>"
        },
        "$:/core/images/bold": {
            "title": "$:/core/images/bold",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-bold tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M41.1456583,51.8095238 L41.1456583,21.8711485 L67.4985994,21.8711485 C70.0084159,21.8711485 72.4285598,22.0802967 74.7591036,22.4985994 C77.0896475,22.9169022 79.1512515,23.6638602 80.9439776,24.7394958 C82.7367036,25.8151314 84.170863,27.3090474 85.2464986,29.2212885 C86.3221342,31.1335296 86.859944,33.5835518 86.859944,36.5714286 C86.859944,41.9496067 85.2465147,45.8337882 82.0196078,48.2240896 C78.792701,50.614391 74.6694929,51.8095238 69.6498599,51.8095238 L41.1456583,51.8095238 Z M13,0 L13,128 L75.0280112,128 C80.7647346,128 86.3519803,127.28292 91.789916,125.848739 C97.2278517,124.414559 102.068139,122.203563 106.310924,119.215686 C110.553709,116.22781 113.929959,112.373506 116.439776,107.652661 C118.949592,102.931816 120.204482,97.3445701 120.204482,90.8907563 C120.204482,82.8832466 118.262391,76.0411115 114.378151,70.3641457 C110.493911,64.6871798 104.607883,60.7133634 96.719888,58.442577 C102.456611,55.6937304 106.788968,52.1680887 109.717087,47.8655462 C112.645206,43.5630037 114.109244,38.1849062 114.109244,31.7310924 C114.109244,25.7553389 113.123259,20.7357813 111.151261,16.6722689 C109.179262,12.6087565 106.400578,9.35201972 102.815126,6.90196078 C99.2296739,4.45190185 94.927196,2.68908101 89.907563,1.61344538 C84.8879301,0.537809748 79.3305627,0 73.2352941,0 L13,0 Z M41.1456583,106.128852 L41.1456583,70.9915966 L71.8011204,70.9915966 C77.896389,70.9915966 82.7964334,72.3958776 86.5014006,75.2044818 C90.2063677,78.0130859 92.0588235,82.7039821 92.0588235,89.2773109 C92.0588235,92.6237329 91.4911355,95.3725383 90.3557423,97.5238095 C89.2203491,99.6750808 87.6965548,101.378145 85.7843137,102.633053 C83.8720726,103.887961 81.661077,104.784311 79.1512605,105.322129 C76.641444,105.859947 74.0121519,106.128852 71.2633053,106.128852 L41.1456583,106.128852 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/cancel-button": {
            "title": "$:/core/images/cancel-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-cancel-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n\t<g fill-rule=\"evenodd\">\n\t    <path d=\"M64,76.3137085 L47.0294734,93.2842351 C43.9038742,96.4098343 38.8399231,96.4084656 35.7157288,93.2842712 C32.5978915,90.166434 32.5915506,85.0947409 35.7157649,81.9705266 L52.6862915,65 L35.7157649,48.0294734 C32.5901657,44.9038742 32.5915344,39.8399231 35.7157288,36.7157288 C38.833566,33.5978915 43.9052591,33.5915506 47.0294734,36.7157649 L64,53.6862915 L80.9705266,36.7157649 C84.0961258,33.5901657 89.1600769,33.5915344 92.2842712,36.7157288 C95.4021085,39.833566 95.4084494,44.9052591 92.2842351,48.0294734 L75.3137085,65 L92.2842351,81.9705266 C95.4098343,85.0961258 95.4084656,90.1600769 92.2842712,93.2842712 C89.166434,96.4021085 84.0947409,96.4084494 80.9705266,93.2842351 L64,76.3137085 Z M64,129 C99.346224,129 128,100.346224 128,65 C128,29.653776 99.346224,1 64,1 C28.653776,1 1.13686838e-13,29.653776 1.13686838e-13,65 C1.13686838e-13,100.346224 28.653776,129 64,129 Z M64,113 C90.509668,113 112,91.509668 112,65 C112,38.490332 90.509668,17 64,17 C37.490332,17 16,38.490332 16,65 C16,91.509668 37.490332,113 64,113 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/chevron-down": {
            "title": "$:/core/images/chevron-down",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-down tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n\t<g fill-rule=\"evenodd\" transform=\"translate(64.000000, 40.500000) rotate(-270.000000) translate(-64.000000, -40.500000) translate(-22.500000, -26.500000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n\t</g>\n</svg>"
        },
        "$:/core/images/chevron-left": {
            "title": "$:/core/images/chevron-left",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-left tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\" version=\"1.1\">\n    <g fill-rule=\"evenodd\" transform=\"translate(92.500000, 64.000000) rotate(-180.000000) translate(-92.500000, -64.000000) translate(6.000000, -3.000000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/chevron-right": {
            "title": "$:/core/images/chevron-right",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-right tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\" transform=\"translate(-48.000000, -3.000000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/chevron-up": {
            "title": "$:/core/images/chevron-up",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-chevron-up tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n\t<g fill-rule=\"evenodd\" transform=\"translate(64.000000, 89.500000) rotate(-90.000000) translate(-64.000000, -89.500000) translate(-22.500000, 22.500000)\">\n        <path d=\"M112.743107,112.12741 C111.310627,113.561013 109.331747,114.449239 107.145951,114.449239 L27.9777917,114.449239 C23.6126002,114.449239 20.0618714,110.904826 20.0618714,106.532572 C20.0618714,102.169214 23.6059497,98.6159054 27.9777917,98.6159054 L99.2285381,98.6159054 L99.2285381,27.365159 C99.2285381,22.9999675 102.77295,19.4492387 107.145205,19.4492387 C111.508562,19.4492387 115.061871,22.993317 115.061871,27.365159 L115.061871,106.533318 C115.061871,108.71579 114.175869,110.694669 112.743378,112.127981 Z\" transform=\"translate(67.561871, 66.949239) rotate(-45.000000) translate(-67.561871, -66.949239) \"></path>\n        <path d=\"M151.35638,112.12741 C149.923899,113.561013 147.94502,114.449239 145.759224,114.449239 L66.5910645,114.449239 C62.225873,114.449239 58.6751442,110.904826 58.6751442,106.532572 C58.6751442,102.169214 62.2192225,98.6159054 66.5910645,98.6159054 L137.841811,98.6159054 L137.841811,27.365159 C137.841811,22.9999675 141.386223,19.4492387 145.758478,19.4492387 C150.121835,19.4492387 153.675144,22.993317 153.675144,27.365159 L153.675144,106.533318 C153.675144,108.71579 152.789142,110.694669 151.356651,112.127981 Z\" transform=\"translate(106.175144, 66.949239) rotate(-45.000000) translate(-106.175144, -66.949239) \"></path>\n\t</g>\n</svg>"
        },
        "$:/core/images/clone-button": {
            "title": "$:/core/images/clone-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-clone-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M32.2650915,96 L32.2650915,120.002359 C32.2650915,124.419334 35.8432884,128 40.2627323,128 L120.002359,128 C124.419334,128 128,124.421803 128,120.002359 L128,40.2627323 C128,35.8457573 124.421803,32.2650915 120.002359,32.2650915 L96,32.2650915 L96,48 L108.858899,48 C110.519357,48 111.853018,49.3405131 111.853018,50.9941198 L111.853018,108.858899 C111.853018,110.519357 110.512505,111.853018 108.858899,111.853018 L50.9941198,111.853018 C49.333661,111.853018 48,110.512505 48,108.858899 L48,96 L32.2650915,96 Z\"></path>\n        <path d=\"M40,56 L32.0070969,56 C27.5881712,56 24,52.418278 24,48 C24,43.5907123 27.5848994,40 32.0070969,40 L40,40 L40,32.0070969 C40,27.5881712 43.581722,24 48,24 C52.4092877,24 56,27.5848994 56,32.0070969 L56,40 L63.9929031,40 C68.4118288,40 72,43.581722 72,48 C72,52.4092877 68.4151006,56 63.9929031,56 L56,56 L56,63.9929031 C56,68.4118288 52.418278,72 48,72 C43.5907123,72 40,68.4151006 40,63.9929031 L40,56 Z M7.9992458,0 C3.58138434,0 0,3.5881049 0,7.9992458 L0,88.0007542 C0,92.4186157 3.5881049,96 7.9992458,96 L88.0007542,96 C92.4186157,96 96,92.4118951 96,88.0007542 L96,7.9992458 C96,3.58138434 92.4118951,0 88.0007542,0 L7.9992458,0 Z M19.0010118,16 C17.3435988,16 16,17.336731 16,19.0010118 L16,76.9989882 C16,78.6564012 17.336731,80 19.0010118,80 L76.9989882,80 C78.6564012,80 80,78.663269 80,76.9989882 L80,19.0010118 C80,17.3435988 78.663269,16 76.9989882,16 L19.0010118,16 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/close-all-button": {
            "title": "$:/core/images/close-all-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-close-all-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\" transform=\"translate(-23.000000, -23.000000)\">\n        <path d=\"M43,131 L22.9976794,131 C18.5827987,131 15,127.418278 15,123 C15,118.590712 18.5806831,115 22.9976794,115 L43,115 L43,94.9976794 C43,90.5827987 46.581722,87 51,87 C55.4092877,87 59,90.5806831 59,94.9976794 L59,115 L79.0023206,115 C83.4172013,115 87,118.581722 87,123 C87,127.409288 83.4193169,131 79.0023206,131 L59,131 L59,151.002321 C59,155.417201 55.418278,159 51,159 C46.5907123,159 43,155.419317 43,151.002321 L43,131 Z\" transform=\"translate(51.000000, 123.000000) rotate(-45.000000) translate(-51.000000, -123.000000) \"></path>\n        <path d=\"M43,59 L22.9976794,59 C18.5827987,59 15,55.418278 15,51 C15,46.5907123 18.5806831,43 22.9976794,43 L43,43 L43,22.9976794 C43,18.5827987 46.581722,15 51,15 C55.4092877,15 59,18.5806831 59,22.9976794 L59,43 L79.0023206,43 C83.4172013,43 87,46.581722 87,51 C87,55.4092877 83.4193169,59 79.0023206,59 L59,59 L59,79.0023206 C59,83.4172013 55.418278,87 51,87 C46.5907123,87 43,83.4193169 43,79.0023206 L43,59 Z\" transform=\"translate(51.000000, 51.000000) rotate(-45.000000) translate(-51.000000, -51.000000) \"></path>\n        <path d=\"M115,59 L94.9976794,59 C90.5827987,59 87,55.418278 87,51 C87,46.5907123 90.5806831,43 94.9976794,43 L115,43 L115,22.9976794 C115,18.5827987 118.581722,15 123,15 C127.409288,15 131,18.5806831 131,22.9976794 L131,43 L151.002321,43 C155.417201,43 159,46.581722 159,51 C159,55.4092877 155.419317,59 151.002321,59 L131,59 L131,79.0023206 C131,83.4172013 127.418278,87 123,87 C118.590712,87 115,83.4193169 115,79.0023206 L115,59 Z\" transform=\"translate(123.000000, 51.000000) rotate(-45.000000) translate(-123.000000, -51.000000) \"></path>\n        <path d=\"M115,131 L94.9976794,131 C90.5827987,131 87,127.418278 87,123 C87,118.590712 90.5806831,115 94.9976794,115 L115,115 L115,94.9976794 C115,90.5827987 118.581722,87 123,87 C127.409288,87 131,90.5806831 131,94.9976794 L131,115 L151.002321,115 C155.417201,115 159,118.581722 159,123 C159,127.409288 155.419317,131 151.002321,131 L131,131 L131,151.002321 C131,155.417201 127.418278,159 123,159 C118.590712,159 115,155.419317 115,151.002321 L115,131 Z\" transform=\"translate(123.000000, 123.000000) rotate(-45.000000) translate(-123.000000, -123.000000) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/close-button": {
            "title": "$:/core/images/close-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-close-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M65.0864256,75.4091629 L14.9727349,125.522854 C11.8515951,128.643993 6.78104858,128.64922 3.65685425,125.525026 C0.539017023,122.407189 0.5336324,117.334539 3.65902635,114.209145 L53.7727171,64.0954544 L3.65902635,13.9817637 C0.537886594,10.8606239 0.532659916,5.79007744 3.65685425,2.6658831 C6.77469148,-0.451954124 11.8473409,-0.457338747 14.9727349,2.66805521 L65.0864256,52.7817459 L115.200116,2.66805521 C118.321256,-0.453084553 123.391803,-0.458311231 126.515997,2.6658831 C129.633834,5.78372033 129.639219,10.8563698 126.513825,13.9817637 L76.4001341,64.0954544 L126.513825,114.209145 C129.634965,117.330285 129.640191,122.400831 126.515997,125.525026 C123.39816,128.642863 118.32551,128.648248 115.200116,125.522854 L65.0864256,75.4091629 L65.0864256,75.4091629 Z\"></path>\n    </g>\n</svg>\n"
        },
        "$:/core/images/close-others-button": {
            "title": "$:/core/images/close-others-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-close-others-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64,128 C99.346224,128 128,99.346224 128,64 C128,28.653776 99.346224,0 64,0 C28.653776,0 0,28.653776 0,64 C0,99.346224 28.653776,128 64,128 Z M64,112 C90.509668,112 112,90.509668 112,64 C112,37.490332 90.509668,16 64,16 C37.490332,16 16,37.490332 16,64 C16,90.509668 37.490332,112 64,112 Z M64,96 C81.673112,96 96,81.673112 96,64 C96,46.326888 81.673112,32 64,32 C46.326888,32 32,46.326888 32,64 C32,81.673112 46.326888,96 64,96 Z M64,80 C72.836556,80 80,72.836556 80,64 C80,55.163444 72.836556,48 64,48 C55.163444,48 48,55.163444 48,64 C48,72.836556 55.163444,80 64,80 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/delete-button": {
            "title": "$:/core/images/delete-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-delete-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\" transform=\"translate(12.000000, 0.000000)\">\n        <rect x=\"0\" y=\"11\" width=\"105\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"28\" y=\"0\" width=\"48\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"8\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n        <rect x=\"8\" y=\"112\" width=\"88\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"80\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n        <rect x=\"56\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n        <rect x=\"32\" y=\"16\" width=\"16\" height=\"112\" rx=\"8\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/done-button": {
            "title": "$:/core/images/done-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-done-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M3.52445141,76.8322939 C2.07397484,75.3828178 1.17514421,73.3795385 1.17514421,71.1666288 L1.17514421,23.1836596 C1.17514421,18.7531992 4.75686621,15.1751442 9.17514421,15.1751442 C13.5844319,15.1751442 17.1751442,18.7606787 17.1751442,23.1836596 L17.1751442,63.1751442 L119.173716,63.1751442 C123.590457,63.1751442 127.175144,66.7568662 127.175144,71.1751442 C127.175144,75.5844319 123.592783,79.1751442 119.173716,79.1751442 L9.17657227,79.1751442 C6.96796403,79.1751442 4.9674142,78.279521 3.51911285,76.8315312 Z\" id=\"Rectangle-285\" transform=\"translate(64.175144, 47.175144) rotate(-45.000000) translate(-64.175144, -47.175144) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/down-arrow": {
            "title": "$:/core/images/down-arrow",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-down-arrow tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <path d=\"M109.35638,81.3533152 C107.923899,82.7869182 105.94502,83.6751442 103.759224,83.6751442 L24.5910645,83.6751442 C20.225873,83.6751442 16.6751442,80.1307318 16.6751442,75.7584775 C16.6751442,71.3951199 20.2192225,67.8418109 24.5910645,67.8418109 L95.8418109,67.8418109 L95.8418109,-3.40893546 C95.8418109,-7.77412698 99.3862233,-11.3248558 103.758478,-11.3248558 C108.121835,-11.3248558 111.675144,-7.78077754 111.675144,-3.40893546 L111.675144,75.7592239 C111.675144,77.9416955 110.789142,79.9205745 109.356651,81.3538862 Z\" transform=\"translate(64.175144, 36.175144) rotate(45.000000) translate(-64.175144, -36.175144) \"></path>\n</svg>"
        },
        "$:/core/images/download-button": {
            "title": "$:/core/images/download-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-download-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\"><g fill-rule=\"evenodd\"><path class=\"tc-image-download-button-ring\" d=\"M64,128 C99.346224,128 128,99.346224 128,64 C128,28.653776 99.346224,0 64,0 C28.653776,0 0,28.653776 0,64 C0,99.346224 28.653776,128 64,128 Z M64,112 C90.509668,112 112,90.509668 112,64 C112,37.490332 90.509668,16 64,16 C37.490332,16 16,37.490332 16,64 C16,90.509668 37.490332,112 64,112 Z\"/><path d=\"M34.3496823,66.4308767 L61.2415823,93.634668 C63.0411536,95.4551107 65.9588502,95.4551107 67.7584215,93.634668 L94.6503215,66.4308767 C96.4498928,64.610434 96.4498928,61.6588981 94.6503215,59.8384554 C93.7861334,58.9642445 92.6140473,58.4731195 91.3919019,58.4731195 L82.9324098,58.4731195 C80.3874318,58.4731195 78.3243078,56.3860674 78.3243078,53.8115729 L78.3243078,38.6615466 C78.3243078,36.0870521 76.2611837,34 73.7162058,34 L55.283798,34 C52.7388201,34 50.675696,36.0870521 50.675696,38.6615466 L50.675696,38.6615466 L50.675696,53.8115729 C50.675696,56.3860674 48.612572,58.4731195 46.0675941,58.4731195 L37.608102,58.4731195 C35.063124,58.4731195 33,60.5601716 33,63.134666 C33,64.3709859 33.4854943,65.5566658 34.3496823,66.4308767 L34.3496823,66.4308767 Z\"/></g></svg>"
        },
        "$:/core/images/edit-button": {
            "title": "$:/core/images/edit-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-edit-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M116.870058,45.3431458 L108.870058,45.3431458 L108.870058,45.3431458 L108.870058,61.3431458 L116.870058,61.3431458 L116.870058,45.3431458 Z M124.870058,45.3431458 L127.649881,45.3431458 C132.066101,45.3431458 135.656854,48.9248678 135.656854,53.3431458 C135.656854,57.7524334 132.07201,61.3431458 127.649881,61.3431458 L124.870058,61.3431458 L124.870058,45.3431458 Z M100.870058,45.3431458 L15.6638275,45.3431458 C15.5064377,45.3431458 15.3501085,45.3476943 15.1949638,45.3566664 L15.1949638,45.3566664 C15.0628002,45.3477039 14.928279,45.3431458 14.7913977,45.3431458 C6.68160973,45.3431458 -8.34314575,53.3431458 -8.34314575,53.3431458 C-8.34314575,53.3431458 6.85614548,61.3431458 14.7913977,61.3431458 C14.9266533,61.3431458 15.0596543,61.3384973 15.190398,61.3293588 C15.3470529,61.3385075 15.5049057,61.3431458 15.6638275,61.3431458 L100.870058,61.3431458 L100.870058,45.3431458 L100.870058,45.3431458 Z\" transform=\"translate(63.656854, 53.343146) rotate(-45.000000) translate(-63.656854, -53.343146) \"></path>\n        <path d=\"M35.1714596,124.189544 C41.9594858,123.613403 49.068777,121.917633 58.85987,118.842282 C60.6854386,118.268877 62.4306907,117.705515 65.1957709,116.802278 C81.1962861,111.575575 87.0734839,109.994907 93.9414474,109.655721 C102.29855,109.242993 107.795169,111.785371 111.520478,118.355045 C112.610163,120.276732 115.051363,120.951203 116.97305,119.861518 C118.894737,118.771832 119.569207,116.330633 118.479522,114.408946 C113.146151,105.003414 104.734907,101.112919 93.5468356,101.66546 C85.6716631,102.054388 79.4899908,103.716944 62.7116783,109.197722 C59.9734132,110.092199 58.2519873,110.64787 56.4625698,111.20992 C37.002649,117.322218 25.6914684,118.282267 16.8654804,112.957098 C14.9739614,111.815848 12.5154166,112.424061 11.3741667,114.31558 C10.2329168,116.207099 10.84113,118.665644 12.7326489,119.806894 C19.0655164,123.627836 26.4866335,124.926678 35.1714596,124.189544 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/erase": {
            "title": "$:/core/images/erase",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-erase tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M60.0870401,127.996166 L123.102318,64.980888 C129.636723,58.4464827 129.629513,47.8655877 123.098967,41.3350425 L99.4657866,17.7018617 C92.927448,11.1635231 82.3486358,11.1698163 75.8199411,17.698511 L4.89768189,88.6207702 C-1.63672343,95.1551755 -1.6295126,105.736071 4.90103262,112.266616 L20.6305829,127.996166 L60.0870401,127.996166 Z M25.1375576,120.682546 L10.812569,106.357558 C7.5455063,103.090495 7.54523836,97.793808 10.8048093,94.5342371 L46.2691086,59.0699377 L81.7308914,94.5317205 L55.5800654,120.682546 L25.1375576,120.682546 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/excise": {
            "title": "$:/core/images/excise",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-excise tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M56,107.313709 L53.6568542,109.656854 C50.5326599,112.781049 45.4673401,112.781049 42.3431457,109.656854 C39.2189514,106.53266 39.2189514,101.46734 42.3431458,98.3431457 L58.3431458,82.3431457 C61.4673401,79.2189514 66.5326599,79.2189514 69.6568542,82.3431458 L85.6568542,98.3431458 C88.7810486,101.46734 88.7810486,106.53266 85.6568542,109.656854 C82.5326599,112.781049 77.4673401,112.781049 74.3431458,109.656854 L72,107.313708 L72,121.597798 C72,125.133636 68.418278,128 64,128 C59.581722,128 56,125.133636 56,121.597798 L56,107.313709 Z M0,40.0070969 C0,35.5848994 3.59071231,32 8,32 C12.418278,32 16,35.5881712 16,40.0070969 L16,71.9929031 C16,76.4151006 12.4092877,80 8,80 C3.581722,80 0,76.4118288 0,71.9929031 L0,40.0070969 Z M32,40.0070969 C32,35.5848994 35.5907123,32 40,32 C44.418278,32 48,35.5881712 48,40.0070969 L48,71.9929031 C48,76.4151006 44.4092877,80 40,80 C35.581722,80 32,76.4118288 32,71.9929031 L32,40.0070969 Z M80,40.0070969 C80,35.5848994 83.5907123,32 88,32 C92.418278,32 96,35.5881712 96,40.0070969 L96,71.9929031 C96,76.4151006 92.4092877,80 88,80 C83.581722,80 80,76.4118288 80,71.9929031 L80,40.0070969 Z M56,8.00709688 C56,3.58489938 59.5907123,0 64,0 C68.418278,0 72,3.58817117 72,8.00709688 L72,39.9929031 C72,44.4151006 68.4092877,48 64,48 C59.581722,48 56,44.4118288 56,39.9929031 L56,8.00709688 Z M112,40.0070969 C112,35.5848994 115.590712,32 120,32 C124.418278,32 128,35.5881712 128,40.0070969 L128,71.9929031 C128,76.4151006 124.409288,80 120,80 C115.581722,80 112,76.4118288 112,71.9929031 L112,40.0070969 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/export-button": {
            "title": "$:/core/images/export-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-export-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00348646,127.999999 C8.00464867,128 8.00581094,128 8.00697327,128 L119.993027,128 C122.205254,128 124.207939,127.101378 125.657096,125.651198 L125.656838,125.65759 C127.104563,124.210109 128,122.21009 128,119.999949 L128,56.0000511 C128,51.5817449 124.409288,48 120,48 C115.581722,48 112,51.5797863 112,56.0000511 L112,112 L16,112 L16,56.0000511 C16,51.5817449 12.4092877,48 8,48 C3.581722,48 7.10542736e-15,51.5797863 7.10542736e-15,56.0000511 L7.10542736e-15,119.999949 C7.10542736e-15,124.418255 3.59071231,128 8,128 C8.00116233,128 8.0023246,128 8.00348681,127.999999 Z M56.6235633,27.3113724 L47.6580188,36.2769169 C44.5333664,39.4015692 39.4634864,39.4061295 36.339292,36.2819351 C33.2214548,33.1640979 33.2173444,28.0901742 36.3443103,24.9632084 L58.9616908,2.34582788 C60.5248533,0.782665335 62.5748436,0.000361191261 64.624516,2.38225238e-14 L64.6193616,0.00151809229 C66.6695374,0.000796251595 68.7211167,0.781508799 70.2854358,2.34582788 L92.9028163,24.9632084 C96.0274686,28.0878607 96.0320289,33.1577408 92.9078345,36.2819351 C89.7899973,39.3997724 84.7160736,39.4038827 81.5891078,36.2769169 L72.6235633,27.3113724 L72.6235633,88.5669606 C72.6235633,92.9781015 69.0418413,96.5662064 64.6235633,96.5662064 C60.2142756,96.5662064 56.6235633,92.984822 56.6235633,88.5669606 L56.6235633,27.3113724 L56.6235633,27.3113724 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/file": {
            "title": "$:/core/images/file",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-file tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"nonzero\">\n        <path d=\"M111.96811,30.5 L112,30.5 L112,119.999079 C112,124.417866 108.419113,128 104.000754,128 L23.9992458,128 C19.5813843,128 16,124.417687 16,119.999079 L16,8.00092105 C16,3.58213437 19.5808867,0 23.9992458,0 L81,0 L81,0.0201838424 C83.1589869,-0.071534047 85.3482153,0.707077645 86.9982489,2.35711116 L109.625176,24.9840387 C111.151676,26.510538 111.932942,28.4998414 111.96811,30.5 L111.96811,30.5 Z M81,8 L24,8 L24,120 L104,120 L104,30.5 L89.0003461,30.5 C84.5818769,30.5 81,26.9216269 81,22.4996539 L81,8 Z\"></path>\n        <rect x=\"32\" y=\"36\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"52\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"68\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"84\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"100\" width=\"64\" height=\"8\" rx=\"4\"></rect>\n        <rect x=\"32\" y=\"20\" width=\"40\" height=\"8\" rx=\"4\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/fixed-height": {
            "title": "$:/core/images/fixed-height",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fixed-height tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M60,35.6568542 L50.8284271,44.8284271 C49.26633,46.3905243 46.73367,46.3905243 45.1715729,44.8284271 C43.6094757,43.26633 43.6094757,40.73367 45.1715729,39.1715729 L61.1715729,23.1715729 C62.73367,21.6094757 65.2663299,21.6094757 66.8284271,23.1715728 L82.8284278,39.1715728 C84.390525,40.7336699 84.390525,43.2663299 82.8284279,44.8284271 C81.2663308,46.3905243 78.7336709,46.3905243 77.1715737,44.8284272 L68,35.6568539 L68,93.3431461 L77.1715737,84.1715728 C78.7336709,82.6094757 81.2663308,82.6094757 82.8284279,84.1715729 C84.390525,85.7336701 84.390525,88.2663301 82.8284278,89.8284272 L66.8284271,105.828427 C65.2663299,107.390524 62.73367,107.390524 61.1715729,105.828427 L45.1715729,89.8284271 C43.6094757,88.26633 43.6094757,85.73367 45.1715729,84.1715729 C46.73367,82.6094757 49.26633,82.6094757 50.8284271,84.1715729 L60,93.3431458 L60,35.6568542 L60,35.6568542 Z M16,116 L112,116 C114.209139,116 116,114.209139 116,112 C116,109.790861 114.209139,108 112,108 L16,108 C13.790861,108 12,109.790861 12,112 C12,114.209139 13.790861,116 16,116 L16,116 Z M16,20 L112,20 C114.209139,20 116,18.209139 116,16 C116,13.790861 114.209139,12 112,12 L16,12 C13.790861,12 12,13.790861 12,16 C12,18.209139 13.790861,20 16,20 L16,20 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/fold-all-button": {
            "title": "$:/core/images/fold-all-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fold-all tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"0\" y=\"64\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M64.0292774,58.6235628 C61.9791013,58.6242848 59.9275217,57.8435723 58.3632024,56.279253 L35.7458219,33.6618725 C32.6211696,30.5372202 32.6166093,25.4673401 35.7408036,22.3431458 C38.8586409,19.2253085 43.9325646,19.2211982 47.0595304,22.348164 L64.0250749,39.3137085 L80.9906194,22.348164 C84.1152717,19.2235117 89.1851518,19.2189514 92.3093461,22.3431458 C95.4271834,25.460983 95.4312937,30.5349067 92.3043279,33.6618725 L69.6869474,56.279253 C68.1237851,57.8424153 66.0737951,58.6247195 64.0241231,58.6250809 Z\" transform=\"translate(64.024316, 39.313708) scale(1, -1) translate(-64.024316, -39.313708) \"></path>\n        <path d=\"M64.0292774,123.621227 C61.9791013,123.621949 59.9275217,122.841236 58.3632024,121.276917 L35.7458219,98.6595365 C32.6211696,95.5348842 32.6166093,90.4650041 35.7408036,87.3408098 C38.8586409,84.2229725 43.9325646,84.2188622 47.0595304,87.345828 L64.0250749,104.311373 L80.9906194,87.345828 C84.1152717,84.2211757 89.1851518,84.2166154 92.3093461,87.3408098 C95.4271834,90.458647 95.4312937,95.5325707 92.3043279,98.6595365 L69.6869474,121.276917 C68.1237851,122.840079 66.0737951,123.622383 64.0241231,123.622745 Z\" transform=\"translate(64.024316, 104.311372) scale(1, -1) translate(-64.024316, -104.311372) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/fold-button": {
            "title": "$:/core/images/fold-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fold tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M64.0292774,63.6235628 C61.9791013,63.6242848 59.9275217,62.8435723 58.3632024,61.279253 L35.7458219,38.6618725 C32.6211696,35.5372202 32.6166093,30.4673401 35.7408036,27.3431458 C38.8586409,24.2253085 43.9325646,24.2211982 47.0595304,27.348164 L64.0250749,44.3137085 L80.9906194,27.348164 C84.1152717,24.2235117 89.1851518,24.2189514 92.3093461,27.3431458 C95.4271834,30.460983 95.4312937,35.5349067 92.3043279,38.6618725 L69.6869474,61.279253 C68.1237851,62.8424153 66.0737951,63.6247195 64.0241231,63.6250809 Z\" transform=\"translate(64.024316, 44.313708) scale(1, -1) translate(-64.024316, -44.313708) \"></path>\n        <path d=\"M64.0049614,105.998482 C61.9547853,105.999204 59.9032057,105.218491 58.3388864,103.654172 L35.7215059,81.0367916 C32.5968535,77.9121393 32.5922933,72.8422592 35.7164876,69.7180649 C38.8343248,66.6002276 43.9082485,66.5961173 47.0352144,69.7230831 L64.0007589,86.6886276 L80.9663034,69.7230831 C84.0909557,66.5984308 89.1608358,66.5938705 92.2850301,69.7180649 C95.4028673,72.8359021 95.4069777,77.9098258 92.2800119,81.0367916 L69.6626314,103.654172 C68.099469,105.217334 66.0494791,105.999639 63.999807,106 Z\" transform=\"translate(64.000000, 86.688628) scale(1, -1) translate(-64.000000, -86.688628) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/fold-others-button": {
            "title": "$:/core/images/fold-others-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-fold-others tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"56.0314331\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M101.657101,104.948818 C100.207918,103.498614 98.2051847,102.599976 95.9929031,102.599976 L72,102.599976 L72,78.6070725 C72,76.3964271 71.1036108,74.3936927 69.6545293,72.9441002 L69.6571005,72.9488183 C68.2079177,71.4986143 66.2051847,70.5999756 63.9929031,70.5999756 L32.0070969,70.5999756 C27.5881712,70.5999756 24,74.1816976 24,78.5999756 C24,83.0092633 27.5848994,86.5999756 32.0070969,86.5999756 L56,86.5999756 L56,110.592879 C56,112.803524 56.8963895,114.806259 58.3454713,116.255852 L58.3429,116.251133 C59.7920828,117.701337 61.7948156,118.599976 64.0070969,118.599976 L88,118.599976 L88,142.592879 C88,147.011804 91.581722,150.599976 96,150.599976 C100.409288,150.599976 104,147.015076 104,142.592879 L104,110.607072 C104,108.396427 103.103611,106.393693 101.654529,104.9441 Z\" transform=\"translate(64.000000, 110.599976) rotate(-45.000000) translate(-64.000000, -110.599976) \"></path>\n        <path d=\"M101.725643,11.7488671 C100.27646,10.2986632 98.2737272,9.40002441 96.0614456,9.40002441 L72.0685425,9.40002441 L72.0685425,-14.5928787 C72.0685425,-16.8035241 71.1721533,-18.8062584 69.7230718,-20.255851 L69.725643,-20.2511329 C68.2764602,-21.7013368 66.2737272,-22.5999756 64.0614456,-22.5999756 L32.0756394,-22.5999756 C27.6567137,-22.5999756 24.0685425,-19.0182536 24.0685425,-14.5999756 C24.0685425,-10.1906879 27.6534419,-6.59997559 32.0756394,-6.59997559 L56.0685425,-6.59997559 L56.0685425,17.3929275 C56.0685425,19.6035732 56.964932,21.6063078 58.4140138,23.0559004 L58.4114425,23.0511823 C59.8606253,24.5013859 61.8633581,25.4000244 64.0756394,25.4000244 L88.0685425,25.4000244 L88.0685425,49.3929275 C88.0685425,53.8118532 91.6502645,57.4000244 96.0685425,57.4000244 C100.47783,57.4000244 104.068542,53.815125 104.068542,49.3929275 L104.068542,17.4071213 C104.068542,15.1964759 103.172153,13.1937416 101.723072,11.744149 Z\" transform=\"translate(64.068542, 17.400024) scale(1, -1) rotate(-45.000000) translate(-64.068542, -17.400024) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/folder": {
            "title": "$:/core/images/folder",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-folder tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M55.6943257,128.000004 L7.99859666,128.000004 C3.5810937,128.000004 0,124.413822 0,119.996384 L0,48.0036243 C0,43.5833471 3.58387508,40.0000044 7.99859666,40.0000044 L16,40.0000044 L16,31.9999914 C16,27.5817181 19.5783731,24 24.0003461,24 L55.9996539,24 C60.4181231,24 64,27.5800761 64,31.9999914 L64,40.0000044 L104.001403,40.0000044 C108.418906,40.0000044 112,43.5861868 112,48.0036243 L112,59.8298353 L104,59.7475921 L104,51.9994189 C104,49.7887607 102.207895,48.0000044 99.9972215,48.0000044 L56,48.0000044 L56,36.0000255 C56,33.7898932 54.2072328,32 51.9957423,32 L28.0042577,32 C25.7890275,32 24,33.7908724 24,36.0000255 L24,48.0000044 L12.0027785,48.0000044 C9.78987688,48.0000044 8,49.7906032 8,51.9994189 L8,116.00059 C8,118.211248 9.79210499,120.000004 12.0027785,120.000004 L58.7630167,120.000004 L55.6943257,128.000004 L55.6943257,128.000004 Z\"></path>\n        <path d=\"M23.8728955,55.5 L119.875702,55.5 C124.293205,55.5 126.87957,59.5532655 125.650111,64.5630007 L112.305967,118.936999 C111.077582,123.942356 106.497904,128 102.083183,128 L6.08037597,128 C1.66287302,128 -0.923492342,123.946735 0.305967145,118.936999 L13.650111,64.5630007 C14.878496,59.5576436 19.4581739,55.5 23.8728955,55.5 L23.8728955,55.5 L23.8728955,55.5 Z M25.6530124,64 L113.647455,64 C115.858129,64 117.151473,66.0930612 116.538306,68.6662267 L105.417772,115.333773 C104.803671,117.910859 102.515967,120 100.303066,120 L12.3086228,120 C10.0979492,120 8.8046054,117.906939 9.41777189,115.333773 L20.5383062,68.6662267 C21.1524069,66.0891409 23.4401107,64 25.6530124,64 L25.6530124,64 L25.6530124,64 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/full-screen-button": {
            "title": "$:/core/images/full-screen-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-full-screen-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g>\n        <g>\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n        <g transform=\"translate(104.000000, 104.000000) rotate(-180.000000) translate(-104.000000, -104.000000) translate(80.000000, 80.000000)\">\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n        <g transform=\"translate(24.000000, 104.000000) rotate(-90.000000) translate(-24.000000, -104.000000) translate(0.000000, 80.000000)\">\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n        <g transform=\"translate(104.000000, 24.000000) rotate(90.000000) translate(-104.000000, -24.000000) translate(80.000000, 0.000000)\">\n            <path d=\"M5.29777586e-31,8 C1.59060409e-15,3.581722 3.581722,0 8,0 L40,0 C44.418278,0 48,3.581722 48,8 C48,12.418278 44.418278,16 40,16 L16,16 L16,40 C16,44.418278 12.418278,48 8,48 C3.581722,48 -3.55271368e-15,44.418278 0,40 L3.55271368e-15,8 Z\"></path>\n        </g>\n    </g>\n</svg>"
        },
        "$:/core/images/github": {
            "title": "$:/core/images/github",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-github tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n        <g fill-rule=\"evenodd\">\n            <path d=\"M63.9383506,1.60695328 C28.6017227,1.60695328 -0.055756057,30.2970814 -0.055756057,65.6906208 C-0.055756057,94.003092 18.2804728,118.019715 43.7123154,126.493393 C46.9143781,127.083482 48.0812647,125.104717 48.0812647,123.405261 C48.0812647,121.886765 48.02626,117.85449 47.9948287,112.508284 C30.1929317,116.379268 26.4368926,103.916587 26.4368926,103.916587 C23.5255693,96.5129372 19.3294921,94.5420399 19.3294921,94.5420399 C13.5186324,90.5687739 19.7695302,90.6474524 19.7695302,90.6474524 C26.1933001,91.099854 29.5721638,97.2525155 29.5721638,97.2525155 C35.2808718,107.044059 44.5531024,104.215566 48.1991321,102.575118 C48.7806109,98.4366275 50.4346826,95.612068 52.2616263,94.0109598 C38.0507543,92.3941159 23.1091047,86.8944862 23.1091047,62.3389152 C23.1091047,55.3443933 25.6039634,49.6205298 29.6978889,45.1437211 C29.0378318,43.5229433 26.8415704,37.0044266 30.3265147,28.1845627 C30.3265147,28.1845627 35.6973364,26.4615028 47.9241083,34.7542205 C53.027764,33.330139 58.5046663,32.6220321 63.9462084,32.5944947 C69.3838216,32.6220321 74.856795,33.330139 79.9683085,34.7542205 C92.1872225,26.4615028 97.5501864,28.1845627 97.5501864,28.1845627 C101.042989,37.0044266 98.8467271,43.5229433 98.190599,45.1437211 C102.292382,49.6205298 104.767596,55.3443933 104.767596,62.3389152 C104.767596,86.9574291 89.8023734,92.3744463 75.5482834,93.9598188 C77.8427675,95.9385839 79.8897303,99.8489072 79.8897303,105.828476 C79.8897303,114.392635 79.8111521,121.304544 79.8111521,123.405261 C79.8111521,125.120453 80.966252,127.114954 84.2115327,126.489459 C109.623731,117.996111 127.944244,93.9952241 127.944244,65.6906208 C127.944244,30.2970814 99.2867652,1.60695328 63.9383506,1.60695328\"></path>\n        </g>\n    </svg>\n"
        },
        "$:/core/images/globe": {
            "title": "$:/core/images/globe",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-globe tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M72.8111354,37.1275855 C72.8111354,37.9789875 72.8111354,38.8303894 72.8111354,39.6817913 C72.8111354,41.8784743 73.7885604,46.5631866 72.8111354,48.5143758 C71.3445471,51.4420595 68.1617327,52.0543531 66.4170946,54.3812641 C65.2352215,55.9575873 61.7987417,64.9821523 62.7262858,67.3005778 C66.6959269,77.2228204 74.26087,70.4881886 80.6887657,76.594328 C81.5527211,77.415037 83.5758191,78.8666631 83.985137,79.8899578 C87.2742852,88.1128283 76.4086873,94.8989524 87.7419325,106.189751 C88.9872885,107.430443 91.555495,102.372895 91.8205061,101.575869 C92.6726866,99.0129203 98.5458765,96.1267309 100.908882,94.5234439 C102.928056,93.1534443 105.782168,91.8557166 107.236936,89.7775886 C109.507391,86.5342557 108.717505,82.2640435 110.334606,79.0328716 C112.473794,74.7585014 114.163418,69.3979002 116.332726,65.0674086 C120.230862,57.2857361 121.054075,67.1596684 121.400359,67.5059523 C121.757734,67.8633269 122.411167,67.5059523 122.916571,67.5059523 C123.011132,67.5059523 124.364019,67.6048489 124.432783,67.5059523 C125.0832,66.5705216 123.390209,49.5852316 123.114531,48.2089091 C121.710578,41.1996597 116.17083,32.4278331 111.249523,27.7092761 C104.975994,21.6942076 104.160516,11.5121686 92.9912146,12.7547535 C92.7872931,12.7774397 87.906794,22.9027026 85.2136766,26.2672064 C81.486311,30.9237934 82.7434931,22.1144904 78.6876623,22.1144904 C78.6065806,22.1144904 77.5045497,22.0107615 77.4353971,22.1144904 C76.8488637,22.9942905 75.9952305,26.0101404 75.1288269,26.5311533 C74.8635477,26.6906793 73.4071369,26.2924966 73.2826811,26.5311533 C71.0401728,30.8313939 81.5394677,28.7427264 79.075427,34.482926 C76.7225098,39.9642538 72.747373,32.4860199 72.747373,43.0434079\"></path>\n        <path d=\"M44.4668556,7.01044608 C54.151517,13.1403033 45.1489715,19.2084878 47.1611905,23.2253896 C48.8157833,26.5283781 51.4021933,28.6198851 48.8753629,33.038878 C46.8123257,36.6467763 42.0052989,37.0050492 39.251679,39.7621111 C36.2115749,42.8060154 33.7884281,48.7028116 32.4624592,52.6732691 C30.8452419,57.5158356 47.0088721,59.5388126 44.5246867,63.6811917 C43.1386839,65.9923513 37.7785192,65.1466282 36.0880227,63.8791519 C34.9234453,63.0059918 32.4946425,63.3331166 31.6713597,62.0997342 C29.0575851,58.1839669 29.4107339,54.0758543 28.0457962,49.9707786 C27.1076833,47.1493864 21.732611,47.8501656 20.2022714,49.3776393 C19.6790362,49.8998948 19.8723378,51.1703278 19.8723378,51.8829111 C19.8723378,57.1682405 26.9914913,55.1986414 26.9914913,58.3421973 C26.9914913,72.9792302 30.9191897,64.8771867 38.1313873,69.6793121 C48.1678018,76.3618966 45.9763926,76.981595 53.0777543,84.0829567 C56.7511941,87.7563965 60.8192437,87.7689005 62.503478,93.3767069 C64.1046972,98.7081071 53.1759798,98.7157031 50.786754,100.825053 C49.663965,101.816317 47.9736094,104.970571 46.5680513,105.439676 C44.7757187,106.037867 43.334221,105.93607 41.6242359,107.219093 C39.1967302,109.040481 37.7241465,112.151588 37.6034934,112.030935 C35.4555278,109.88297 34.0848666,96.5511248 33.7147244,93.7726273 C33.1258872,89.3524817 28.1241923,88.2337027 26.7275443,84.7420826 C25.1572737,80.8164061 28.2518481,75.223612 25.599097,70.9819941 C19.0797019,60.557804 13.7775712,56.4811506 10.2493953,44.6896152 C9.3074899,41.5416683 13.5912267,38.1609942 15.1264825,35.8570308 C17.0029359,33.0410312 17.7876232,30.0028946 19.8723378,27.2224065 C22.146793,24.1888519 40.8551166,9.46076832 
43.8574051,8.63490613 L44.4668556,7.01044608 Z\"></path>\n        <path d=\"M64,126 C98.2416545,126 126,98.2416545 126,64 C126,29.7583455 98.2416545,2 64,2 C29.7583455,2 2,29.7583455 2,64 C2,98.2416545 29.7583455,126 64,126 Z M64,120 C94.927946,120 120,94.927946 120,64 C120,33.072054 94.927946,8 64,8 C33.072054,8 8,33.072054 8,64 C8,94.927946 33.072054,120 64,120 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-1": {
            "title": "$:/core/images/heading-1",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-1 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M14,30 L27.25,30 L27.25,60.104 L61.7,60.104 L61.7,30 L74.95,30 L74.95,105.684 L61.7,105.684 L61.7,71.552 L27.25,71.552 L27.25,105.684 L14,105.684 L14,30 Z M84.3350766,43.78 C86.8790893,43.78 89.3523979,43.5680021 91.7550766,43.144 C94.1577553,42.7199979 96.3307336,42.0133383 98.2740766,41.024 C100.21742,40.0346617 101.87807,38.7626744 103.256077,37.208 C104.634084,35.6533256 105.535075,33.7453446 105.959077,31.484 L115.817077,31.484 L115.817077,105.684 L102.567077,105.684 L102.567077,53.32 L84.3350766,53.32 L84.3350766,43.78 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-2": {
            "title": "$:/core/images/heading-2",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-2 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M125.519077,105.684 L74.8510766,105.684 C74.9217436,99.5359693 76.4057288,94.1653563 79.3030766,89.572 C82.2004244,84.9786437 86.1577182,80.986017 91.1750766,77.594 C93.5777553,75.8273245 96.0863969,74.113675 98.7010766,72.453 C101.315756,70.792325 103.718399,69.0080095 105.909077,67.1 C108.099754,65.1919905 109.901736,63.1250111 111.315077,60.899 C112.728417,58.6729889 113.47041,56.1113478 113.541077,53.214 C113.541077,51.8713266 113.382078,50.4403409 113.064077,48.921 C112.746075,47.4016591 112.127748,45.9883399 111.209077,44.681 C110.290405,43.3736601 109.018418,42.2783377 107.393077,41.395 C105.767735,40.5116622 103.647756,40.07 101.033077,40.07 C98.6303979,40.07 96.6340846,40.5469952 95.0440766,41.501 C93.4540687,42.4550048 92.1820814,43.762325 91.2280766,45.423 C90.2740719,47.083675 89.5674123,49.0446554 89.1080766,51.306 C88.648741,53.5673446 88.3837436,56.0053203 88.3130766,58.62 L76.2290766,58.62 C76.2290766,54.5213128 76.7767378,50.7230175 77.8720766,47.225 C78.9674154,43.7269825 80.610399,40.7060127 82.8010766,38.162 C84.9917542,35.6179873 87.6593942,33.6216739 90.8040766,32.173 C93.948759,30.7243261 97.6057224,30 101.775077,30 C106.297766,30 110.078395,30.7419926 113.117077,32.226 C116.155758,33.7100074 118.611401,35.5826554 120.484077,37.844 C122.356753,40.1053446 123.681739,42.5609868 124.459077,45.211 C125.236414,47.8610133 125.625077,50.3873213 125.625077,52.79 C125.625077,55.7580148 125.165748,58.4433213 124.247077,60.846 C123.328405,63.2486787 122.091751,65.4569899 120.537077,67.471 C118.982402,69.4850101 117.215753,71.3399915 115.237077,73.036 C113.2584,74.7320085 111.209087,76.3219926 109.089077,77.806 C106.969066,79.2900074 104.849087,80.7033266 102.729077,82.046 C100.609066,83.3886734 98.6480856,84.7313266 96.8460766,86.074 C95.0440676,87.4166734 93.47175,88.8123261 92.1290766,90.261 C90.7864032,91.7096739 89.8677458,93.2466585 89.3730766,94.872 L125.519077,94.872 L125.519077,105.684 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-3": {
            "title": "$:/core/images/heading-3",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-3 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M94.8850766,62.224 C96.8637532,62.294667 98.8424001,62.1533351 100.821077,61.8 C102.799753,61.4466649 104.566402,60.8283378 106.121077,59.945 C107.675751,59.0616623 108.930072,57.8426744 109.884077,56.288 C110.838081,54.7333256 111.315077,52.8253446 111.315077,50.564 C111.315077,47.3839841 110.237421,44.8400095 108.082077,42.932 C105.926733,41.0239905 103.153094,40.07 99.7610766,40.07 C97.641066,40.07 95.8037511,40.4939958 94.2490766,41.342 C92.6944022,42.1900042 91.4047484,43.3383261 90.3800766,44.787 C89.3554048,46.2356739 88.5957458,47.860991 88.1010766,49.663 C87.6064075,51.465009 87.3944096,53.3199905 87.4650766,55.228 L75.3810766,55.228 C75.5224107,51.623982 76.1937373,48.2850154 77.3950766,45.211 C78.596416,42.1369846 80.2393995,39.4693446 82.3240766,37.208 C84.4087537,34.9466554 86.9350618,33.1800064 89.9030766,31.908 C92.8710915,30.6359936 96.2277246,30 99.9730766,30 C102.870424,30 105.714729,30.4239958 108.506077,31.272 C111.297424,32.1200042 113.806065,33.3566585 116.032077,34.982 C118.258088,36.6073415 120.042403,38.6743208 121.385077,41.183 C122.72775,43.6916792 123.399077,46.5713171 123.399077,49.822 C123.399077,53.5673521 122.551085,56.8356527 120.855077,59.627 C119.159068,62.4183473 116.509095,64.4499936 112.905077,65.722 L112.905077,65.934 C117.145098,66.7820042 120.448731,68.8843166 122.816077,72.241 C125.183422,75.5976835 126.367077,79.6786426 126.367077,84.484 C126.367077,88.017351 125.660417,91.1796527 124.247077,93.971 C122.833736,96.7623473 120.925755,99.129657 118.523077,101.073 C116.120398,103.016343 113.329093,104.517995 110.149077,105.578 C106.969061,106.638005 103.612428,107.168 100.079077,107.168 C95.7683884,107.168 92.005426,106.549673 88.7900766,105.313 C85.5747272,104.076327 82.8894207,102.327345 80.7340766,100.066 C78.5787325,97.8046554 76.9357489,95.0840159 75.8050766,91.904 C74.6744043,88.7239841 74.0737436,85.1906861 74.0030766,81.304 L86.0870766,81.304 C85.9457426,85.8266893 87.0587315,89.5896517 89.4260766,92.593 C91.7934218,95.5963483 95.3443863,97.098 100.079077,97.098 C104.107097,97.098 107.481396,95.9496782 110.202077,93.653 C112.922757,91.3563219 114.283077,88.0880212 114.283077,83.848 C114.283077,80.9506522 113.717749,78.6540085 112.587077,76.958 C111.456404,75.2619915 109.972419,73.9723378 108.135077,73.089 C106.297734,72.2056623 104.230755,71.6580011 101.934077,71.446 C99.6373985,71.2339989 97.2877553,71.163333 94.8850766,71.234 L94.8850766,62.224 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-4": {
            "title": "$:/core/images/heading-4",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-4 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8,30 L21.25,30 L21.25,60.104 L55.7,60.104 L55.7,30 L68.95,30 L68.95,105.684 L55.7,105.684 L55.7,71.552 L21.25,71.552 L21.25,105.684 L8,105.684 L8,30 Z M84.5890766,78.548 L107.061077,78.548 L107.061077,45.9 L106.849077,45.9 L84.5890766,78.548 Z M128.049077,88.088 L118.509077,88.088 L118.509077,105.684 L107.061077,105.684 L107.061077,88.088 L75.2610766,88.088 L75.2610766,76.11 L107.061077,31.484 L118.509077,31.484 L118.509077,78.548 L128.049077,78.548 L128.049077,88.088 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-5": {
            "title": "$:/core/images/heading-5",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-5 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M83.7550766,31.484 L122.127077,31.484 L122.127077,42.296 L92.7650766,42.296 L88.9490766,61.164 L89.1610766,61.376 C90.7864181,59.5386575 92.8533974,58.1430048 95.3620766,57.189 C97.8707558,56.2349952 100.361731,55.758 102.835077,55.758 C106.509762,55.758 109.795729,56.3763272 112.693077,57.613 C115.590424,58.8496729 118.0284,60.5809889 120.007077,62.807 C121.985753,65.0330111 123.487405,67.6653181 124.512077,70.704 C125.536748,73.7426819 126.049077,77.028649 126.049077,80.562 C126.049077,83.5300148 125.572081,86.5863176 124.618077,89.731 C123.664072,92.8756824 122.144754,95.7376538 120.060077,98.317 C117.9754,100.896346 115.30776,103.016325 112.057077,104.677 C108.806394,106.337675 104.919766,107.168 100.397077,107.168 C96.7930586,107.168 93.454092,106.691005 90.3800766,105.737 C87.3060613,104.782995 84.6030883,103.35201 82.2710766,101.444 C79.939065,99.5359905 78.0840835,97.1863473 76.7060766,94.395 C75.3280697,91.6036527 74.5684107,88.3353521 74.4270766,84.59 L86.5110766,84.59 C86.8644117,88.6180201 88.2423979,91.7096559 90.6450766,93.865 C93.0477553,96.0203441 96.2277235,97.098 100.185077,97.098 C102.729089,97.098 104.884401,96.6740042 106.651077,95.826 C108.417752,94.9779958 109.848738,93.8120074 110.944077,92.328 C112.039415,90.8439926 112.816741,89.1126766 113.276077,87.134 C113.735412,85.1553234 113.965077,83.0353446 113.965077,80.774 C113.965077,78.7246564 113.682413,76.763676 113.117077,74.891 C112.55174,73.018324 111.703749,71.3753404 110.573077,69.962 C109.442404,68.5486596 107.976086,67.4180042 106.174077,66.57 C104.372068,65.7219958 102.269755,65.298 99.8670766,65.298 C97.3230639,65.298 94.9380878,65.7749952 92.7120766,66.729 C90.4860655,67.6830048 88.8784149,69.4673203 87.8890766,72.082 L75.8050766,72.082 L83.7550766,31.484 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/heading-6": {
            "title": "$:/core/images/heading-6",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-heading-6 tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M6,30 L19.25,30 L19.25,60.104 L53.7,60.104 L53.7,30 L66.95,30 L66.95,105.684 L53.7,105.684 L53.7,71.552 L19.25,71.552 L19.25,105.684 L6,105.684 L6,30 Z M112.587077,50.246 C112.304409,47.2073181 111.226753,44.751676 109.354077,42.879 C107.481401,41.006324 104.955093,40.07 101.775077,40.07 C99.584399,40.07 97.6940846,40.4763293 96.1040766,41.289 C94.5140687,42.1016707 93.1714154,43.1793266 92.0760766,44.522 C90.9807378,45.8646734 90.0974133,47.401658 89.4260766,49.133 C88.7547399,50.864342 88.2070787,52.6839905 87.7830766,54.592 C87.3590745,56.5000095 87.0587442,58.390324 86.8820766,60.263 C86.7054091,62.135676 86.5464107,63.8846585 86.4050766,65.51 L86.6170766,65.722 C88.2424181,62.7539852 90.4860623,60.5456739 93.3480766,59.097 C96.2100909,57.6483261 99.3017267,56.924 102.623077,56.924 C106.297762,56.924 109.583729,57.5599936 112.481077,58.832 C115.378424,60.1040064 117.834067,61.8529889 119.848077,64.079 C121.862087,66.3050111 123.399071,68.9373181 124.459077,71.976 C125.519082,75.0146819 126.049077,78.300649 126.049077,81.834 C126.049077,85.438018 125.466082,88.7769846 124.300077,91.851 C123.134071,94.9250154 121.455754,97.6103219 119.265077,99.907 C117.074399,102.203678 114.459758,103.987994 111.421077,105.26 C108.382395,106.532006 105.025762,107.168 101.351077,107.168 C95.9097161,107.168 91.4400941,106.16101 87.9420766,104.147 C84.4440591,102.13299 81.6880867,99.3770175 79.6740766,95.879 C77.6600666,92.3809825 76.2644138,88.2823568 75.4870766,83.583 C74.7097394,78.8836432 74.3210766,73.8133605 74.3210766,68.372 C74.3210766,63.9199777 74.7980719,59.4326893 75.7520766,54.91 C76.7060814,50.3873107 78.278399,46.2710186 80.4690766,42.561 C82.6597542,38.8509815 85.5393921,35.8300117 89.1080766,33.498 C92.6767611,31.1659883 97.0757171,30 102.305077,30 C105.273091,30 108.064397,30.4946617 110.679077,31.484 C113.293756,32.4733383 115.608067,33.8513245 117.622077,35.618 C119.636087,37.3846755 121.27907,39.5046543 122.551077,41.978 C123.823083,44.4513457 124.529743,47.2073181 124.671077,50.246 L112.587077,50.246 Z M100.927077,97.098 C103.117754,97.098 105.025735,96.6563378 106.651077,95.773 C108.276418,94.8896623 109.636738,93.7413404 110.732077,92.328 C111.827415,90.9146596 112.640074,89.271676 113.170077,87.399 C113.700079,85.526324 113.965077,83.6006766 113.965077,81.622 C113.965077,79.6433234 113.700079,77.7353425 113.170077,75.898 C112.640074,74.0606575 111.827415,72.4530069 110.732077,71.075 C109.636738,69.6969931 108.276418,68.5840042 106.651077,67.736 C105.025735,66.8879958 103.117754,66.464 100.927077,66.464 C98.736399,66.464 96.8107516,66.8703293 95.1500766,67.683 C93.4894017,68.4956707 92.0937489,69.5909931 90.9630766,70.969 C89.8324043,72.3470069 88.9844128,73.9546575 88.4190766,75.792 C87.8537405,77.6293425 87.5710766,79.5726564 87.5710766,81.622 C87.5710766,83.6713436 87.8537405,85.6146575 88.4190766,87.452 C88.9844128,89.2893425 89.8324043,90.9323261 90.9630766,92.381 C92.0937489,93.8296739 93.4894017,94.9779958 95.1500766,95.826 C96.8107516,96.6740042 98.736399,97.098 100.927077,97.098 L100.927077,97.098 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/help": {
            "title": "$:/core/images/help",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-help tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M36.0548906,111.44117 C30.8157418,115.837088 20.8865444,118.803477 9.5,118.803477 C7.86465619,118.803477 6.25937294,118.742289 4.69372699,118.624467 C12.612543,115.984876 18.7559465,110.02454 21.0611049,102.609942 C8.74739781,92.845129 1.04940554,78.9359851 1.04940554,63.5 C1.04940554,33.9527659 29.2554663,10 64.0494055,10 C98.8433448,10 127.049406,33.9527659 127.049406,63.5 C127.049406,93.0472341 98.8433448,117 64.0494055,117 C53.9936953,117 44.48824,114.999337 36.0548906,111.44117 L36.0548906,111.44117 Z M71.4042554,77.5980086 C71.406883,77.2865764 71.4095079,76.9382011 71.4119569,76.5610548 C71.4199751,75.3262169 71.4242825,74.0811293 71.422912,72.9158546 C71.4215244,71.736154 71.4143321,70.709635 71.4001396,69.8743525 C71.4078362,68.5173028 71.9951951,67.7870427 75.1273009,65.6385471 C75.2388969,65.5619968 76.2124091,64.8981068 76.5126553,64.6910879 C79.6062455,62.5580654 81.5345849,60.9050204 83.2750652,58.5038955 C85.6146327,55.2762841 86.8327108,51.426982 86.8327108,46.8554323 C86.8327108,33.5625756 76.972994,24.9029551 65.3778484,24.9029551 C54.2752771,24.9029551 42.8794554,34.5115163 41.3121702,47.1975534 C40.9043016,50.4989536 43.2499725,53.50591 46.5513726,53.9137786 C49.8527728,54.3216471 52.8597292,51.9759763 53.2675978,48.6745761 C54.0739246,42.1479456 60.2395837,36.9492759 65.3778484,36.9492759 C70.6427674,36.9492759 74.78639,40.5885487 74.78639,46.8554323 C74.78639,50.4892974 73.6853224,52.008304 69.6746221,54.7736715 C69.4052605,54.9593956 68.448509,55.6118556 68.3131127,55.7047319 C65.6309785,57.5445655 64.0858213,58.803255 62.6123358,60.6352315 C60.5044618,63.2559399 59.3714208,66.3518252 59.3547527,69.9487679 C59.3684999,70.8407274 59.3752803,71.8084521 59.3765995,72.9300232 C59.3779294,74.0607297 59.3737237,75.2764258 59.36589,76.482835 C59.3634936,76.8518793 59.3609272,77.1924914 59.3583633,77.4963784 C59.3568319,77.6778944 59.3556368,77.8074256 59.3549845,77.8730928 C59.3219814,81.1994287 61.9917551,83.9227111 65.318091,83.9557142 C68.644427,83.9887173 71.3677093,81.3189435 71.4007124,77.9926076 C71.4014444,77.9187458 71.402672,77.7856841 71.4042554,77.5980086 Z M65.3778489,102.097045 C69.5359735,102.097045 72.9067994,98.7262189 72.9067994,94.5680944 C72.9067994,90.4099698 69.5359735,87.0391439 65.3778489,87.0391439 C61.2197243,87.0391439 57.8488984,90.4099698 57.8488984,94.5680944 C57.8488984,98.7262189 61.2197243,102.097045 65.3778489,102.097045 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/home-button": {
            "title": "$:/core/images/home-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-home-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M112.9847,119.501583 C112.99485,119.336814 113,119.170705 113,119.003406 L113,67.56802 C116.137461,70.5156358 121.076014,70.4518569 124.133985,67.3938855 C127.25818,64.2696912 127.260618,59.2068102 124.131541,56.0777326 L70.3963143,2.34250601 C68.8331348,0.779326498 66.7828947,-0.000743167069 64.7337457,1.61675364e-05 C62.691312,-0.00409949529 60.6426632,0.777559815 59.077717,2.34250601 L33,28.420223 L33,28.420223 L33,8.00697327 C33,3.58484404 29.4092877,0 25,0 C20.581722,0 17,3.59075293 17,8.00697327 L17,44.420223 L5.3424904,56.0777326 C2.21694607,59.2032769 2.22220878,64.2760483 5.34004601,67.3938855 C8.46424034,70.5180798 13.5271213,70.5205187 16.6561989,67.3914411 L17,67.04764 L17,119.993027 C17,119.994189 17.0000002,119.995351 17.0000007,119.996514 C17.0000002,119.997675 17,119.998838 17,120 C17,124.418278 20.5881049,128 24.9992458,128 L105.000754,128 C109.418616,128 113,124.409288 113,120 C113,119.832611 112.99485,119.666422 112.9847,119.501583 Z M97,112 L97,51.5736087 L97,51.5736087 L64.7370156,19.3106244 L33,51.04764 L33,112 L97,112 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/import-button": {
            "title": "$:/core/images/import-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-import-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M105.449437,94.2138951 C105.449437,94.2138951 110.049457,94.1897106 110.049457,99.4026111 C110.049457,104.615512 105.163246,104.615511 105.163246,104.615511 L45.0075072,105.157833 C45.0075072,105.157833 0.367531803,106.289842 0.367532368,66.6449212 C0.367532934,27.0000003 45.0428249,27.0000003 45.0428249,27.0000003 L105.532495,27.0000003 C105.532495,27.0000003 138.996741,25.6734987 138.996741,55.1771866 C138.996741,84.6808745 105.727102,82.8457535 105.727102,82.8457535 L56.1735087,82.8457535 C56.1735087,82.8457535 22.6899229,85.1500223 22.6899229,66.0913753 C22.6899229,47.0327282 56.1735087,49.3383013 56.1735087,49.3383013 L105.727102,49.3383013 C105.727102,49.3383013 111.245209,49.3383024 111.245209,54.8231115 C111.245209,60.3079206 105.727102,60.5074524 105.727102,60.5074524 L56.1735087,60.5074524 C56.1735087,60.5074524 37.48913,60.5074528 37.48913,66.6449195 C37.48913,72.7823862 56.1735087,71.6766023 56.1735087,71.6766023 L105.727102,71.6766029 C105.727102,71.6766029 127.835546,73.1411469 127.835546,55.1771866 C127.835546,35.5304025 105.727102,38.3035317 105.727102,38.3035317 L45.0428249,38.3035317 C45.0428249,38.3035317 11.5287276,38.3035313 11.5287276,66.6449208 C11.5287276,94.9863103 45.0428244,93.9579678 45.0428244,93.9579678 L105.449437,94.2138951 Z\" transform=\"translate(69.367532, 66.000000) rotate(-45.000000) translate(-69.367532, -66.000000) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/info-button": {
            "title": "$:/core/images/info-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-info-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <g transform=\"translate(0.049406, 0.000000)\">\n            <path d=\"M64,128 C99.346224,128 128,99.346224 128,64 C128,28.653776 99.346224,0 64,0 C28.653776,0 0,28.653776 0,64 C0,99.346224 28.653776,128 64,128 Z M64,112 C90.509668,112 112,90.509668 112,64 C112,37.490332 90.509668,16 64,16 C37.490332,16 16,37.490332 16,64 C16,90.509668 37.490332,112 64,112 Z\"></path>\n            <circle cx=\"64\" cy=\"32\" r=\"8\"></circle>\n            <rect x=\"56\" y=\"48\" width=\"16\" height=\"56\" rx=\"8\"></rect>\n        </g>\n    </g>\n</svg>"
        },
        "$:/core/images/italic": {
            "title": "$:/core/images/italic",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-italic tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n         <polygon points=\"66.7114846 0 89.1204482 0 62.4089636 128 40 128\"></polygon>\n    </g>\n</svg>"
        },
        "$:/core/images/left-arrow": {
            "created": "20150315234410875",
            "modified": "20150315235324760",
            "tags": "$:/tags/Image",
            "title": "$:/core/images/left-arrow",
            "text": "<svg class=\"tc-image-left-arrow tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path transform=\"rotate(135, 63.8945, 64.1752)\" d=\"m109.07576,109.35336c-1.43248,1.43361 -3.41136,2.32182 -5.59717,2.32182l-79.16816,0c-4.36519,0 -7.91592,-3.5444 -7.91592,-7.91666c0,-4.36337 3.54408,-7.91667 7.91592,-7.91667l71.25075,0l0,-71.25075c0,-4.3652 3.54442,-7.91592 7.91667,-7.91592c4.36336,0 7.91667,3.54408 7.91667,7.91592l0,79.16815c0,2.1825 -0.88602,4.16136 -2.3185,5.59467l-0.00027,-0.00056z\"/>\n</svg>\n"
        },
        "$:/core/images/line-width": {
            "title": "$:/core/images/line-width",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-line-width tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M128,-97 L112.992786,-97 C112.452362,-97 112,-96.5522847 112,-96 C112,-95.4438648 112.444486,-95 112.992786,-95 L128,-95 L128,-97 Z M128,-78.6794919 L111.216185,-88.3696322 C110.748163,-88.6398444 110.132549,-88.4782926 109.856406,-88 C109.578339,-87.5183728 109.741342,-86.9117318 110.216185,-86.6375814 L128,-76.3700908 L128,-78.6794919 Z M78.6794919,-128 L88.3696322,-111.216185 C88.6437826,-110.741342 88.4816272,-110.134474 88,-109.856406 C87.5217074,-109.580264 86.9077936,-109.748163 86.6375814,-110.216185 L76.3700908,-128 L78.6794919,-128 Z M97,-128 L97,-112.992786 C97,-112.444486 96.5561352,-112 96,-112 C95.4477153,-112 95,-112.452362 95,-112.992786 L95,-128 L97,-128 Z M115.629909,-128 L105.362419,-110.216185 C105.088268,-109.741342 104.481627,-109.578339 104,-109.856406 C103.521707,-110.132549 103.360156,-110.748163 103.630368,-111.216185 L113.320508,-128 L115.629909,-128 Z M128,-113.320508 L111.216185,-103.630368 C110.741342,-103.356217 110.134474,-103.518373 109.856406,-104 C109.580264,-104.478293 109.748163,-105.092206 110.216185,-105.362419 L128,-115.629909 L128,-113.320508 Z M48,-96 C48,-96.5522847 48.4523621,-97 48.9927864,-97 L79.0072136,-97 C79.5555144,-97 80,-96.5561352 80,-96 C80,-95.4477153 79.5476379,-95 79.0072136,-95 L48.9927864,-95 C48.4444856,-95 48,-95.4438648 48,-96 Z M54.4307806,-120 C54.706923,-120.478293 55.3225377,-120.639844 55.7905589,-120.369632 L81.7838153,-105.362419 C82.2586577,-105.088268 82.4216611,-104.481627 82.1435935,-104 C81.8674512,-103.521707 81.2518365,-103.360156 80.7838153,-103.630368 L54.7905589,-118.637581 C54.3157165,-118.911732 54.152713,-119.518373 54.4307806,-120 Z M104,-82.1435935 C104.478293,-82.4197359 105.092206,-82.2518365 105.362419,-81.7838153 L120.369632,-55.7905589 C120.643783,-55.3157165 120.481627,-54.7088482 120,-54.4307806 C119.521707,-54.1546382 118.907794,-54.3225377 118.637581,-54.7905589 L103.630368,-80.7838153 C103.356217,-81.2586577 103.518373,-81.865526 104,-82.1435935 Z M96,-80 C96.5522847,-80 97,-79.5476379 97,-79.0072136 L97,-48.9927864 C97,-48.4444856 96.5561352,-48 96,-48 C95.4477153,-48 95,-48.4523621 95,-48.9927864 L95,-79.0072136 C95,-79.5555144 95.4438648,-80 96,-80 Z M88,-82.1435935 C88.4782926,-81.8674512 88.6398444,-81.2518365 88.3696322,-80.7838153 L73.3624186,-54.7905589 C73.0882682,-54.3157165 72.4816272,-54.152713 72,-54.4307806 C71.5217074,-54.706923 71.3601556,-55.3225377 71.6303678,-55.7905589 L86.6375814,-81.7838153 C86.9117318,-82.2586577 87.5183728,-82.4216611 88,-82.1435935 Z M82.1435935,-88 C82.4197359,-87.5217074 82.2518365,-86.9077936 81.7838153,-86.6375814 L55.7905589,-71.6303678 C55.3157165,-71.3562174 54.7088482,-71.5183728 54.4307806,-72 C54.1546382,-72.4782926 54.3225377,-73.0922064 54.7905589,-73.3624186 L80.7838153,-88.3696322 C81.2586577,-88.6437826 81.865526,-88.4816272 82.1435935,-88 Z M1.30626177e-08,-41.9868843 L15.0170091,-57.9923909 L20.7983821,-52.9749272 L44.7207091,-81.2095939 L73.4260467,-42.1002685 L85.984793,-56.6159488 L104.48741,-34.0310661 L127.969109,-47.4978019 L127.969109,7.99473128e-07 L1.30626177e-08,7.99473128e-07 L1.30626177e-08,-41.9868843 Z M96,-84 C102.627417,-84 108,-89.372583 108,-96 C108,-102.627417 102.627417,-108 96,-108 C89.372583,-108 84,-102.627417 84,-96 C84,-89.372583 89.372583,-84 96,-84 Z\"></path>\n        <path d=\"M16,18 L112,18 C113.104569,18 114,17.1045695 114,16 
C114,14.8954305 113.104569,14 112,14 L16,14 C14.8954305,14 14,14.8954305 14,16 C14,17.1045695 14.8954305,18 16,18 L16,18 Z M16,35 L112,35 C114.209139,35 116,33.209139 116,31 C116,28.790861 114.209139,27 112,27 L16,27 C13.790861,27 12,28.790861 12,31 C12,33.209139 13.790861,35 16,35 L16,35 Z M16,56 L112,56 C115.313708,56 118,53.3137085 118,50 C118,46.6862915 115.313708,44 112,44 L16,44 C12.6862915,44 10,46.6862915 10,50 C10,53.3137085 12.6862915,56 16,56 L16,56 Z M16,85 L112,85 C117.522847,85 122,80.5228475 122,75 C122,69.4771525 117.522847,65 112,65 L16,65 C10.4771525,65 6,69.4771525 6,75 C6,80.5228475 10.4771525,85 16,85 L16,85 Z M16,128 L112,128 C120.836556,128 128,120.836556 128,112 C128,103.163444 120.836556,96 112,96 L16,96 C7.163444,96 0,103.163444 0,112 C0,120.836556 7.163444,128 16,128 L16,128 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/link": {
            "title": "$:/core/images/link",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-link tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M128.719999,57.568543 C130.219553,53.8628171 131.045202,49.8121445 131.045202,45.5685425 C131.045202,27.8915447 116.718329,13.5685425 99.0452364,13.5685425 L67.0451674,13.5685425 C49.3655063,13.5685425 35.0452019,27.8954305 35.0452019,45.5685425 C35.0452019,63.2455403 49.3720745,77.5685425 67.0451674,77.5685425 L99.0452364,77.5685425 C100.406772,77.5685425 101.748384,77.4835732 103.065066,77.3186499 C96.4792444,73.7895096 91.1190212,68.272192 87.7873041,61.5685425 L67.0506214,61.5685425 C58.2110723,61.5685425 51.0452019,54.4070414 51.0452019,45.5685425 C51.0452019,36.7319865 58.2005234,29.5685425 67.0506214,29.5685425 L99.0397824,29.5685425 C107.879331,29.5685425 115.045202,36.7300436 115.045202,45.5685425 C115.045202,48.9465282 113.99957,52.0800164 112.21335,54.6623005 C114.314383,56.4735917 117.050039,57.5685425 120.041423,57.5685425 L128.720003,57.5685425 Z\" transform=\"translate(83.045202, 45.568542) rotate(-225.000000) translate(-83.045202, -45.568542)\"></path>\n        <path d=\"M-0.106255113,71.0452019 C-1.60580855,74.7509276 -2.43145751,78.8016001 -2.43145751,83.0452019 C-2.43145751,100.7222 11.8954151,115.045202 29.568508,115.045202 L61.568577,115.045202 C79.2482381,115.045202 93.5685425,100.718314 93.5685425,83.0452019 C93.5685425,65.3682041 79.2416699,51.0452019 61.568577,51.0452019 L29.568508,51.0452019 C28.206973,51.0452019 26.8653616,51.1301711 25.5486799,51.2950943 C32.1345,54.8242347 37.4947231,60.3415524 40.8264403,67.0452019 L61.563123,67.0452019 C70.4026721,67.0452019 77.5685425,74.206703 77.5685425,83.0452019 C77.5685425,91.8817579 70.413221,99.0452019 61.563123,99.0452019 L29.573962,99.0452019 C20.7344129,99.0452019 13.5685425,91.8837008 13.5685425,83.0452019 C13.5685425,79.6672162 14.6141741,76.533728 16.4003949,73.9514439 C14.2993609,72.1401527 11.5637054,71.0452019 8.5723215,71.0452019 L-0.106255113,71.0452019 Z\" transform=\"translate(45.568542, 83.045202) rotate(-225.000000) translate(-45.568542, -83.045202)\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/list-bullet": {
            "title": "$:/core/images/list-bullet",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-list-bullet tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M11.6363636,40.2727273 C18.0629498,40.2727273 23.2727273,35.0629498 23.2727273,28.6363636 C23.2727273,22.2097775 18.0629498,17 11.6363636,17 C5.20977746,17 0,22.2097775 0,28.6363636 C0,35.0629498 5.20977746,40.2727273 11.6363636,40.2727273 Z M11.6363636,75.1818182 C18.0629498,75.1818182 23.2727273,69.9720407 23.2727273,63.5454545 C23.2727273,57.1188684 18.0629498,51.9090909 11.6363636,51.9090909 C5.20977746,51.9090909 0,57.1188684 0,63.5454545 C0,69.9720407 5.20977746,75.1818182 11.6363636,75.1818182 Z M11.6363636,110.090909 C18.0629498,110.090909 23.2727273,104.881132 23.2727273,98.4545455 C23.2727273,92.0279593 18.0629498,86.8181818 11.6363636,86.8181818 C5.20977746,86.8181818 0,92.0279593 0,98.4545455 C0,104.881132 5.20977746,110.090909 11.6363636,110.090909 Z M34.9090909,22.8181818 L128,22.8181818 L128,34.4545455 L34.9090909,34.4545455 L34.9090909,22.8181818 Z M34.9090909,57.7272727 L128,57.7272727 L128,69.3636364 L34.9090909,69.3636364 L34.9090909,57.7272727 Z M34.9090909,92.6363636 L128,92.6363636 L128,104.272727 L34.9090909,104.272727 L34.9090909,92.6363636 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/list-number": {
            "title": "$:/core/images/list-number",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-list-number tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M33.8390805,22.3563218 L128,22.3563218 L128,34.1264368 L33.8390805,34.1264368 L33.8390805,22.3563218 Z M33.8390805,57.6666667 L128,57.6666667 L128,69.4367816 L33.8390805,69.4367816 L33.8390805,57.6666667 Z M33.8390805,92.9770115 L128,92.9770115 L128,104.747126 L33.8390805,104.747126 L33.8390805,92.9770115 Z M0.379509711,42.6307008 L0.379509711,40.4082314 L1.37821948,40.4082314 C2.20382368,40.4082314 2.82301754,40.268077 3.23581964,39.9877642 C3.64862174,39.7074513 3.85501969,39.0400498 3.85501969,37.9855395 L3.85501969,22.7686318 C3.85501969,21.3270228 3.66193774,20.4327047 3.27576803,20.0856507 C2.88959832,19.7385967 1.79768657,19.5650723 0,19.5650723 L0,17.4226919 C3.50215975,17.2758613 6.25191314,16.4683055 8.24934266,15 L10.3666074,15 L10.3666074,37.865406 C10.3666074,38.786434 10.5164123,39.4404875 10.8160268,39.8275862 C11.1156412,40.2146849 11.764796,40.4082314 12.7635108,40.4082314 L13.7622206,40.4082314 L13.7622206,42.6307008 L0.379509711,42.6307008 Z M0.0798967812,77.9873934 L0.0798967812,76.0852799 C7.27064304,69.5312983 10.8659622,63.5046623 10.8659622,58.005191 C10.8659622,56.4434479 10.5397203,55.195407 9.88722667,54.2610308 C9.23473303,53.3266546 8.36253522,52.8594735 7.27060709,52.8594735 C6.3784219,52.8594735 5.61608107,53.1764892 4.98356173,53.8105302 C4.35104238,54.4445712 4.03478745,55.1753759 4.03478745,56.0029663 C4.03478745,56.9773871 4.28113339,57.8316611 4.77383268,58.5658139 C4.88036225,58.7259926 4.93362624,58.8461249 4.93362624,58.9262143 C4.93362624,59.0730449 4.77383427,59.2065252 4.45424555,59.3266593 C4.2411864,59.4067486 3.70188852,59.6336652 2.83633573,60.0074156 C1.99741533,60.3811661 1.47809145,60.5680386 1.2783485,60.5680386 C1.03865696,60.5680386 0.765679018,60.1976307 0.459406492,59.4568039 C0.153133966,58.715977 0,57.9184322 0,57.0641453 C0,55.1153036 0.848894811,53.5202138 2.5467099,52.2788283 C4.24452499,51.0374428 6.34512352,50.4167594 8.84856852,50.4167594 C11.3120649,50.4167594 13.3793735,51.0874979 15.0505562,52.4289952 C16.7217389,53.7704924 17.5573177,55.5224215 17.5573177,57.684835 C17.5573177,58.9662652 17.2743527,60.2076321 16.7084144,61.4089729 C16.142476,62.6103138 14.7875733,64.4623531 12.6436656,66.9651465 C10.4997579,69.4679398 8.40914641,71.7804862 6.3717683,73.902855 L17.8169822,73.902855 L16.7982982,79.6292176 L14.6810335,79.6292176 C14.7609307,79.3489048 14.8008787,79.0952922 14.8008787,78.8683723 C14.8008787,78.4812736 14.7010087,78.237672 14.5012658,78.1375603 C14.3015228,78.0374485 13.9020429,77.9873934 13.3028141,77.9873934 L0.0798967812,77.9873934 Z M12.2042333,97.1935484 C13.9486551,97.2335931 15.4400468,97.8309175 16.6784531,98.9855395 C17.9168594,100.140162 18.5360532,101.75861 18.5360532,103.840934 C18.5360532,106.830938 17.4041935,109.233584 15.14044,111.048943 C12.8766866,112.864303 10.1402492,113.771969 6.93104577,113.771969 C4.92030005,113.771969 3.26245842,113.388213 1.95747114,112.62069 C0.652483855,111.853166 0,110.848727 0,109.607341 C0,108.833144 0.26964894,108.209124 0.808954909,107.735261 C1.34826088,107.261399 1.93749375,107.024472 2.57667119,107.024472 C3.21584864,107.024472 3.73850152,107.224692 4.14464552,107.625139 C4.55078953,108.025586 4.92696644,108.67964 5.27318756,109.587319 C5.73925445,110.855401 6.51158227,111.489433 7.59019421,111.489433 C8.85523291,111.489433 9.87723568,111.012241 10.6562332,110.057842 
C11.4352307,109.103444 11.8247236,107.371536 11.8247236,104.862069 C11.8247236,103.153495 11.7048796,101.838714 11.4651881,100.917686 C11.2254966,99.9966584 10.6728827,99.5361513 9.80732989,99.5361513 C9.22141723,99.5361513 8.62219737,99.843156 8.00965231,100.457175 C7.51695303,100.951059 7.07752513,101.197998 6.69135542,101.197998 C6.3584505,101.197998 6.08880156,101.051169 5.88240051,100.757508 C5.67599946,100.463847 5.57280049,100.183539 5.57280049,99.916574 C5.57280049,99.5962164 5.67599946,99.3225818 5.88240051,99.0956618 C6.08880156,98.8687419 6.57150646,98.5016711 7.33052967,97.9944383 C10.2068282,96.0722929 11.6449559,93.9766521 11.6449559,91.7074527 C11.6449559,90.5194601 11.3386879,89.615131 10.7261429,88.9944383 C10.1135978,88.3737455 9.37455999,88.0634038 8.5090072,88.0634038 C7.71003539,88.0634038 6.98431355,88.3270274 6.33181991,88.8542825 C5.67932627,89.3815377 5.35308434,90.0122321 5.35308434,90.7463849 C5.35308434,91.3871 5.60608828,91.9810874 6.11210376,92.5283648 C6.28521432,92.7285883 6.3717683,92.8954387 6.3717683,93.028921 C6.3717683,93.1490551 5.80250943,93.4560598 4.6639746,93.9499444 C3.52543978,94.4438289 2.80970494,94.6907675 2.51674861,94.6907675 C2.10394651,94.6907675 1.76771758,94.3570667 1.50805174,93.6896552 C1.24838591,93.0222436 1.11855494,92.4082342 1.11855494,91.8476085 C1.11855494,90.0989901 2.04734573,88.6240327 3.90495518,87.4226919 C5.76256463,86.2213511 7.86982116,85.6206897 10.226788,85.6206897 C12.2907985,85.6206897 14.0784711,86.0678487 15.5898594,86.9621802 C17.1012478,87.8565117 17.8569306,89.0778566 17.8569306,90.6262514 C17.8569306,91.987771 17.2876717,93.2491599 16.1491369,94.4104561 C15.0106021,95.5717522 13.6956474,96.4994404 12.2042333,97.1935484 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/locked-padlock": {
            "title": "$:/core/images/locked-padlock",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-locked-padlock tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M96.4723753,64 L105,64 L105,96.0097716 C105,113.673909 90.6736461,128 73.001193,128 L55.998807,128 C38.3179793,128 24,113.677487 24,96.0097716 L24,64 L32.0000269,64 C32.0028554,48.2766389 32.3030338,16.2688026 64.1594984,16.2688041 C95.9543927,16.2688056 96.4648869,48.325931 96.4723753,64 Z M80.5749059,64 L48.4413579,64 C48.4426205,47.71306 48.5829272,31.9999996 64.1595001,31.9999996 C79.8437473,31.9999996 81.1369461,48.1359182 80.5749059,64 Z M67.7315279,92.3641717 C70.8232551,91.0923621 73,88.0503841 73,84.5 C73,79.8055796 69.1944204,76 64.5,76 C59.8055796,76 56,79.8055796 56,84.5 C56,87.947435 58.0523387,90.9155206 61.0018621,92.2491029 L55.9067479,115.020857 L72.8008958,115.020857 L67.7315279,92.3641717 L67.7315279,92.3641717 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/mail": {
            "title": "$:/core/images/mail",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-mail tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M122.826782,104.894066 C121.945525,105.22777 120.990324,105.41043 119.993027,105.41043 L8.00697327,105.41043 C7.19458381,105.41043 6.41045219,105.289614 5.67161357,105.064967 L5.67161357,105.064967 L39.8346483,70.9019325 L60.6765759,91.7438601 C61.6118278,92.679112 62.8865166,93.0560851 64.0946097,92.8783815 C65.2975108,93.0473238 66.5641085,92.6696979 67.4899463,91.7438601 L88.5941459,70.6396605 C88.6693095,70.7292352 88.7490098,70.8162939 88.8332479,70.9005321 L122.826782,104.894066 Z M127.903244,98.6568194 C127.966933,98.2506602 128,97.8343714 128,97.4103789 L128,33.410481 C128,32.7414504 127.917877,32.0916738 127.763157,31.4706493 L94.2292399,65.0045665 C94.3188145,65.0797417 94.4058701,65.1594458 94.4901021,65.2436778 L127.903244,98.6568194 Z M0.205060636,99.2178117 C0.0709009529,98.6370366 0,98.0320192 0,97.4103789 L0,33.410481 C0,32.694007 0.0944223363,31.9995312 0.27147538,31.3387595 L0.27147538,31.3387595 L34.1777941,65.2450783 L0.205060636,99.2178117 L0.205060636,99.2178117 Z M5.92934613,25.6829218 C6.59211333,25.5051988 7.28862283,25.4104299 8.00697327,25.4104299 L119.993027,25.4104299 C120.759109,25.4104299 121.500064,25.5178649 122.201605,25.7184927 L122.201605,25.7184927 L64.0832611,83.8368368 L5.92934613,25.6829218 L5.92934613,25.6829218 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/menu-button": {
            "title": "$:/core/images/menu-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-menu-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <rect x=\"0\" y=\"16\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n    <rect x=\"0\" y=\"56\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n    <rect x=\"0\" y=\"96\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n</svg>"
        },
        "$:/core/images/mono-block": {
            "title": "$:/core/images/mono-block",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-mono-block tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M23.9653488,32.9670593 L24.3217888,32.9670593 C25.0766067,32.9670593 25.6497006,33.1592554 26.0410876,33.5436534 C26.4324747,33.9280514 26.6281653,34.4906619 26.6281653,35.2315017 C26.6281653,36.0562101 26.4219913,36.6502709 26.009637,37.0137017 C25.5972828,37.3771326 24.9158602,37.5588453 23.9653488,37.5588453 L17.6542639,37.5588453 C16.6897744,37.5588453 16.0048573,37.380627 15.5994921,37.0241852 C15.1941269,36.6677435 14.9914474,36.0701882 14.9914474,35.2315017 C14.9914474,34.4207713 15.1941269,33.8406885 15.5994921,33.4912358 C16.0048573,33.141783 16.6897744,32.9670593 17.6542639,32.9670593 L18.388111,32.9670593 L17.5284616,30.5139133 L8.47069195,30.5139133 L7.5691084,32.9670593 L8.30295547,32.9670593 C9.25346691,32.9670593 9.93488953,33.1452775 10.3472438,33.5017193 C10.759598,33.8581611 10.965772,34.4347494 10.965772,35.2315017 C10.965772,36.0562101 10.759598,36.6502709 10.3472438,37.0137017 C9.93488953,37.3771326 9.25346691,37.5588453 8.30295547,37.5588453 L2.89345418,37.5588453 C1.92896463,37.5588453 1.24404754,37.3771326 0.838682371,37.0137017 C0.433317198,36.6502709 0.230637652,36.0562101 0.230637652,35.2315017 C0.230637652,34.4906619 0.426328248,33.9280514 0.817715312,33.5436534 C1.20910238,33.1592554 1.78219626,32.9670593 2.53701417,32.9670593 L2.89345418,32.9670593 L8.51262607,17.3256331 L6.83526132,17.3256331 C5.88474988,17.3256331 5.20332727,17.1439204 4.79097304,16.7804895 C4.37861882,16.4170587 4.1724448,15.8299869 4.1724448,15.0192565 C4.1724448,14.1945481 4.37861882,13.6004873 4.79097304,13.2370565 C5.20332727,12.8736257 5.88474988,12.691913 6.83526132,12.691913 L14.6979086,12.691913 C15.9419603,12.691913 16.815579,13.3628521 17.318791,14.7047506 L17.318791,14.7676518 L23.9653488,32.9670593 Z M12.9786097,17.3256331 L9.9383861,26.1737321 L16.0188333,26.1737321 L12.9786097,17.3256331 Z M35.3809383,26.6979086 L35.3809383,33.0928616 L38.5259972,33.0928616 C40.7485166,33.0928616 42.3140414,32.8482484 43.2226185,32.3590146 C44.1311956,31.8697807 44.5854773,31.0520736 44.5854773,29.9058686 C44.5854773,28.7456855 44.1521624,27.9209895 43.2855197,27.4317556 C42.4188769,26.9425218 40.9022748,26.6979086 38.7356678,26.6979086 L35.3809383,26.6979086 Z M46.0741385,24.370565 C47.5977525,24.9296893 48.7159844,25.6949794 49.428868,26.666458 C50.1417516,27.6379366 50.498188,28.8784752 50.498188,30.388111 C50.498188,31.6601189 50.1906743,32.8202846 49.5756374,33.8686428 C48.9606006,34.917001 48.0799929,35.7766419 46.933788,36.4475911 C46.2628387,36.8389782 45.5115266,37.1220307 44.6798291,37.296757 C43.8481316,37.4714834 42.6704935,37.5588453 41.1468796,37.5588453 L39.3856466,37.5588453 L30.2020747,37.5588453 C29.2795194,37.5588453 28.6190637,37.3771326 28.2206876,37.0137017 C27.8223114,36.6502709 27.6231264,36.0562101 27.6231264,35.2315017 C27.6231264,34.4906619 27.811828,33.9280514 28.189237,33.5436534 C28.5666459,33.1592554 29.118773,32.9670593 29.8456347,32.9670593 L30.2020747,32.9670593 L30.2020747,17.3256331 L29.8456347,17.3256331 C29.118773,17.3256331 28.5666459,17.1299425 28.189237,16.7385554 C27.811828,16.3471683 27.6231264,15.7740744 27.6231264,15.0192565 C27.6231264,14.2085262 27.8258059,13.6179599 28.2311711,13.24754 C28.6365363,12.8771201 29.2934976,12.691913 30.2020747,12.691913 L39.8469219,12.691913 C42.796303,12.691913 45.0362615,13.2650068 46.5668644,14.4112118 C48.0974674,15.5574168 
48.8627574,17.2347648 48.8627574,19.443306 C48.8627574,20.5335986 48.6286276,21.4945792 48.1603609,22.3262767 C47.6920943,23.1579742 46.9966938,23.8393968 46.0741385,24.370565 L46.0741385,24.370565 Z M35.3809383,17.1998307 L35.3809383,22.4835296 L38.2114913,22.4835296 C39.9307988,22.4835296 41.1433816,22.2808501 41.8492761,21.8754849 C42.5551706,21.4701197 42.9081126,20.7852027 42.9081126,19.8207131 C42.9081126,18.912136 42.5901154,18.2481858 41.9541114,17.8288425 C41.3181074,17.4094992 40.2872373,17.1998307 38.8614701,17.1998307 L35.3809383,17.1998307 Z M71.244119,13.3838259 C71.5236812,12.880614 71.8102281,12.5241775 72.1037684,12.3145059 C72.3973087,12.1048342 72.7677231,12 73.2150226,12 C73.8999499,12 74.3856819,12.1817127 74.6722332,12.5451435 C74.9587844,12.9085744 75.1020579,13.5305909 75.1020579,14.4112118 L75.143992,19.8626472 C75.143992,20.8271368 74.9867406,21.4771091 74.6722332,21.8125837 C74.3577257,22.1480584 73.7881263,22.3157932 72.9634178,22.3157932 C72.3763372,22.3157932 71.92555,22.1760142 71.6110425,21.896452 C71.2965351,21.6168898 71.0274605,21.0997075 70.8038107,20.3448896 C70.4403799,19.0169692 69.8602971,18.0629775 69.0635448,17.482886 C68.2667926,16.9027945 67.1625385,16.612753 65.7507494,16.612753 C63.5981206,16.612753 61.9487284,17.3396038 60.8025235,18.7933272 C59.6563185,20.2470506 59.0832246,22.3507245 59.0832246,25.104412 C59.0832246,27.8441215 59.6633074,29.9477954 60.8234905,31.4154969 C61.9836736,32.8831984 63.6400547,33.6170381 65.7926836,33.6170381 C67.2603851,33.6170381 68.878327,33.1278116 70.6465578,32.149344 C72.4147886,31.1708763 73.5295261,30.6816498 73.9908037,30.6816498 C74.53595,30.6816498 74.9937262,30.9122852 75.3641461,31.3735628 C75.734566,31.8348404 75.9197732,32.4079343 75.9197732,33.0928616 C75.9197732,34.3229353 74.836486,35.4831009 72.669879,36.5733935 C70.5032721,37.663686 68.0641285,38.2088241 65.3523753,38.2088241 C61.6901107,38.2088241 58.7267959,36.9997358 56.4623422,34.5815228 C54.1978885,32.1633099 53.0656786,29.0043046 53.0656786,25.104412 C53.0656786,21.3443006 54.2118664,18.22024 56.5042763,15.7321366 C58.7966863,13.2440331 61.7040894,12 65.226573,12 C66.2190187,12 67.1974717,12.1118232 68.1619613,12.3354729 C69.1264508,12.5591227 70.1538264,12.9085702 71.244119,13.3838259 L71.244119,13.3838259 Z M81.4645862,32.9670593 L81.4645862,17.3256331 L81.1081461,17.3256331 C80.3533282,17.3256331 79.7802344,17.1299425 79.3888473,16.7385554 C78.9974602,16.3471683 78.8017696,15.7740744 78.8017696,15.0192565 C78.8017696,14.2085262 79.0114381,13.6179599 79.4307814,13.24754 C79.8501247,12.8771201 80.5280528,12.691913 81.4645862,12.691913 L85.4063933,12.691913 L86.6434498,12.691913 C89.5648747,12.691913 91.7034933,12.8177141 93.0593699,13.06932 C94.4152465,13.320926 95.5684233,13.740263 96.5189347,14.3273436 C98.210286,15.3337675 99.5067362,16.7699967 100.408324,18.6360743 C101.309912,20.5021519 101.7607,22.6582429 101.7607,25.104412 C101.7607,27.6903623 101.247012,29.9512876 100.219621,31.8872557 C99.1922296,33.8232239 97.7350336,35.2874089 95.8479888,36.2798546 C94.9953241,36.7271541 93.9959043,37.0521403 92.8496993,37.2548229 C91.7034944,37.4575055 89.9981906,37.5588453 87.7337369,37.5588453 L85.4063933,37.5588453 L81.4645862,37.5588453 C80.5000966,37.5588453 79.8151795,37.380627 79.4098143,37.0241852 C79.0044492,36.6677435 78.8017696,36.0701882 78.8017696,35.2315017 C78.8017696,34.4906619 78.9974602,33.9280514 79.3888473,33.5436534 C79.7802344,33.1592554 80.3533282,32.9670593 81.1081461,32.9670593 L81.4645862,32.9670593 Z 
M86.8740874,17.2417648 L86.8740874,32.9670593 L88.0692098,32.9670593 C90.7110725,32.9670593 92.6609895,32.3205814 93.9190194,31.0276063 C95.1770492,29.7346312 95.8060547,27.7462749 95.8060547,25.0624779 C95.8060547,22.4206153 95.1665658,20.4497314 93.8875688,19.1497672 C92.6085718,17.849803 90.6831161,17.1998307 88.1111439,17.1998307 C87.7756693,17.1998307 87.5205727,17.2033252 87.3458463,17.2103142 C87.1711199,17.2173033 87.0138685,17.2277867 86.8740874,17.2417648 L86.8740874,17.2417648 Z M121.94052,17.1159625 L112.190837,17.1159625 L112.190837,22.4835296 L115.88104,22.4835296 L115.88104,22.2319249 C115.88104,21.4351727 116.055763,20.841112 116.405216,20.4497249 C116.754669,20.0583378 117.285829,19.8626472 117.998713,19.8626472 C118.627728,19.8626472 119.141415,20.0408655 119.539792,20.3973072 C119.938168,20.753749 120.137353,21.2045363 120.137353,21.7496826 C120.137353,21.7776388 120.144342,21.8684951 120.15832,22.0222543 C120.172298,22.1760135 120.179287,22.3297704 120.179287,22.4835296 L120.179287,26.8237109 C120.179287,27.7602442 120.011552,28.4311834 119.676077,28.8365486 C119.340603,29.2419138 118.795465,29.4445933 118.040647,29.4445933 C117.327763,29.4445933 116.789614,29.2558917 116.426183,28.8784827 C116.062752,28.5010738 115.88104,27.9419578 115.88104,27.201118 L115.88104,26.8237109 L112.190837,26.8237109 L112.190837,33.0928616 L121.94052,33.0928616 L121.94052,30.5977816 C121.94052,29.6612482 122.118738,28.9903091 122.47518,28.5849439 C122.831622,28.1795787 123.415199,27.9768992 124.225929,27.9768992 C125.022682,27.9768992 125.592281,28.1760842 125.934745,28.5744604 C126.277208,28.9728365 126.448438,29.6472701 126.448438,30.5977816 L126.448438,35.6718099 C126.448438,36.4266278 126.30167,36.9298322 126.008129,37.1814382 C125.714589,37.4330442 125.134506,37.5588453 124.267863,37.5588453 L107.095842,37.5588453 C106.173287,37.5588453 105.512831,37.3771326 105.114455,37.0137017 C104.716079,36.6502709 104.516894,36.0562101 104.516894,35.2315017 C104.516894,34.4906619 104.705595,33.9280514 105.083004,33.5436534 C105.460413,33.1592554 106.01254,32.9670593 106.739402,32.9670593 L107.095842,32.9670593 L107.095842,17.3256331 L106.739402,17.3256331 C106.026518,17.3256331 105.477886,17.126448 105.093488,16.7280719 C104.70909,16.3296957 104.516894,15.7600963 104.516894,15.0192565 C104.516894,14.2085262 104.719573,13.6179599 105.124938,13.24754 C105.530304,12.8771201 106.187265,12.691913 107.095842,12.691913 L124.267863,12.691913 C125.120528,12.691913 125.697116,12.8212085 125.997646,13.0798036 C126.298175,13.3383986 126.448438,13.8520864 126.448438,14.6208824 L126.448438,19.3175037 C126.448438,20.2680151 126.273714,20.9494377 125.924261,21.361792 C125.574808,21.7741462 125.008703,21.9803202 124.225929,21.9803202 C123.415199,21.9803202 122.831622,21.7706517 122.47518,21.3513084 C122.118738,20.9319652 121.94052,20.254037 121.94052,19.3175037 L121.94052,17.1159625 Z M19.7719369,47.6405477 C20.037521,47.1373358 20.3205734,46.7808993 20.6211028,46.5712277 C20.9216322,46.361556 21.295541,46.2567218 21.7428405,46.2567218 C22.4277678,46.2567218 22.9134998,46.4384345 23.2000511,46.8018653 C23.4866023,47.1652962 23.6298758,47.7873127 23.6298758,48.6679336 L23.6718099,54.119369 C23.6718099,55.0838586 23.5145586,55.7338309 23.2000511,56.0693055 C22.8855436,56.4047802 22.3089553,56.572515 21.4702687,56.572515 C20.8831881,56.572515 20.4254119,56.4292415 20.0969263,56.1426902 C19.7684407,55.856139 19.4993662,55.3424512 19.2896945,54.6016114 C18.9122856,53.2597129 18.3322027,52.3022267 17.5494286,51.7291243 
C16.7666545,51.1560218 15.6693894,50.8694748 14.2576003,50.8694748 C12.1049715,50.8694748 10.4590738,51.5963256 9.31985785,53.050049 C8.18064193,54.5037724 7.61104252,56.6074463 7.61104252,59.3611338 C7.61104252,62.1148214 8.20859773,64.2429566 9.40372609,65.7456034 C10.5988544,67.2482501 12.2936748,67.9995623 14.488238,67.9995623 C14.9914499,67.9995623 15.5645438,67.9401562 16.2075368,67.8213423 C16.8505299,67.7025283 17.6053364,67.5173212 18.4719792,67.2657152 L18.4719792,63.9529198 L16.1027015,63.9529198 C15.1521901,63.9529198 14.4777564,63.7781961 14.0793803,63.4287433 C13.6810042,63.0792906 13.4818191,62.4992078 13.4818191,61.6884774 C13.4818191,60.8497908 13.6810042,60.2522356 14.0793803,59.8957938 C14.4777564,59.5393521 15.1521901,59.3611338 16.1027015,59.3611338 L23.6718099,59.3611338 C24.6502776,59.3611338 25.3386891,59.5358576 25.7370653,59.8853103 C26.1354414,60.2347631 26.3346265,60.8218348 26.3346265,61.6465433 C26.3346265,62.3873831 26.1354414,62.9569825 25.7370653,63.3553586 C25.3386891,63.7537347 24.7621008,63.9529198 24.0072829,63.9529198 L23.6718099,63.9529198 L23.6718099,68.9430799 L23.6718099,69.1946846 C23.6718099,69.6419841 23.6228873,69.9529924 23.5250405,70.1277188 C23.4271937,70.3024451 23.2315031,70.4806634 22.9379628,70.6623788 C22.1412106,71.1376345 20.8762107,71.5569715 19.1429251,71.9204023 C17.4096396,72.2838332 15.6554131,72.4655459 13.8801932,72.4655459 C10.2179286,72.4655459 7.25461383,71.2564576 4.99016011,68.8382446 C2.72570638,66.4200317 1.59349651,63.2610264 1.59349651,59.3611338 C1.59349651,55.6010224 2.73968428,52.4769618 5.03209423,49.9888583 C7.32450417,47.5007549 10.2319073,46.2567218 13.7543909,46.2567218 C14.7328585,46.2567218 15.7078171,46.368545 16.6792957,46.5921947 C17.6507743,46.8158445 18.6816444,47.165292 19.7719369,47.6405477 L19.7719369,47.6405477 Z M35.611576,51.5823548 L35.611576,56.4047785 L42.4678043,56.4047785 L42.4678043,51.5823548 L42.1323314,51.5823548 C41.3775135,51.5823548 40.8009251,51.3866642 40.402549,50.9952772 C40.0041729,50.6038901 39.8049878,50.0307962 39.8049878,49.2759783 C39.8049878,48.4512699 40.0111618,47.8572091 40.4235161,47.4937783 C40.8358703,47.1303474 41.5172929,46.9486347 42.4678043,46.9486347 L47.8773056,46.9486347 C48.8278171,46.9486347 49.5022507,47.1303474 49.9006269,47.4937783 C50.299003,47.8572091 50.498188,48.4512699 50.498188,49.2759783 C50.498188,50.0307962 50.3059919,50.6038901 49.9215939,50.9952772 C49.5371959,51.3866642 48.9745854,51.5823548 48.2337456,51.5823548 L47.8773056,51.5823548 L47.8773056,67.2237811 L48.2337456,67.2237811 C48.9885636,67.2237811 49.5616574,67.4159772 49.9530445,67.8003752 C50.3444316,68.1847732 50.5401222,68.7473837 50.5401222,69.4882235 C50.5401222,70.3129319 50.3374426,70.9069927 49.9320774,71.2704235 C49.5267123,71.6338543 48.8417952,71.815567 47.8773056,71.815567 L42.4678043,71.815567 C41.5033148,71.815567 40.8183977,71.6373488 40.4130325,71.280907 C40.0076674,70.9244652 39.8049878,70.32691 39.8049878,69.4882235 C39.8049878,68.7473837 40.0041729,68.1847732 40.402549,67.8003752 C40.8009251,67.4159772 41.3775135,67.2237811 42.1323314,67.2237811 L42.4678043,67.2237811 L42.4678043,61.0384986 L35.611576,61.0384986 L35.611576,67.2237811 L35.9470489,67.2237811 C36.7018668,67.2237811 37.2784552,67.4159772 37.6768313,67.8003752 C38.0752074,68.1847732 38.2743925,68.7473837 38.2743925,69.4882235 C38.2743925,70.3129319 38.0682185,70.9069927 37.6558642,71.2704235 C37.24351,71.6338543 36.5620874,71.815567 35.611576,71.815567 L30.2020747,71.815567 C29.2375851,71.815567 
28.552668,71.6373488 28.1473029,71.280907 C27.7419377,70.9244652 27.5392581,70.32691 27.5392581,69.4882235 C27.5392581,68.7473837 27.7349487,68.1847732 28.1263358,67.8003752 C28.5177229,67.4159772 29.0908168,67.2237811 29.8456347,67.2237811 L30.2020747,67.2237811 L30.2020747,51.5823548 L29.8456347,51.5823548 C29.1047949,51.5823548 28.5421844,51.3866642 28.1577864,50.9952772 C27.7733884,50.6038901 27.5811923,50.0307962 27.5811923,49.2759783 C27.5811923,48.4512699 27.7803773,47.8572091 28.1787534,47.4937783 C28.5771296,47.1303474 29.2515632,46.9486347 30.2020747,46.9486347 L35.611576,46.9486347 C36.5481093,46.9486347 37.2260374,47.1303474 37.6453807,47.4937783 C38.064724,47.8572091 38.2743925,48.4512699 38.2743925,49.2759783 C38.2743925,50.0307962 38.0752074,50.6038901 37.6768313,50.9952772 C37.2784552,51.3866642 36.7018668,51.5823548 35.9470489,51.5823548 L35.611576,51.5823548 Z M67.365213,51.5823548 L67.365213,67.2237811 L70.887679,67.2237811 C71.8381904,67.2237811 72.519613,67.4019993 72.9319673,67.7584411 C73.3443215,68.1148829 73.5504955,68.6914712 73.5504955,69.4882235 C73.5504955,70.2989538 73.340827,70.8895201 72.9214837,71.25994 C72.5021404,71.6303599 71.8242123,71.815567 70.887679,71.815567 L58.4332458,71.815567 C57.4827343,71.815567 56.8013117,71.6338543 56.3889575,71.2704235 C55.9766033,70.9069927 55.7704292,70.3129319 55.7704292,69.4882235 C55.7704292,68.6774931 55.9731088,68.0974103 56.378474,67.7479575 C56.7838391,67.3985048 57.4687562,67.2237811 58.4332458,67.2237811 L61.9557117,67.2237811 L61.9557117,51.5823548 L58.4332458,51.5823548 C57.4827343,51.5823548 56.8013117,51.4006421 56.3889575,51.0372113 C55.9766033,50.6737805 55.7704292,50.0867087 55.7704292,49.2759783 C55.7704292,48.4512699 55.9731088,47.8641981 56.378474,47.5147453 C56.7838391,47.1652926 57.4687562,46.9905689 58.4332458,46.9905689 L70.887679,46.9905689 C71.8801247,46.9905689 72.5720308,47.1652926 72.9634178,47.5147453 C73.3548049,47.8641981 73.5504955,48.4512699 73.5504955,49.2759783 C73.5504955,50.0867087 73.347816,50.6737805 72.9424508,51.0372113 C72.5370856,51.4006421 71.8521685,51.5823548 70.887679,51.5823548 L67.365213,51.5823548 Z M97.8608265,51.5823548 L97.8608265,63.1771386 L97.8608265,63.5755127 C97.8608265,65.4485794 97.7385199,66.8044357 97.493903,67.6431222 C97.2492861,68.4818088 96.8404325,69.2296264 96.26733,69.8865976 C95.5264902,70.7392623 94.4991146,71.3822457 93.1851723,71.815567 C91.87123,72.2488884 90.2917273,72.4655459 88.4466169,72.4655459 C87.1466527,72.4655459 85.8921362,72.3397448 84.6830298,72.0881388 C83.4739233,71.8365328 82.3102631,71.4591296 81.1920144,70.9559176 C80.5769776,70.6763554 80.175113,70.31293 79.9864085,69.8656305 C79.797704,69.418331 79.7033532,68.6914802 79.7033532,67.6850564 L79.7033532,63.3658422 C79.7033532,62.1637247 79.8780769,61.3250508 80.2275297,60.849795 C80.5769824,60.3745393 81.185021,60.136915 82.0516638,60.136915 C83.2957156,60.136915 83.9806326,61.0524675 84.1064356,62.8835998 C84.1204137,63.2050963 84.1413806,63.4497096 84.1693368,63.6174469 C84.3370741,65.2389076 84.7144774,66.3466561 85.301558,66.9407258 C85.8886386,67.5347954 86.8251579,67.8318258 88.1111439,67.8318258 C89.7046484,67.8318258 90.8263749,67.4089943 91.476357,66.5633187 C92.126339,65.7176431 92.4513252,64.1765796 92.4513252,61.9400821 L92.4513252,51.5823548 L88.9288593,51.5823548 C87.9783478,51.5823548 87.2969252,51.4006421 86.884571,51.0372113 C86.4722168,50.6737805 86.2660427,50.0867087 86.2660427,49.2759783 C86.2660427,48.4512699 86.4652278,47.8641981 86.8636039,47.5147453 
C87.26198,47.1652926 87.9503916,46.9905689 88.9288593,46.9905689 L99.6220595,46.9905689 C100.600527,46.9905689 101.288939,47.1652926 101.687315,47.5147453 C102.085691,47.8641981 102.284876,48.4512699 102.284876,49.2759783 C102.284876,50.0867087 102.078702,50.6737805 101.666348,51.0372113 C101.253994,51.4006421 100.572571,51.5823548 99.6220595,51.5823548 L97.8608265,51.5823548 Z M112.505343,51.5823548 L112.505343,57.9353738 L118.984165,51.4565525 C118.257303,51.3726838 117.747109,51.1665098 117.453569,50.8380242 C117.160029,50.5095387 117.013261,49.9888619 117.013261,49.2759783 C117.013261,48.4512699 117.212446,47.8572091 117.610822,47.4937783 C118.009198,47.1303474 118.683632,46.9486347 119.634143,46.9486347 L124.771073,46.9486347 C125.721584,46.9486347 126.396018,47.1303474 126.794394,47.4937783 C127.19277,47.8572091 127.391955,48.4512699 127.391955,49.2759783 C127.391955,50.0447743 127.19277,50.6213627 126.794394,51.0057607 C126.396018,51.3901587 125.812441,51.5823548 125.043645,51.5823548 L124.561402,51.5823548 L118.459988,57.641835 C119.592215,58.4805215 120.626579,59.5812811 121.563113,60.9441468 C122.499646,62.3070125 123.596911,64.400203 124.854941,67.2237811 L125.127513,67.2237811 L125.546854,67.2237811 C126.371563,67.2237811 126.98659,67.4124827 127.391955,67.7898917 C127.79732,68.1673006 128,68.7334056 128,69.4882235 C128,70.3129319 127.793826,70.9069927 127.381472,71.2704235 C126.969118,71.6338543 126.287695,71.815567 125.337183,71.815567 L122.758235,71.815567 C121.626008,71.815567 120.710456,71.0537715 120.01155,69.5301576 C119.885747,69.2505954 119.787902,69.026949 119.718012,68.8592117 C118.795456,66.9022764 117.949793,65.3926632 117.180997,64.3303269 C116.412201,63.2679906 115.510627,62.2965265 114.476247,61.4159056 L112.505343,63.302941 L112.505343,67.2237811 L112.840816,67.2237811 C113.595634,67.2237811 114.172222,67.4159772 114.570599,67.8003752 C114.968975,68.1847732 115.16816,68.7473837 115.16816,69.4882235 C115.16816,70.3129319 114.961986,70.9069927 114.549631,71.2704235 C114.137277,71.6338543 113.455855,71.815567 112.505343,71.815567 L107.095842,71.815567 C106.131352,71.815567 105.446435,71.6373488 105.04107,71.280907 C104.635705,70.9244652 104.433025,70.32691 104.433025,69.4882235 C104.433025,68.7473837 104.628716,68.1847732 105.020103,67.8003752 C105.41149,67.4159772 105.984584,67.2237811 106.739402,67.2237811 L107.095842,67.2237811 L107.095842,51.5823548 L106.739402,51.5823548 C105.998562,51.5823548 105.435952,51.3866642 105.051554,50.9952772 C104.667156,50.6038901 104.474959,50.0307962 104.474959,49.2759783 C104.474959,48.4512699 104.674145,47.8572091 105.072521,47.4937783 C105.470897,47.1303474 106.14533,46.9486347 107.095842,46.9486347 L112.505343,46.9486347 C113.441877,46.9486347 114.119805,47.1303474 114.539148,47.4937783 C114.958491,47.8572091 115.16816,48.4512699 115.16816,49.2759783 C115.16816,50.0307962 114.968975,50.6038901 114.570599,50.9952772 C114.172222,51.3866642 113.595634,51.5823548 112.840816,51.5823548 L112.505343,51.5823548 Z M13.439885,96.325622 L17.4445933,84.4372993 C17.6961993,83.6545252 18.0456468,83.0849258 18.4929463,82.728484 C18.9402458,82.3720422 19.5343065,82.193824 20.2751463,82.193824 L23.5460076,82.193824 C24.496519,82.193824 25.1779416,82.3755367 25.5902958,82.7389675 C26.0026501,83.1023984 26.2088241,83.6964591 26.2088241,84.5211676 C26.2088241,85.2759855 26.009639,85.8490794 25.6112629,86.2404664 C25.2128868,86.6318535 24.6362984,86.8275441 23.8814805,86.8275441 L23.5460076,86.8275441 L24.1330852,102.46897 L24.4895252,102.46897 
C25.2443431,102.46897 25.8104481,102.661166 26.187857,103.045564 C26.565266,103.429962 26.7539676,103.992573 26.7539676,104.733413 C26.7539676,105.558121 26.5547826,106.152182 26.1564064,106.515613 C25.7580303,106.879044 25.0835967,107.060756 24.1330852,107.060756 L19.4154969,107.060756 C18.4649855,107.060756 17.7905518,106.882538 17.3921757,106.526096 C16.9937996,106.169654 16.7946145,105.572099 16.7946145,104.733413 C16.7946145,103.992573 16.9868106,103.429962 17.3712086,103.045564 C17.7556066,102.661166 18.325206,102.46897 19.0800239,102.46897 L19.4154969,102.46897 L19.1219581,89.6790642 L16.0607674,99.1981091 C15.8371177,99.9109927 15.5191204,100.42468 15.1067662,100.739188 C14.694412,101.053695 14.1248126,101.210947 13.3979509,101.210947 C12.6710892,101.210947 12.0945008,101.053695 11.6681685,100.739188 C11.2418362,100.42468 10.91685,99.9109927 10.6932002,99.1981091 L7.65297664,89.6790642 L7.35943781,102.46897 L7.69491075,102.46897 C8.44972866,102.46897 9.01932808,102.661166 9.40372609,103.045564 C9.78812409,103.429962 9.98032022,103.992573 9.98032022,104.733413 C9.98032022,105.558121 9.77764067,106.152182 9.3722755,106.515613 C8.96691032,106.879044 8.29597114,107.060756 7.35943781,107.060756 L2.62088241,107.060756 C1.68434908,107.060756 1.01340989,106.879044 0.608044719,106.515613 C0.202679546,106.152182 0,105.558121 0,104.733413 C0,103.992573 0.192196121,103.429962 0.57659413,103.045564 C0.960992139,102.661166 1.53059155,102.46897 2.28540946,102.46897 L2.62088241,102.46897 L3.22892713,86.8275441 L2.89345418,86.8275441 C2.13863627,86.8275441 1.56204791,86.6318535 1.16367179,86.2404664 C0.765295672,85.8490794 0.5661106,85.2759855 0.5661106,84.5211676 C0.5661106,83.6964591 0.772284622,83.1023984 1.18463885,82.7389675 C1.59699308,82.3755367 2.27841569,82.193824 3.22892713,82.193824 L6.49978838,82.193824 C7.22665007,82.193824 7.81022738,82.3685477 8.25053783,82.7180005 C8.69084827,83.0674532 9.05077919,83.6405471 9.33034138,84.4372993 L13.439885,96.325622 Z M43.8935644,98.3803938 L43.8935644,86.8275441 L42.7403761,86.8275441 C41.8178209,86.8275441 41.1573651,86.6458314 40.758989,86.2824006 C40.3606129,85.9189697 40.1614278,85.3318979 40.1614278,84.5211676 C40.1614278,83.7104372 40.3606129,83.119871 40.758989,82.7494511 C41.1573651,82.3790312 41.8178209,82.193824 42.7403761,82.193824 L48.6950209,82.193824 C49.6035981,82.193824 50.2605593,82.3790312 50.6659245,82.7494511 C51.0712897,83.119871 51.2739692,83.7104372 51.2739692,84.5211676 C51.2739692,85.2620074 51.0817731,85.8316068 50.6973751,86.2299829 C50.3129771,86.628359 49.7643445,86.8275441 49.051461,86.8275441 L48.6950209,86.8275441 L48.6950209,105.865634 C48.6950209,106.522605 48.6251315,106.934953 48.4853504,107.10269 C48.3455693,107.270428 48.0310665,107.354295 47.5418327,107.354295 L45.4451268,107.354295 C44.7741775,107.354295 44.3024234,107.284406 44.0298503,107.144625 C43.7572771,107.004843 43.5231473,106.76023 43.3274538,106.410777 L34.6051571,91.0838571 L34.6051571,102.46897 L35.8212466,102.46897 C36.7298237,102.46897 37.379796,102.643694 37.7711831,102.993147 C38.1625701,103.3426 38.3582607,103.922682 38.3582607,104.733413 C38.3582607,105.558121 38.1590757,106.152182 37.7606995,106.515613 C37.3623234,106.879044 36.7158456,107.060756 35.8212466,107.060756 L29.8037005,107.060756 C28.8951234,107.060756 28.2381621,106.879044 27.832797,106.515613 C27.4274318,106.152182 27.2247522,105.558121 27.2247522,104.733413 C27.2247522,103.992573 27.4134539,103.429962 27.7908629,103.045564 C28.1682718,102.661166 28.7273878,102.46897 
29.4682276,102.46897 L29.8037005,102.46897 L29.8037005,86.8275441 L29.4682276,86.8275441 C28.755344,86.8275441 28.203217,86.628359 27.8118299,86.2299829 C27.4204428,85.8316068 27.2247522,85.2620074 27.2247522,84.5211676 C27.2247522,83.7104372 27.4309263,83.119871 27.8432805,82.7494511 C28.2556347,82.3790312 28.9091015,82.193824 29.8037005,82.193824 L33.2422983,82.193824 C34.0670067,82.193824 34.6261227,82.3021527 34.919663,82.5188134 C35.2132033,82.7354741 35.5416839,83.1722835 35.9051148,83.8292546 L43.8935644,98.3803938 Z M64.6604624,86.3662688 C62.8572863,86.3662688 61.4420239,87.0931196 60.4146329,88.546843 C59.3872418,90.0005663 58.873554,92.0203728 58.873554,94.6063231 C58.873554,97.1922733 59.3907363,99.2190688 60.4251164,100.68677 C61.4594965,102.154472 62.8712644,102.888312 64.6604624,102.888312 C66.4636385,102.888312 67.8823953,102.157966 68.9167754,100.697254 C69.9511555,99.2365414 70.4683378,97.2062514 70.4683378,94.6063231 C70.4683378,92.0203728 69.95465,90.0005663 68.9272589,88.546843 C67.8998679,87.0931196 66.4776166,86.3662688 64.6604624,86.3662688 L64.6604624,86.3662688 Z M64.6604624,81.501911 C68.0990773,81.501911 70.929602,82.7319662 73.1521214,85.1921135 C75.3746408,87.6522607 76.4858838,90.7902992 76.4858838,94.6063231 C76.4858838,98.4503032 75.3816297,101.595331 73.1730884,104.0415 C70.9645471,106.487669 68.1270335,107.710735 64.6604624,107.710735 C61.2358256,107.710735 58.4053009,106.477185 56.1688034,104.010049 C53.9323059,101.542913 52.8140739,98.4083688 52.8140739,94.6063231 C52.8140739,90.7763211 53.9218224,87.6347881 56.1373528,85.1816299 C58.3528831,82.7284717 61.1938912,81.501911 64.6604624,81.501911 L64.6604624,81.501911 Z M87.4611651,98.1707232 L87.4611651,102.46897 L89.6207722,102.46897 C90.5293493,102.46897 91.1758272,102.643694 91.5602252,102.993147 C91.9446232,103.3426 92.1368193,103.922682 92.1368193,104.733413 C92.1368193,105.558121 91.9411287,106.152182 91.5497417,106.515613 C91.1583546,106.879044 90.5153712,107.060756 89.6207722,107.060756 L82.3661697,107.060756 C81.4436145,107.060756 80.7831587,106.879044 80.3847826,106.515613 C79.9864065,106.152182 79.7872214,105.558121 79.7872214,104.733413 C79.7872214,103.992573 79.9759231,103.429962 80.353332,103.045564 C80.730741,102.661166 81.282868,102.46897 82.0097297,102.46897 L82.3661697,102.46897 L82.3661697,86.8275441 L82.0097297,86.8275441 C81.2968461,86.8275441 80.7482136,86.628359 80.3638155,86.2299829 C79.9794175,85.8316068 79.7872214,85.2620074 79.7872214,84.5211676 C79.7872214,83.7104372 79.989901,83.119871 80.3952661,82.7494511 C80.8006313,82.3790312 81.4575926,82.193824 82.3661697,82.193824 L91.0255652,82.193824 C94.450202,82.193824 97.0396079,82.8507853 98.7938606,84.1647276 C100.548113,85.4786699 101.425227,87.414609 101.425227,89.972603 C101.425227,92.6703781 100.551608,94.7111515 98.8043442,96.0949843 C97.0570805,97.4788171 94.4641801,98.1707232 91.0255652,98.1707232 L87.4611651,98.1707232 Z M87.4611651,86.8275441 L87.4611651,93.4531348 L90.4384875,93.4531348 C92.0879044,93.4531348 93.328443,93.1735768 94.1601405,92.6144525 C94.9918381,92.0553281 95.4076806,91.2166541 95.4076806,90.0984053 C95.4076806,89.0500471 94.9778602,88.2428234 94.1182064,87.67671 C93.2585527,87.1105966 92.031992,86.8275441 90.4384875,86.8275441 L87.4611651,86.8275441 Z M114.727851,107.396229 L113.092421,109.03166 C113.69348,108.835966 114.284046,108.689198 114.864137,108.591352 C115.444229,108.493505 116.013828,108.444582 116.572953,108.444582 C117.677223,108.444582 118.840883,108.608823 120.063968,108.937308 
C121.287053,109.265794 122.031376,109.430034 122.29696,109.430034 C122.744259,109.430034 123.327837,109.279772 124.047709,108.979242 C124.767582,108.678713 125.253314,108.52845 125.50492,108.52845 C126.02211,108.52845 126.45193,108.727636 126.794394,109.126012 C127.136858,109.524388 127.308087,110.024098 127.308087,110.625156 C127.308087,111.421909 126.836333,112.099837 125.892811,112.658961 C124.949288,113.218086 123.792617,113.497643 122.422762,113.497643 C121.486229,113.497643 120.28413,113.277492 118.816428,112.837181 C117.348727,112.396871 116.286406,112.176719 115.629435,112.176719 C114.636989,112.176719 113.518757,112.449288 112.274706,112.994434 C111.030654,113.53958 110.261869,113.812149 109.968329,113.812149 C109.36727,113.812149 108.857077,113.612964 108.437734,113.214588 C108.01839,112.816212 107.808722,112.337469 107.808722,111.778345 C107.808722,111.386958 107.941512,110.971115 108.207096,110.530805 C108.47268,110.090494 108.94094,109.520895 109.611889,108.821989 L111.729562,106.683349 C109.395218,105.830685 107.536157,104.29661 106.152324,102.08108 C104.768491,99.8655494 104.076585,97.3180772 104.076585,94.4385866 C104.076585,90.6365409 105.180839,87.5299526 107.389381,85.1187288 C109.597922,82.7075049 112.442425,81.501911 115.922974,81.501911 C119.389545,81.501911 122.227059,82.7109994 124.4356,85.1292123 C126.644141,87.5474252 127.748395,90.650519 127.748395,94.4385866 C127.748395,98.2126762 126.65113,101.322759 124.456567,103.768928 C122.262004,106.215097 119.480402,107.438163 116.111677,107.438163 C115.888028,107.438163 115.660887,107.434669 115.430248,107.42768 C115.199609,107.420691 114.965479,107.410207 114.727851,107.396229 L114.727851,107.396229 Z M115.922974,86.3662688 C114.119798,86.3662688 112.704535,87.0931196 111.677144,88.546843 C110.649753,90.0005663 110.136065,92.0203728 110.136065,94.6063231 C110.136065,97.1922733 110.653248,99.2190688 111.687628,100.68677 C112.722008,102.154472 114.133776,102.888312 115.922974,102.888312 C117.72615,102.888312 119.144907,102.157966 120.179287,100.697254 C121.213667,99.2365414 121.730849,97.2062514 121.730849,94.6063231 C121.730849,92.0203728 121.217161,90.0005663 120.18977,88.546843 C119.162379,87.0931196 117.740128,86.3662688 115.922974,86.3662688 L115.922974,86.3662688 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/mono-line": {
            "title": "$:/core/images/mono-line",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-mono-line tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M60.4374591,84.522627 L61.3450888,84.522627 C63.2671377,84.522627 64.7264493,85.0120303 65.7230673,85.9908515 C66.7196852,86.9696727 67.2179868,88.4022896 67.2179868,90.288745 C67.2179868,92.3887615 66.6929905,93.9014625 65.6429823,94.8268935 C64.5929741,95.7523244 62.857817,96.215033 60.4374591,96.215033 L44.3670747,96.215033 C41.9111232,96.215033 40.1670679,95.7612227 39.1348565,94.8535884 C38.102645,93.9459542 37.586547,92.424355 37.586547,90.288745 C37.586547,88.2243221 38.102645,86.747214 39.1348565,85.8573766 C40.1670679,84.9675391 41.9111232,84.522627 44.3670747,84.522627 L46.235724,84.522627 L44.0467348,78.2759992 L20.9822627,78.2759992 L18.6864935,84.522627 L20.5551429,84.522627 C22.9755008,84.522627 24.7106579,84.9764373 25.7606661,85.8840716 C26.8106743,86.7917058 27.3356705,88.2599156 27.3356705,90.288745 C27.3356705,92.3887615 26.8106743,93.9014625 25.7606661,94.8268935 C24.7106579,95.7523244 22.9755008,96.215033 20.5551429,96.215033 L6.78052766,96.215033 C4.32457622,96.215033 2.58052094,95.7523244 1.54830946,94.8268935 C0.516097994,93.9014625 0,92.3887615 0,90.288745 C0,88.4022896 0.498301511,86.9696727 1.49491948,85.9908515 C2.49153745,85.0120303 3.95084902,84.522627 5.87289797,84.522627 L6.78052766,84.522627 L21.0890427,44.6937008 L16.8178442,44.6937008 C14.3974863,44.6937008 12.6623292,44.2309922 11.612321,43.3055613 C10.5623128,42.3801303 10.0373165,40.8852258 10.0373165,38.8208028 C10.0373165,36.7207864 10.5623128,35.2080854 11.612321,34.2826544 C12.6623292,33.3572234 14.3974863,32.8945149 16.8178442,32.8945149 L36.8390873,32.8945149 C40.0069087,32.8945149 42.231469,34.6029772 43.512835,38.0199531 L43.512835,38.180123 L60.4374591,84.522627 Z M32.4611088,44.6937008 L24.7195615,67.224273 L40.2026561,67.224273 L32.4611088,44.6937008 Z M89.5058233,68.5590225 L89.5058233,84.8429669 L97.5143205,84.8429669 C103.173687,84.8429669 107.160099,84.22009 109.473676,82.9743176 C111.787254,81.7285451 112.944025,79.6463566 112.944025,76.7276897 C112.944025,73.7734293 111.840643,71.6734444 109.633846,70.4276719 C107.427049,69.1818994 103.565213,68.5590225 98.0482204,68.5590225 L89.5058233,68.5590225 Z M116.734714,62.6327346 C120.614405,64.0564746 123.461842,66.0051894 125.277111,68.4789376 C127.092379,70.9526857 128,74.1115614 128,77.9556593 C128,81.1946677 127.216955,84.1488838 125.650841,86.8183962 C124.084727,89.4879087 121.84237,91.676876 118.923703,93.385364 C117.215215,94.3819819 115.302093,95.1027395 113.18428,95.5476582 C111.066467,95.9925769 108.06776,96.215033 104.188068,96.215033 L99.7033098,96.215033 L76.3184979,96.215033 C73.9693269,96.215033 72.2875593,95.7523244 71.2731446,94.8268935 C70.2587299,93.9014625 69.7515301,92.3887615 69.7515301,90.288745 C69.7515301,88.4022896 70.2320352,86.9696727 71.1930596,85.9908515 C72.1540841,85.0120303 73.5600062,84.522627 75.4108682,84.522627 L76.3184979,84.522627 L76.3184979,44.6937008 L75.4108682,44.6937008 C73.5600062,44.6937008 72.1540841,44.1953993 71.1930596,43.1987813 C70.2320352,42.2021633 69.7515301,40.7428518 69.7515301,38.8208028 C69.7515301,36.7563799 70.2676281,35.2525771 71.2998396,34.3093494 C72.3320511,33.3661217 74.0049204,32.8945149 76.3184979,32.8945149 L100.877889,32.8945149 C108.388118,32.8945149 114.09189,34.3538264 117.989378,37.2724934 C121.886867,40.1911603 123.835581,44.4623161 123.835581,50.0860889 C123.835581,52.8623819 
123.239399,55.3093982 122.047017,57.4272114 C120.854635,59.5450246 119.083885,61.2801816 116.734714,62.6327346 L116.734714,62.6327346 Z M89.5058233,44.3733609 L89.5058233,57.8276363 L96.7134708,57.8276363 C101.091471,57.8276363 104.179161,57.3115383 105.976633,56.2793268 C107.774104,55.2471153 108.672827,53.50306 108.672827,51.0471086 C108.672827,48.7335312 107.863087,47.0428653 106.243583,45.9750604 C104.624078,44.9072554 101.999097,44.3733609 98.3685602,44.3733609 L89.5058233,44.3733609 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/new-button": {
            "title": "$:/core/images/new-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M56,72 L8.00697327,72 C3.59075293,72 0,68.418278 0,64 C0,59.5907123 3.58484404,56 8.00697327,56 L56,56 L56,8.00697327 C56,3.59075293 59.581722,0 64,0 C68.4092877,0 72,3.58484404 72,8.00697327 L72,56 L119.993027,56 C124.409247,56 128,59.581722 128,64 C128,68.4092877 124.415156,72 119.993027,72 L72,72 L72,119.993027 C72,124.409247 68.418278,128 64,128 C59.5907123,128 56,124.415156 56,119.993027 L56,72 L56,72 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/new-here-button": {
            "title": "$:/core/images/new-here-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-here-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n    \t<g transform=\"translate(52.233611, 64.389922) rotate(75.000000) translate(-52.233611, -64.389922) translate(-7.734417, 3.702450)\">\n\t        <path d=\"M18.9270186,45.959338 L18.9080585,49.6521741 C18.8884833,53.4648378 21.0574548,58.7482162 23.7526408,61.4434022 L78.5671839,116.257945 C81.2617332,118.952495 85.6348701,118.950391 88.3334363,116.251825 L115.863237,88.7220241 C118.555265,86.0299959 118.564544,81.6509578 115.869358,78.9557717 L61.0548144,24.1412286 C58.3602652,21.4466794 53.0787224,19.2788426 49.2595808,19.3006519 L25.9781737,19.4336012 C22.1633003,19.4553862 19.0471195,22.5673232 19.0275223,26.3842526 L18.9871663,34.2443819 C19.0818862,34.255617 19.1779758,34.2665345 19.2754441,34.2771502 C22.6891275,34.6489512 27.0485594,34.2348566 31.513244,33.2285542 C31.7789418,32.8671684 32.075337,32.5211298 32.4024112,32.1940556 C34.8567584,29.7397084 38.3789778,29.0128681 41.4406288,30.0213822 C41.5958829,29.9543375 41.7503946,29.8866669 41.9041198,29.8183808 L42.1110981,30.2733467 C43.1114373,30.6972371 44.0473796,31.3160521 44.8614145,32.1300869 C48.2842088,35.5528813 48.2555691,41.130967 44.7974459,44.5890903 C41.4339531,47.952583 36.0649346,48.0717177 32.6241879,44.9262969 C27.8170558,45.8919233 23.0726921,46.2881596 18.9270186,45.959338 Z\"></path>\n\t        <path d=\"M45.4903462,38.8768094 C36.7300141,42.6833154 26.099618,44.7997354 18.1909048,43.9383587 C7.2512621,42.7468685 1.50150083,35.8404432 4.66865776,24.7010202 C7.51507386,14.6896965 15.4908218,6.92103848 24.3842626,4.38423012 C34.1310219,1.60401701 42.4070208,6.15882777 42.4070209,16.3101169 L34.5379395,16.310117 C34.5379394,11.9285862 31.728784,10.3825286 26.5666962,11.8549876 C20.2597508,13.6540114 14.3453742,19.4148216 12.2444303,26.8041943 C10.4963869,32.9523565 12.6250796,35.5092726 19.0530263,36.2093718 C25.5557042,36.9176104 35.0513021,34.9907189 42.7038419,31.5913902 L42.7421786,31.6756595 C44.3874154,31.5384763 47.8846101,37.3706354 45.9274416,38.6772897 L45.9302799,38.6835285 C45.9166992,38.6895612 45.9031139,38.6955897 45.8895238,38.7016142 C45.8389288,38.7327898 45.7849056,38.7611034 45.7273406,38.7863919 C45.6506459,38.8200841 45.571574,38.8501593 45.4903462,38.8768094 Z\"></path>\n        </g>\n        <rect x=\"96\" y=\"80\" width=\"16\" height=\"48\" rx=\"8\"></rect>\n        <rect x=\"80\" y=\"96\" width=\"48\" height=\"16\" rx=\"8\"></rect>\n    </g>\n    </g>\n</svg>"
        },
        "$:/core/images/new-image-button": {
            "title": "$:/core/images/new-image-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-image-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M81.3619177,73.6270062 L97.1875317,46.2162388 C97.91364,44.9585822 97.4824378,43.3533085 96.2260476,42.6279312 L46.2162388,13.7547547 C44.9585822,13.0286463 43.3533085,13.4598485 42.6279312,14.7162388 L30.0575956,36.4886988 L40.0978909,31.2276186 C43.1404959,29.6333041 46.8692155,31.3421319 47.6479264,34.6877101 L51.2545483,52.3903732 L61.1353556,53.2399953 C63.2899974,53.4346096 65.1046382,54.9309951 65.706105,57.0091178 C65.7395572,57.1246982 65.8069154,57.3539875 65.9047035,57.6813669 C66.0696435,58.2335608 66.2581528,58.852952 66.4667073,59.5238092 C67.0618822,61.4383079 67.6960725,63.3742727 68.3393254,65.2021174 C68.5462918,65.7902259 68.7511789,66.3583016 68.953259,66.9034738 C69.5777086,68.5881157 70.1617856,70.0172008 70.6783305,71.110045 C70.9334784,71.6498566 71.1627732,72.0871602 71.4035746,72.5373068 C71.6178999,72.7492946 71.9508843,72.9623307 72.4151452,73.1586945 C73.5561502,73.6412938 75.1990755,73.899146 77.0720271,73.9171651 C77.9355886,73.9254732 78.7819239,73.8832103 79.5638842,73.8072782 C80.0123946,73.7637257 80.3172916,73.7224469 80.4352582,73.7027375 C80.7503629,73.6500912 81.0598053,73.6256267 81.3619177,73.6270062 L81.3619177,73.6270062 L81.3619177,73.6270062 L81.3619177,73.6270062 Z M37.4707881,2.64867269 C38.9217993,0.135447653 42.1388058,-0.723707984 44.6486727,0.725364314 L108.293614,37.4707881 C110.806839,38.9217993 111.665994,42.1388058 110.216922,44.6486727 L73.4714982,108.293614 C72.0204871,110.806839 68.8034805,111.665994 66.2936136,110.216922 L2.64867269,73.4714982 C0.135447653,72.0204871 -0.723707984,68.8034805 0.725364314,66.2936136 L37.4707881,2.64867269 L37.4707881,2.64867269 L37.4707881,2.64867269 L37.4707881,2.64867269 Z M80.3080975,53.1397764 C82.8191338,54.5895239 86.0299834,53.7291793 87.4797308,51.218143 C88.9294783,48.7071068 88.0691338,45.4962571 85.5580975,44.0465097 C83.0470612,42.5967622 79.8362116,43.4571068 78.3864641,45.968143 C76.9367166,48.4791793 77.7970612,51.6900289 80.3080975,53.1397764 L80.3080975,53.1397764 L80.3080975,53.1397764 L80.3080975,53.1397764 Z M96,112 L88.0070969,112 C83.5881712,112 80,108.418278 80,104 C80,99.5907123 83.5848994,96 88.0070969,96 L96,96 L96,88.0070969 C96,83.5881712 99.581722,80 104,80 C108.409288,80 112,83.5848994 112,88.0070969 L112,96 L119.992903,96 C124.411829,96 128,99.581722 128,104 C128,108.409288 124.415101,112 119.992903,112 L112,112 L112,119.992903 C112,124.411829 108.418278,128 104,128 C99.5907123,128 96,124.415101 96,119.992903 L96,112 L96,112 Z M33.3471097,51.7910932 C40.7754579,59.7394511 42.3564368,62.4818351 40.7958321,65.1848818 C39.2352273,67.8879286 26.9581062,62.8571718 24.7019652,66.7649227 C22.4458242,70.6726735 23.7947046,70.0228006 22.2648667,72.6725575 L41.9944593,84.0634431 C41.9944593,84.0634431 36.3904568,75.8079231 37.7602356,73.4353966 C40.2754811,69.0788636 46.5298923,72.1787882 48.1248275,69.4162793 C50.538989,65.234829 43.0222016,59.7770885 33.3471097,51.7910932 L33.3471097,51.7910932 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/new-journal-button": {
            "title": "$:/core/images/new-journal-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-new-journal-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M102.545455,112.818182 L102.545455,124.636364 L102.545455,124.636364 L102.545455,124.636364 C102.545455,125.941761 103.630828,127 104.969697,127 L111.030303,127 C112.369172,127 113.454545,125.941761 113.454545,124.636364 L113.454545,112.818182 L125.575758,112.818182 C126.914626,112.818182 128,111.759982 128,110.454545 L128,104.545455 C128,103.240018 126.914626,102.181818 125.575758,102.181818 L113.454545,102.181818 L113.454545,90.3636364 C113.454545,89.0582 112.369172,88 111.030303,88 L104.969697,88 L104.969697,88 C103.630828,88 102.545455,89.0582 102.545455,90.3636364 L102.545455,102.181818 L90.4242424,102.181818 L90.4242424,102.181818 C89.0853705,102.181818 88,103.240018 88,104.545455 L88,110.454545 L88,110.454545 L88,110.454545 C88,111.759982 89.0853705,112.818182 90.4242424,112.818182 L102.545455,112.818182 Z\"></path>\n        <g transform=\"translate(59.816987, 64.316987) rotate(30.000000) translate(-59.816987, -64.316987) translate(20.316987, 12.816987)\">\n            <g transform=\"translate(0.000000, 0.000000)\">\n                <path d=\"M9.99631148,0 C4.4755011,0 -2.27373675e-13,4.48070044 -2.27373675e-13,9.99759461 L-2.27373675e-13,91.6128884 C-2.27373675e-13,97.1344074 4.46966773,101.610483 9.99631148,101.610483 L68.9318917,101.610483 C74.4527021,101.610483 78.9282032,97.1297826 78.9282032,91.6128884 L78.9282032,9.99759461 C78.9282032,4.47607557 74.4585355,0 68.9318917,0 L9.99631148,0 Z M20.8885263,26 C24.2022348,26 26.8885263,23.3137085 26.8885263,20 C26.8885263,16.6862915 24.2022348,14 20.8885263,14 C17.5748178,14 14.8885263,16.6862915 14.8885263,20 C14.8885263,23.3137085 17.5748178,26 20.8885263,26 Z M57.3033321,25.6783342 C60.6170406,25.6783342 63.3033321,22.9920427 63.3033321,19.6783342 C63.3033321,16.3646258 60.6170406,13.6783342 57.3033321,13.6783342 C53.9896236,13.6783342 51.3033321,16.3646258 51.3033321,19.6783342 C51.3033321,22.9920427 53.9896236,25.6783342 57.3033321,25.6783342 Z\"></path>\n                <text font-family=\"Helvetica\" font-size=\"47.1724138\" font-weight=\"bold\" fill=\"#FFFFFF\">\n                    <tspan x=\"42\" y=\"77.4847912\" text-anchor=\"middle\"><<now \"DD\">></tspan>\n                </text>\n            </g>\n        </g>\n    </g>\n</svg>"
        },
        "$:/core/images/opacity": {
            "title": "$:/core/images/opacity",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-opacity tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M102.361773,65 C101.833691,67.051742 101.183534,69.0544767 100.419508,71 L82.5835324,71 C83.7602504,69.1098924 84.7666304,67.1027366 85.581205,65 L102.361773,65 Z M102.834311,63 C103.256674,61.0388326 103.568427,59.0365486 103.762717,57 L87.6555706,57 C87.3692052,59.0609452 86.9083652,61.0660782 86.2884493,63 L102.834311,63 Z M99.5852583,73 C98.6682925,75.0747721 97.6196148,77.0783056 96.4498253,79 L75.8124196,79 C77.8387053,77.2115633 79.6621163,75.1985844 81.2437158,73 L99.5852583,73 Z M95.1689122,81 C93.7449202,83.1155572 92.1695234,85.1207336 90.458251,87 L60.4614747,87 C65.1836162,85.86248 69.5430327,83.794147 73.3347255,81 L95.1689122,81 Z M87.6555706,47 L103.762717,47 C101.246684,20.6269305 79.0321807,0 52,0 C23.281193,0 0,23.281193 0,52 C0,77.2277755 17.9651296,98.2595701 41.8000051,103 L62.1999949,103 C67.8794003,101.870444 73.2255333,99.8158975 78.074754,97 L39,97 L39,95 L81.2493857,95 C83.8589242,93.2215015 86.2981855,91.2116653 88.5376609,89 L39,89 L39,87 L43.5385253,87 C27.7389671,83.1940333 16,68.967908 16,52 C16,32.117749 32.117749,16 52,16 C70.1856127,16 85.2217929,29.4843233 87.6555706,47 Z M87.8767787,49 L103.914907,49 C103.971379,49.9928025 104,50.9930589 104,52 C104,53.0069411 103.971379,54.0071975 103.914907,55 L87.8767787,55 C87.958386,54.0107999 88,53.0102597 88,52 C88,50.9897403 87.958386,49.9892001 87.8767787,49 Z\"></path>\n        <path d=\"M76,128 C104.718807,128 128,104.718807 128,76 C128,47.281193 104.718807,24 76,24 C47.281193,24 24,47.281193 24,76 C24,104.718807 47.281193,128 76,128 L76,128 Z M76,112 C95.882251,112 112,95.882251 112,76 C112,56.117749 95.882251,40 76,40 C56.117749,40 40,56.117749 40,76 C40,95.882251 56.117749,112 76,112 L76,112 Z\"></path>\n        <path d=\"M37,58 L90,58 L90,62 L37,62 L37,58 L37,58 Z M40,50 L93,50 L93,54 L40,54 L40,50 L40,50 Z M40,42 L93,42 L93,46 L40,46 L40,42 L40,42 Z M32,66 L85,66 L85,70 L32,70 L32,66 L32,66 Z M30,74 L83,74 L83,78 L30,78 L30,74 L30,74 Z M27,82 L80,82 L80,86 L27,86 L27,82 L27,82 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/open-window": {
            "title": "$:/core/images/open-window",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-open-window tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M16,112 L104.993898,112 C108.863261,112 112,115.590712 112,120 C112,124.418278 108.858091,128 104.993898,128 L7.00610161,128 C3.13673853,128 0,124.409288 0,120 C0,119.998364 4.30952878e-07,119.996727 1.29273572e-06,119.995091 C4.89579306e-07,119.993456 0,119.99182 0,119.990183 L0,24.0098166 C0,19.586117 3.59071231,16 8,16 C12.418278,16 16,19.5838751 16,24.0098166 L16,112 Z\"></path>\n        <path d=\"M96,43.1959595 L96,56 C96,60.418278 99.581722,64 104,64 C108.418278,64 112,60.418278 112,56 L112,24 C112,19.5907123 108.415101,16 103.992903,16 L72.0070969,16 C67.5881712,16 64,19.581722 64,24 C64,28.4092877 67.5848994,32 72.0070969,32 L84.5685425,32 L48.2698369,68.2987056 C45.1421332,71.4264093 45.1434327,76.4904296 48.267627,79.614624 C51.3854642,82.7324612 56.4581306,82.7378289 59.5835454,79.6124141 L96,43.1959595 Z M32,7.9992458 C32,3.58138434 35.5881049,0 39.9992458,0 L120.000754,0 C124.418616,0 128,3.5881049 128,7.9992458 L128,88.0007542 C128,92.4186157 124.411895,96 120.000754,96 L39.9992458,96 C35.5813843,96 32,92.4118951 32,88.0007542 L32,7.9992458 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/options-button": {
            "title": "$:/core/images/options-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-options-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M110.48779,76.0002544 C109.354214,80.4045063 107.611262,84.5641217 105.354171,88.3838625 L105.354171,88.3838625 L112.07833,95.1080219 C115.20107,98.2307613 115.210098,103.299824 112.089164,106.420759 L106.420504,112.089418 C103.301049,115.208874 98.2346851,115.205502 95.1077675,112.078585 L88.3836082,105.354425 C84.5638673,107.611516 80.4042519,109.354468 76,110.488045 L76,110.488045 L76,119.993281 C76,124.409501 72.4220153,128.000254 68.0083475,128.000254 L59.9916525,128.000254 C55.5800761,128.000254 52,124.41541 52,119.993281 L52,110.488045 C47.5957481,109.354468 43.4361327,107.611516 39.6163918,105.354425 L32.8922325,112.078585 C29.7694931,115.201324 24.7004301,115.210353 21.5794957,112.089418 L15.9108363,106.420759 C12.7913807,103.301303 12.7947522,98.2349395 15.9216697,95.1080219 L22.6458291,88.3838625 C20.3887383,84.5641217 18.6457859,80.4045063 17.5122098,76.0002544 L8.00697327,76.0002544 C3.59075293,76.0002544 2.19088375e-16,72.4222697 4.89347582e-16,68.0086019 L9.80228577e-16,59.9919069 C1.25035972e-15,55.5803305 3.58484404,52.0002544 8.00697327,52.0002544 L17.5122098,52.0002544 C18.6457859,47.5960025 20.3887383,43.4363871 22.6458291,39.6166462 L15.9216697,32.8924868 C12.7989304,29.7697475 12.7899019,24.7006845 15.9108363,21.5797501 L21.5794957,15.9110907 C24.6989513,12.7916351 29.7653149,12.7950065 32.8922325,15.9219241 L39.6163918,22.6460835 C43.4361327,20.3889927 47.5957481,18.6460403 52,17.5124642 L52,8.00722764 C52,3.5910073 55.5779847,0.000254375069 59.9916525,0.000254375069 L68.0083475,0.000254375069 C72.4199239,0.000254375069 76,3.58509841 76,8.00722764 L76,17.5124642 C80.4042519,18.6460403 84.5638673,20.3889927 88.3836082,22.6460835 L95.1077675,15.9219241 C98.2305069,12.7991848 103.29957,12.7901562 106.420504,15.9110907 L112.089164,21.5797501 C115.208619,24.6992057 115.205248,29.7655693 112.07833,32.8924868 L105.354171,39.6166462 L105.354171,39.6166462 C107.611262,43.4363871 109.354214,47.5960025 110.48779,52.0002544 L119.993027,52.0002544 C124.409247,52.0002544 128,55.5782391 128,59.9919069 L128,68.0086019 C128,72.4201783 124.415156,76.0002544 119.993027,76.0002544 L110.48779,76.0002544 L110.48779,76.0002544 Z M64,96.0002544 C81.673112,96.0002544 96,81.6733664 96,64.0002544 C96,46.3271424 81.673112,32.0002544 64,32.0002544 C46.326888,32.0002544 32,46.3271424 32,64.0002544 C32,81.6733664 46.326888,96.0002544 64,96.0002544 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/paint": {
            "title": "$:/core/images/paint",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-paint tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M83.5265806,76.1907935 C90.430962,69.2864121 91.8921169,59.0000433 87.9100453,50.6642209 L125.812763,12.7615036 C128.732035,9.84223095 128.72611,5.10322984 125.812796,2.18991592 C122.893542,-0.729338085 118.161775,-0.730617045 115.241209,2.18994966 L77.3384914,40.092667 C69.002669,36.1105954 58.7163002,37.5717503 51.8119188,44.4761317 L83.5265806,76.1907935 L83.5265806,76.1907935 L83.5265806,76.1907935 L83.5265806,76.1907935 Z M80.8836921,78.8336819 L49.1690303,47.1190201 C49.1690303,47.1190201 8.50573364,81.242543 0,80.2820711 C0,80.2820711 3.78222974,85.8744423 6.82737483,88.320684 C20.8514801,82.630792 44.1526049,63.720771 44.1526049,63.720771 L44.8144806,64.3803375 C44.8144806,64.3803375 19.450356,90.2231043 9.18040433,92.0477601 C10.4017154,93.4877138 13.5343883,96.1014812 15.4269991,97.8235871 C20.8439164,96.3356979 50.1595367,69.253789 50.1595367,69.253789 L50.8214124,69.9133555 L18.4136144,100.936036 L23.6993903,106.221812 L56.1060358,75.2002881 L56.7679115,75.8598546 C56.7679115,75.8598546 28.9040131,106.396168 28.0841366,108.291555 C28.0841366,108.291555 34.1159238,115.144621 35.6529617,116.115796 C36.3545333,113.280171 63.5365402,82.6307925 63.5365402,82.6307925 L64.1984159,83.290359 C64.1984159,83.290359 43.6013016,107.04575 39.2343772,120.022559 C42.443736,123.571575 46.7339155,125.159692 50.1595362,126.321151 C47.9699978,114.504469 80.8836921,78.8336819 80.8836921,78.8336819 L80.8836921,78.8336819 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/palette": {
            "title": "$:/core/images/palette",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-palette tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M80.2470434,39.1821571 C75.0645698,38.2680897 69.6261555,37.7814854 64.0193999,37.7814854 C28.6624616,37.7814854 0,57.1324214 0,81.0030106 C0,90.644534 4.67604329,99.5487133 12.5805659,106.738252 C23.5031767,91.1899067 26.3405471,72.3946229 36.8885698,63.5622337 C52.0716764,50.8486559 63.4268694,55.7343343 63.4268694,55.7343343 L80.2470434,39.1821571 Z M106.781666,48.8370714 C119.830962,56.749628 128.0388,68.229191 128.0388,81.0030106 C128.0388,90.3534932 128.557501,98.4142085 116.165191,106.082518 C105.367708,112.763955 112.341384,99.546808 104.321443,95.1851533 C96.3015017,90.8234987 84.3749007,96.492742 86.1084305,103.091059 C89.3087234,115.272303 105.529892,114.54645 92.4224435,119.748569 C79.3149955,124.950687 74.2201582,124.224536 64.0193999,124.224536 C56.1979176,124.224536 48.7040365,123.277578 41.7755684,121.544216 C51.620343,117.347916 69.6563669,109.006202 75.129737,102.088562 C82.7876655,92.4099199 87.3713218,80.0000002 83.3235694,72.4837191 C83.1303943,72.1250117 94.5392656,60.81569 106.781666,48.8370714 Z M1.13430476,123.866563 C0.914084026,123.867944 0.693884185,123.868637 0.473712455,123.868637 C33.9526848,108.928928 22.6351223,59.642592 59.2924543,59.6425917 C59.6085574,61.0606542 59.9358353,62.5865065 60.3541977,64.1372318 C34.4465025,59.9707319 36.7873124,112.168427 1.13429588,123.866563 L1.13430476,123.866563 Z M1.84669213,123.859694 C40.7185279,123.354338 79.9985412,101.513051 79.9985401,79.0466836 C70.7284906,79.0466835 65.9257264,75.5670082 63.1833375,71.1051511 C46.585768,64.1019718 32.81846,116.819636 1.84665952,123.859695 L1.84669213,123.859694 Z M67.1980193,59.8524981 C62.748213,63.9666823 72.0838429,76.2846822 78.5155805,71.1700593 C89.8331416,59.8524993 112.468264,37.2173758 123.785825,25.8998146 C135.103386,14.5822535 123.785825,3.26469247 112.468264,14.5822535 C101.150703,25.8998144 78.9500931,48.9868127 67.1980193,59.8524981 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/permalink-button": {
            "title": "$:/core/images/permalink-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-permalink-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M80.4834582,48 L73.0956761,80 L73.0956761,80 L47.5165418,80 L54.9043239,48 L80.4834582,48 Z M84.1773493,32 L89.8007299,7.64246248 C90.7941633,3.33942958 95.0918297,0.64641956 99.3968675,1.64031585 C103.693145,2.63218977 106.385414,6.93288901 105.390651,11.2416793 L100.598215,32 L104.000754,32 C108.411895,32 112,35.581722 112,40 C112,44.4092877 108.418616,48 104.000754,48 L96.9043239,48 L89.5165418,80 L104.000754,80 C108.411895,80 112,83.581722 112,88 C112,92.4092877 108.418616,96 104.000754,96 L85.8226507,96 L80.1992701,120.357538 C79.2058367,124.66057 74.9081703,127.35358 70.6031325,126.359684 C66.3068546,125.36781 63.6145865,121.067111 64.6093491,116.758321 L69.401785,96 L43.8226507,96 L38.1992701,120.357538 C37.2058367,124.66057 32.9081703,127.35358 28.6031325,126.359684 C24.3068546,125.36781 21.6145865,121.067111 22.6093491,116.758321 L27.401785,96 L23.9992458,96 C19.5881049,96 16,92.418278 16,88 C16,83.5907123 19.5813843,80 23.9992458,80 L31.0956761,80 L38.4834582,48 L23.9992458,48 C19.5881049,48 16,44.418278 16,40 C16,35.5907123 19.5813843,32 23.9992458,32 L42.1773493,32 L47.8007299,7.64246248 C48.7941633,3.33942958 53.0918297,0.64641956 57.3968675,1.64031585 C61.6931454,2.63218977 64.3854135,6.93288901 63.3906509,11.2416793 L58.598215,32 L84.1773493,32 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/permaview-button": {
            "title": "$:/core/images/permaview-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-permaview-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M81.4834582,48 L79.6365127,56 L79.6365127,56 L74.0573784,56 L75.9043239,48 L81.4834582,48 Z M85.1773493,32 L90.8007299,7.64246248 C91.7941633,3.33942958 96.0918297,0.64641956 100.396867,1.64031585 C104.693145,2.63218977 107.385414,6.93288901 106.390651,11.2416793 L101.598215,32 L104.000754,32 C108.411895,32 112,35.581722 112,40 C112,44.4092877 108.418616,48 104.000754,48 L97.9043239,48 L96.0573784,56 L104.000754,56 C108.411895,56 112,59.581722 112,64 C112,68.4092877 108.418616,72 104.000754,72 L92.3634873,72 L90.5165418,80 L104.000754,80 C108.411895,80 112,83.581722 112,88 C112,92.4092877 108.418616,96 104.000754,96 L86.8226507,96 L81.1992701,120.357538 C80.2058367,124.66057 75.9081703,127.35358 71.6031325,126.359684 C67.3068546,125.36781 64.6145865,121.067111 65.6093491,116.758321 L70.401785,96 L64.8226507,96 L59.1992701,120.357538 C58.2058367,124.66057 53.9081703,127.35358 49.6031325,126.359684 C45.3068546,125.36781 42.6145865,121.067111 43.6093491,116.758321 L48.401785,96 L42.8226507,96 L37.1992701,120.357538 C36.2058367,124.66057 31.9081703,127.35358 27.6031325,126.359684 C23.3068546,125.36781 20.6145865,121.067111 21.6093491,116.758321 L26.401785,96 L23.9992458,96 C19.5881049,96 16,92.418278 16,88 C16,83.5907123 19.5813843,80 23.9992458,80 L30.0956761,80 L31.9426216,72 L23.9992458,72 C19.5881049,72 16,68.418278 16,64 C16,59.5907123 19.5813843,56 23.9992458,56 L35.6365127,56 L37.4834582,48 L23.9992458,48 C19.5881049,48 16,44.418278 16,40 C16,35.5907123 19.5813843,32 23.9992458,32 L41.1773493,32 L46.8007299,7.64246248 C47.7941633,3.33942958 52.0918297,0.64641956 56.3968675,1.64031585 C60.6931454,2.63218977 63.3854135,6.93288901 62.3906509,11.2416793 L57.598215,32 L63.1773493,32 L68.8007299,7.64246248 C69.7941633,3.33942958 74.0918297,0.64641956 78.3968675,1.64031585 C82.6931454,2.63218977 85.3854135,6.93288901 84.3906509,11.2416793 L79.598215,32 L85.1773493,32 Z M53.9043239,48 L52.0573784,56 L57.6365127,56 L59.4834582,48 L53.9043239,48 Z M75.9426216,72 L74.0956761,80 L74.0956761,80 L68.5165418,80 L70.3634873,72 L75.9426216,72 L75.9426216,72 Z M48.3634873,72 L46.5165418,80 L52.0956761,80 L53.9426216,72 L48.3634873,72 L48.3634873,72 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/picture": {
            "title": "$:/core/images/picture",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-picture tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M112,68.2332211 L112,20.0027785 C112,17.7898769 110.207895,16 107.997221,16 L20.0027785,16 C17.7898769,16 16,17.792105 16,20.0027785 L16,58.312373 L25.2413115,43.7197989 C28.041793,39.297674 34.2643908,38.7118128 37.8410347,42.5335275 L56.0882845,63.1470817 L69.7748997,56.7400579 C72.766567,55.3552503 76.3013751,55.9473836 78.678437,58.2315339 C78.8106437,58.3585731 79.0742301,58.609836 79.4527088,58.9673596 C80.0910923,59.570398 80.8117772,60.2441563 81.598127,60.9705595 C83.8422198,63.043576 86.1541548,65.1151944 88.3956721,67.0372264 C89.1168795,67.6556396 89.8200801,68.2492007 90.5021258,68.8146755 C92.6097224,70.5620551 94.4693308,72.0029474 95.9836366,73.0515697 C96.7316295,73.5695379 97.3674038,73.9719282 98.0281481,74.3824999 C98.4724987,74.4989557 99.0742374,74.5263881 99.8365134,74.4317984 C101.709944,74.1993272 104.074502,73.2878514 106.559886,71.8846196 C107.705822,71.2376318 108.790494,70.5370325 109.764561,69.8410487 C110.323259,69.4418522 110.694168,69.1550757 110.834827,69.0391868 C111.210545,68.7296319 111.600264,68.4615815 112,68.2332211 L112,68.2332211 Z M0,8.00697327 C0,3.58484404 3.59075293,0 8.00697327,0 L119.993027,0 C124.415156,0 128,3.59075293 128,8.00697327 L128,119.993027 C128,124.415156 124.409247,128 119.993027,128 L8.00697327,128 C3.58484404,128 0,124.409247 0,119.993027 L0,8.00697327 L0,8.00697327 Z M95,42 C99.418278,42 103,38.418278 103,34 C103,29.581722 99.418278,26 95,26 C90.581722,26 87,29.581722 87,34 C87,38.418278 90.581722,42 95,42 L95,42 Z M32,76 C47.8587691,80.8294182 52.0345556,83.2438712 52.0345556,88 C52.0345556,92.7561288 32,95.4712486 32,102.347107 C32,109.222965 33.2849191,107.337637 33.2849191,112 L67.999999,112 C67.999999,112 54.3147136,105.375255 54.3147136,101.200691 C54.3147136,93.535181 64.9302432,92.860755 64.9302432,88 C64.9302432,80.6425555 50.8523779,79.167282 32,76 L32,76 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/plugin-generic-language": {
            "title": "$:/core/images/plugin-generic-language",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M61.2072232,68.1369825 C56.8829239,70.9319564 54.2082892,74.793177 54.2082892,79.0581634 C54.2082892,86.9638335 63.3980995,93.4821994 75.2498076,94.3940006 C77.412197,98.2964184 83.8475284,101.178858 91.5684735,101.403106 C86.4420125,100.27851 82.4506393,97.6624107 80.9477167,94.3948272 C92.8046245,93.4861461 102,86.9662269 102,79.0581634 C102,70.5281905 91.3014611,63.6132813 78.1041446,63.6132813 C71.5054863,63.6132813 65.5315225,65.3420086 61.2072232,68.1369825 Z M74.001066,53.9793443 C69.6767667,56.7743182 63.7028029,58.5030456 57.1041446,58.5030456 C54.4851745,58.5030456 51.9646095,58.2307276 49.6065315,57.7275105 C46.2945155,59.9778212 41.2235699,61.4171743 35.5395922,61.4171743 C35.4545771,61.4171743 35.3696991,61.4168523 35.2849622,61.4162104 C39.404008,60.5235193 42.7961717,58.6691298 44.7630507,56.286533 C37.8379411,53.5817651 33.2082892,48.669413 33.2082892,43.0581634 C33.2082892,34.5281905 43.9068281,27.6132812 57.1041446,27.6132812 C70.3014611,27.6132812 81,34.5281905 81,43.0581634 C81,47.3231498 78.3253653,51.1843704 74.001066,53.9793443 Z M64,0 L118.5596,32 L118.5596,96 L64,128 L9.44039956,96 L9.44039956,32 L64,0 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/plugin-generic-plugin": {
            "title": "$:/core/images/plugin-generic-plugin",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M40.3972881,76.4456988 L40.3972881,95.3404069 L54.5170166,95.3404069 L54.5170166,95.3404069 C54.5165526,95.3385183 54.516089,95.3366295 54.515626,95.3347404 C54.6093153,95.3385061 54.7034848,95.3404069 54.7980982,95.3404069 C58.6157051,95.3404069 61.710487,92.245625 61.710487,88.4280181 C61.710487,86.6197822 61.01617,84.9737128 59.8795929,83.7418666 L59.8795929,83.7418666 C59.8949905,83.7341665 59.9104102,83.7265043 59.925852,83.7188798 C58.8840576,82.5086663 58.2542926,80.9336277 58.2542926,79.2114996 C58.2542926,75.3938927 61.3490745,72.2991108 65.1666814,72.2991108 C68.9842884,72.2991108 72.0790703,75.3938927 72.0790703,79.2114996 C72.0790703,81.1954221 71.2432806,82.9841354 69.9045961,84.2447446 L69.9045961,84.2447446 C69.9333407,84.2629251 69.9619885,84.281245 69.9905383,84.2997032 L69.9905383,84.2997032 C69.1314315,85.4516923 68.6228758,86.8804654 68.6228758,88.4280181 C68.6228758,91.8584969 71.1218232,94.7053153 74.3986526,95.2474079 C74.3913315,95.2784624 74.3838688,95.3094624 74.3762652,95.3404069 L95.6963988,95.3404069 L95.6963988,75.5678578 L95.6963988,75.5678578 C95.6466539,75.5808558 95.5967614,75.5934886 95.5467242,75.6057531 C95.5504899,75.5120637 95.5523907,75.4178943 95.5523907,75.3232809 C95.5523907,71.505674 92.4576088,68.4108921 88.6400019,68.4108921 C86.831766,68.4108921 85.1856966,69.105209 83.9538504,70.2417862 L83.9538504,70.2417862 C83.9461503,70.2263886 83.938488,70.2109688 83.9308636,70.1955271 C82.7206501,71.2373215 81.1456115,71.8670865 79.4234834,71.8670865 C75.6058765,71.8670865 72.5110946,68.7723046 72.5110946,64.9546976 C72.5110946,61.1370907 75.6058765,58.0423088 79.4234834,58.0423088 C81.4074059,58.0423088 83.1961192,58.8780985 84.4567284,60.2167829 L84.4567284,60.2167829 C84.4749089,60.1880383 84.4932288,60.1593906 84.511687,60.1308407 L84.511687,60.1308407 C85.6636761,60.9899475 87.0924492,61.4985032 88.6400019,61.4985032 C92.0704807,61.4985032 94.9172991,58.9995558 95.4593917,55.7227265 C95.538755,55.7414363 95.6177614,55.761071 95.6963988,55.7816184 L95.6963988,40.0412962 L74.3762652,40.0412962 L74.3762652,40.0412962 C74.3838688,40.0103516 74.3913315,39.9793517 74.3986526,39.9482971 L74.3986526,39.9482971 C71.1218232,39.4062046 68.6228758,36.5593862 68.6228758,33.1289073 C68.6228758,31.5813547 69.1314315,30.1525815 69.9905383,29.0005925 C69.9619885,28.9821342 69.9333407,28.9638143 69.9045961,28.9456339 C71.2432806,27.6850247 72.0790703,25.8963113 72.0790703,23.9123888 C72.0790703,20.0947819 68.9842884,17 65.1666814,17 C61.3490745,17 58.2542926,20.0947819 58.2542926,23.9123888 C58.2542926,25.6345169 58.8840576,27.2095556 59.925852,28.419769 L59.925852,28.419769 C59.9104102,28.4273935 59.8949905,28.4350558 59.8795929,28.4427558 C61.01617,29.674602 61.710487,31.3206715 61.710487,33.1289073 C61.710487,36.9465143 58.6157051,40.0412962 54.7980982,40.0412962 C54.7034848,40.0412962 54.6093153,40.0393953 54.515626,40.0356296 L54.515626,40.0356296 C54.516089,40.0375187 54.5165526,40.0394075 54.5170166,40.0412962 L40.3972881,40.0412962 L40.3972881,52.887664 L40.3972881,52.887664 C40.4916889,53.3430132 40.5412962,53.8147625 40.5412962,54.2980982 C40.5412962,58.1157051 37.4465143,61.210487 33.6289073,61.210487 C32.0813547,61.210487 30.6525815,60.7019313 29.5005925,59.8428245 C29.4821342,59.8713744 29.4638143,59.9000221 29.4456339,59.9287667 C28.1850247,58.5900823 26.3963113,57.7542926 24.4123888,57.7542926 C20.5947819,57.7542926 
17.5,60.8490745 17.5,64.6666814 C17.5,68.4842884 20.5947819,71.5790703 24.4123888,71.5790703 C26.134517,71.5790703 27.7095556,70.9493053 28.919769,69.9075109 L28.919769,69.9075109 C28.9273935,69.9229526 28.9350558,69.9383724 28.9427558,69.95377 C30.174602,68.8171928 31.8206715,68.1228758 33.6289073,68.1228758 C37.4465143,68.1228758 40.5412962,71.2176578 40.5412962,75.0352647 C40.5412962,75.5186004 40.4916889,75.9903496 40.3972881,76.4456988 Z M64,0 L118.5596,32 L118.5596,96 L64,128 L9.44039956,96 L9.44039956,32 L64,0 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/plugin-generic-theme": {
            "title": "$:/core/images/plugin-generic-theme",
            "tags": "$:/tags/Image",
            "text": "<svg width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M29.4078519,91.4716406 L51.4693474,69.4101451 L51.4646675,69.4054652 C50.5969502,68.5377479 50.5929779,67.1348725 51.4693474,66.2585029 C52.3396494,65.3882009 53.7499654,65.3874786 54.6163097,66.2538229 L64.0805963,75.7181095 C64.9483136,76.5858268 64.9522859,77.9887022 64.0759163,78.8650718 C63.2056143,79.7353737 61.7952984,79.736096 60.9289541,78.8697517 L60.9242741,78.8650718 L60.9242741,78.8650718 L38.8627786,100.926567 C36.2518727,103.537473 32.0187578,103.537473 29.4078519,100.926567 C26.796946,98.3156614 26.796946,94.0825465 29.4078519,91.4716406 Z M60.8017407,66.3810363 C58.3659178,63.6765806 56.3370667,61.2899536 54.9851735,59.5123615 C48.1295381,50.4979488 44.671561,55.2444054 40.7586738,59.5123614 C36.8457866,63.7803174 41.789473,67.2384487 38.0759896,70.2532832 C34.3625062,73.2681177 34.5917646,74.3131575 28.3243876,68.7977024 C22.0570105,63.2822473 21.6235306,61.7636888 24.5005999,58.6166112 C27.3776691,55.4695337 29.7823103,60.4247912 35.6595047,54.8320442 C41.5366991,49.2392972 36.5996215,44.2825646 36.5996215,44.2825646 C36.5996215,44.2825646 48.8365511,19.267683 65.1880231,21.1152173 C81.5394952,22.9627517 59.0022276,18.7228947 53.3962199,38.3410355 C50.9960082,46.7405407 53.8429162,44.7613399 58.3941742,48.3090467 C59.7875202,49.3951602 64.4244828,52.7100463 70.1884353,56.9943417 L90.8648751,36.3179019 L92.4795866,31.5515482 L100.319802,26.8629752 L103.471444,30.0146174 L98.782871,37.8548326 L94.0165173,39.4695441 L73.7934912,59.6925702 C86.4558549,69.2403631 102.104532,81.8392557 102.104532,86.4016913 C102.104533,93.6189834 99.0337832,97.9277545 92.5695848,95.5655717 C87.8765989,93.8506351 73.8015497,80.3744087 63.8173444,69.668717 L60.9242741,72.5617873 L57.7726319,69.4101451 L60.8017407,66.3810363 L60.8017407,66.3810363 Z M63.9533761,1.42108547e-13 L118.512977,32 L118.512977,96 L63.9533761,128 L9.39377563,96 L9.39377563,32 L63.9533761,1.42108547e-13 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/preview-closed": {
            "title": "$:/core/images/preview-closed",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-preview-closed tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M0.0881363238,64 C-0.210292223,65.8846266 0.249135869,67.8634737 1.4664206,69.4579969 C16.2465319,88.8184886 39.1692554,100.414336 64,100.414336 C88.8307446,100.414336 111.753468,88.8184886 126.533579,69.4579969 C127.750864,67.8634737 128.210292,65.8846266 127.911864,64 C110.582357,78.4158332 88.3036732,87.0858436 64,87.0858436 C39.6963268,87.0858436 17.4176431,78.4158332 0.0881363238,64 Z\"></path>\n        <rect x=\"62\" y=\"96\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(80.000000, 101.000000) rotate(-5.000000) translate(-80.000000, -101.000000) \" x=\"78\" y=\"93\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(48.000000, 101.000000) rotate(-355.000000) translate(-48.000000, -101.000000) \" x=\"46\" y=\"93\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(32.000000, 96.000000) rotate(-350.000000) translate(-32.000000, -96.000000) \" x=\"30\" y=\"88\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(96.000000, 96.000000) rotate(-10.000000) translate(-96.000000, -96.000000) \" x=\"94\" y=\"88\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(112.000000, 88.000000) rotate(-20.000000) translate(-112.000000, -88.000000) \" x=\"110\" y=\"80\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n        <rect transform=\"translate(16.000000, 88.000000) rotate(-340.000000) translate(-16.000000, -88.000000) \" x=\"14\" y=\"80\" width=\"4\" height=\"16\" rx=\"4\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/preview-open": {
            "title": "$:/core/images/preview-open",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-preview-open tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64.1099282,99.5876785 C39.2791836,99.5876785 16.3564602,87.9918313 1.57634884,68.6313396 C-0.378878622,66.070184 -0.378878622,62.5174945 1.57634884,59.9563389 C16.3564602,40.5958472 39.2791836,29 64.1099282,29 C88.9406729,29 111.863396,40.5958472 126.643508,59.9563389 C128.598735,62.5174945 128.598735,66.070184 126.643508,68.6313396 C111.863396,87.9918313 88.9406729,99.5876785 64.1099282,99.5876785 Z M110.213805,67.5808331 C111.654168,66.0569335 111.654168,63.9430665 110.213805,62.4191669 C99.3257042,50.8995835 82.4391647,44 64.1470385,44 C45.8549124,44 28.9683729,50.8995835 18.0802717,62.4191669 C16.6399094,63.9430665 16.6399094,66.0569335 18.0802717,67.5808331 C28.9683729,79.1004165 45.8549124,86 64.1470385,86 C82.4391647,86 99.3257042,79.1004165 110.213805,67.5808331 Z\"></path>\n        <path d=\"M63.5,88 C76.4786916,88 87,77.4786916 87,64.5 C87,51.5213084 76.4786916,41 63.5,41 C50.5213084,41 40,51.5213084 40,64.5 C40,77.4786916 50.5213084,88 63.5,88 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/quote": {
            "title": "$:/core/images/quote",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-quote tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M51.2188077,117.712501 L51.2188077,62.1993386 L27.4274524,62.1993386 C27.4274524,53.3075754 29.1096526,45.797753 32.4741035,39.669646 C35.8385544,33.541539 42.0867267,28.9154883 51.2188077,25.7913554 L51.2188077,2 C43.7689521,2.96127169 36.8599155,5.18417913 30.4914905,8.668789 C24.1230656,12.1533989 18.6559149,16.5391352 14.0898743,21.8261295 C9.52383382,27.1131238 5.97919764,33.2411389 3.45585945,40.2103586 C0.932521268,47.1795784 -0.208971741,54.6293222 0.0313461819,62.5598136 L0.0313461819,117.712501 L51.2188077,117.712501 Z M128,117.712501 L128,62.1993386 L104.208645,62.1993386 C104.208645,53.3075754 105.890845,45.797753 109.255296,39.669646 C112.619747,33.541539 118.867919,28.9154883 128,25.7913554 L128,2 C120.550144,2.96127169 113.641108,5.18417913 107.272683,8.668789 C100.904258,12.1533989 95.4371072,16.5391352 90.8710666,21.8261295 C86.3050261,27.1131238 82.7603899,33.2411389 80.2370517,40.2103586 C77.7137136,47.1795784 76.5722206,54.6293222 76.8125385,62.5598136 L76.8125385,117.712501 L128,117.712501 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/refresh-button": {
            "title": "$:/core/images/refresh-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-refresh-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M106.369002,39.4325143 C116.529932,60.3119371 112.939592,86.1974934 95.5979797,103.539105 C73.7286194,125.408466 38.2713806,125.408466 16.4020203,103.539105 C-5.46734008,81.6697449 -5.46734008,46.2125061 16.4020203,24.3431458 C19.5262146,21.2189514 24.5915344,21.2189514 27.7157288,24.3431458 C30.8399231,27.4673401 30.8399231,32.5326599 27.7157288,35.6568542 C12.0947571,51.2778259 12.0947571,76.6044251 27.7157288,92.2253967 C43.3367004,107.846368 68.6632996,107.846368 84.2842712,92.2253967 C97.71993,78.7897379 99.5995262,58.1740623 89.9230597,42.729491 L83.4844861,54.9932839 C81.4307001,58.9052072 76.5945372,60.4115251 72.682614,58.3577391 C68.7706907,56.3039532 67.2643728,51.4677903 69.3181587,47.555867 L84.4354914,18.7613158 C86.4966389,14.8353707 91.3577499,13.3347805 95.273202,15.415792 L124.145886,30.7612457 C128.047354,32.8348248 129.52915,37.6785572 127.455571,41.5800249 C125.381992,45.4814927 120.53826,46.9632892 116.636792,44.8897102 L106.369002,39.4325143 Z M98.1470904,27.0648707 C97.9798954,26.8741582 97.811187,26.6843098 97.6409651,26.4953413 L98.6018187,26.1987327 L98.1470904,27.0648707 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/right-arrow": {
            "title": "$:/core/images/right-arrow",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-right-arrow tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path d=\"M80.3563798,109.353315 C78.9238993,110.786918 76.9450203,111.675144 74.7592239,111.675144 L-4.40893546,111.675144 C-8.77412698,111.675144 -12.3248558,108.130732 -12.3248558,103.758478 C-12.3248558,99.3951199 -8.78077754,95.8418109 -4.40893546,95.8418109 L66.8418109,95.8418109 L66.8418109,24.5910645 C66.8418109,20.225873 70.3862233,16.6751442 74.7584775,16.6751442 C79.1218352,16.6751442 82.6751442,20.2192225 82.6751442,24.5910645 L82.6751442,103.759224 C82.6751442,105.941695 81.7891419,107.920575 80.3566508,109.353886 Z\" transform=\"translate(35.175144, 64.175144) rotate(-45.000000) translate(-35.175144, -64.175144) \"></path>\n</svg>"
        },
        "$:/core/images/save-button": {
            "title": "$:/core/images/save-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-save-button tc-image-button\" viewBox=\"0 0 128 128\" width=\"22pt\" height=\"22pt\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M120.78304,34.329058 C125.424287,43.1924006 128.049406,53.2778608 128.049406,63.9764502 C128.049406,99.3226742 99.3956295,127.97645 64.0494055,127.97645 C28.7031816,127.97645 0.0494055385,99.3226742 0.0494055385,63.9764502 C0.0494055385,28.6302262 28.7031816,-0.0235498012 64.0494055,-0.0235498012 C82.8568763,-0.0235498012 99.769563,8.08898558 111.479045,21.0056358 L114.159581,18.3250998 C117.289194,15.1954866 122.356036,15.1939641 125.480231,18.3181584 C128.598068,21.4359957 128.601317,26.5107804 125.473289,29.6388083 L120.78304,34.329058 Z M108.72451,46.3875877 C110.870571,51.8341374 112.049406,57.767628 112.049406,63.9764502 C112.049406,90.4861182 90.5590735,111.97645 64.0494055,111.97645 C37.5397375,111.97645 16.0494055,90.4861182 16.0494055,63.9764502 C16.0494055,37.4667822 37.5397375,15.9764502 64.0494055,15.9764502 C78.438886,15.9764502 91.3495036,22.308215 100.147097,32.3375836 L58.9411255,73.5435552 L41.975581,56.5780107 C38.8486152,53.4510448 33.7746915,53.4551552 30.6568542,56.5729924 C27.5326599,59.6971868 27.5372202,64.7670668 30.6618725,67.8917192 L53.279253,90.5090997 C54.8435723,92.073419 56.8951519,92.8541315 58.9380216,92.8558261 C60.987971,92.8559239 63.0389578,92.0731398 64.6049211,90.5071765 L108.72451,46.3875877 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/size": {
            "title": "$:/core/images/size",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-size tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <path d=\"M92.3431458,26 L83.1715729,35.1715729 C81.6094757,36.73367 81.6094757,39.26633 83.1715729,40.8284271 C84.73367,42.3905243 87.26633,42.3905243 88.8284271,40.8284271 L104.828427,24.8284271 C106.390524,23.26633 106.390524,20.73367 104.828427,19.1715729 L88.8284271,3.17157288 C87.26633,1.60947571 84.73367,1.60947571 83.1715729,3.17157288 C81.6094757,4.73367004 81.6094757,7.26632996 83.1715729,8.82842712 L92.3431457,18 L22,18 C19.790861,18 18,19.790861 18,22 L18,92.3431458 L8.82842712,83.1715729 C7.26632996,81.6094757 4.73367004,81.6094757 3.17157288,83.1715729 C1.60947571,84.73367 1.60947571,87.26633 3.17157288,88.8284271 L19.1715729,104.828427 C20.73367,106.390524 23.26633,106.390524 24.8284271,104.828427 L40.8284271,88.8284271 C42.3905243,87.26633 42.3905243,84.73367 40.8284271,83.1715729 C39.26633,81.6094757 36.73367,81.6094757 35.1715729,83.1715729 L26,92.3431458 L26,22 L22,26 L92.3431458,26 L92.3431458,26 Z M112,52 L112,116 L116,112 L52,112 C49.790861,112 48,113.790861 48,116 C48,118.209139 49.790861,120 52,120 L116,120 C118.209139,120 120,118.209139 120,116 L120,52 C120,49.790861 118.209139,48 116,48 C113.790861,48 112,49.790861 112,52 L112,52 Z\"></path>\n</svg>"
        },
        "$:/core/images/spiral": {
            "title": "$:/core/images/spiral",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-spiral tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"nonzero\">\n        <path d=\"M64.534 68.348c3.39 0 6.097-2.62 6.476-5.968l-4.755-.538 4.75.583c.377-3.07-1.194-6.054-3.89-7.78-2.757-1.773-6.34-2.01-9.566-.7-3.46 1.403-6.14 4.392-7.35 8.148l-.01.026c-1.3 4.08-.72 8.64 1.58 12.52 2.5 4.2 6.77 7.2 11.76 8.27 5.37 1.15 11.11-.05 15.83-3.31 5.04-3.51 8.46-9.02 9.45-15.3 1.05-6.7-.72-13.63-4.92-19.19l.02.02c-4.42-5.93-11.2-9.82-18.78-10.78-7.96-1.01-16.13 1.31-22.59 6.43-6.81 5.39-11.18 13.41-12.11 22.26-.98 9.27 1.87 18.65 7.93 26.02 6.32 7.69 15.6 12.56 25.74 13.48 10.54.96 21.15-2.42 29.45-9.4l.01-.01c8.58-7.25 13.94-17.78 14.86-29.21.94-11.84-2.96-23.69-10.86-32.9-8.19-9.5-19.95-15.36-32.69-16.27-13.16-.94-26.24 3.49-36.34 12.34l.01-.01c-10.41 9.08-16.78 22.1-17.68 36.15-.93 14.44 4.03 28.77 13.79 39.78 10.03 11.32 24.28 18.2 39.6 19.09 15.73.92 31.31-4.56 43.24-15.234 12.23-10.954 19.61-26.44 20.5-43.074.14-2.64-1.89-4.89-4.52-5.03-2.64-.14-4.89 1.88-5.03 4.52-.75 14.1-7 27.2-17.33 36.45-10.03 8.98-23.11 13.58-36.3 12.81-12.79-.75-24.67-6.48-33-15.89-8.07-9.11-12.17-20.94-11.41-32.827.74-11.52 5.942-22.15 14.43-29.54l.01-.01c8.18-7.17 18.74-10.75 29.35-9.998 10.21.726 19.6 5.41 26.11 12.96 6.24 7.273 9.32 16.61 8.573 25.894-.718 8.9-4.88 17.064-11.504 22.66l.01-.007c-6.36 5.342-14.44 7.92-22.425 7.19-7.604-.68-14.52-4.314-19.21-10.027-4.44-5.4-6.517-12.23-5.806-18.94.67-6.3 3.76-11.977 8.54-15.766 4.46-3.54 10.05-5.128 15.44-4.44 5.03.63 9.46 3.18 12.32 7.01l.02.024c2.65 3.5 3.75 7.814 3.1 11.92-.59 3.71-2.58 6.925-5.45 8.924-2.56 1.767-5.61 2.403-8.38 1.81-2.42-.516-4.42-1.92-5.53-3.79-.93-1.56-1.15-3.3-.69-4.75l-4.56-1.446L59.325 65c.36-1.12 1.068-1.905 1.84-2.22.25-.103.48-.14.668-.13.06.006.11.015.14.025.01 0 .01 0-.01-.01-.02-.015-.054-.045-.094-.088-.06-.064-.12-.145-.17-.244-.15-.29-.23-.678-.18-1.11l-.005.04c.15-1.332 1.38-2.523 3.035-2.523-2.65 0-4.79 2.144-4.79 4.787s2.14 4.785 4.78 4.785z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/stamp": {
            "title": "$:/core/images/stamp",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-stamp tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M49.7334301,64 L16.0098166,64 C11.5838751,64 8,67.5829053 8,72.002643 L8,74.4986785 L8,97 L120,97 L120,74.4986785 L120,72.002643 C120,67.5737547 116.413883,64 111.990183,64 L78.2665699,64 C76.502049,60.7519149 75.5,57.0311962 75.5,53.0769231 C75.5,46.6017951 78.1869052,40.7529228 82.5087769,36.5800577 C85.3313113,32.7688808 87,28.0549983 87,22.952183 C87,10.2760423 76.7025492,0 64,0 C51.2974508,0 41,10.2760423 41,22.952183 C41,28.0549983 42.6686887,32.7688808 45.4912231,36.5800577 C49.8130948,40.7529228 52.5,46.6017951 52.5,53.0769231 C52.5,57.0311962 51.497951,60.7519149 49.7334301,64 Z M8,104 L120,104 L120,112 L8,112 L8,104 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/star-filled": {
            "title": "$:/core/images/star-filled",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-star-filled tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"nonzero\">\n        <path d=\"M61.8361286,96.8228569 L99.1627704,124.110219 C101.883827,126.099427 105.541968,123.420868 104.505636,120.198072 L90.2895569,75.9887263 L89.0292911,79.8977279 L126.314504,52.5528988 C129.032541,50.5595011 127.635256,46.2255025 124.273711,46.2229134 L78.1610486,46.1873965 L81.4604673,48.6032923 L67.1773543,4.41589688 C66.1361365,1.19470104 61.6144265,1.19470104 60.5732087,4.41589688 L46.2900957,48.6032923 L49.5895144,46.1873965 L3.47685231,46.2229134 C0.115307373,46.2255025 -1.28197785,50.5595011 1.43605908,52.5528988 L38.7212719,79.8977279 L37.4610061,75.9887263 L23.2449266,120.198072 C22.2085954,123.420868 25.8667356,126.099427 28.5877926,124.110219 L65.9144344,96.8228569 L61.8361286,96.8228569 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/storyview-classic": {
            "title": "$:/core/images/storyview-classic",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-storyview-classic tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00697327,0 C3.58484404,0 0,3.59075293 0,8.00697327 L0,119.993027 C0,124.415156 3.59075293,128 8.00697327,128 L119.993027,128 C124.415156,128 128,124.409247 128,119.993027 L128,8.00697327 C128,3.58484404 124.409247,0 119.993027,0 L8.00697327,0 L8.00697327,0 Z M23.9992458,16 C19.5813843,16 16,19.5776607 16,23.9924054 L16,40.0075946 C16,44.4216782 19.5881049,48 23.9992458,48 L104.000754,48 C108.418616,48 112,44.4223393 112,40.0075946 L112,23.9924054 C112,19.5783218 108.411895,16 104.000754,16 L23.9992458,16 L23.9992458,16 Z M23.9992458,64 C19.5813843,64 16,67.5907123 16,72 C16,76.418278 19.5881049,80 23.9992458,80 L104.000754,80 C108.418616,80 112,76.4092877 112,72 C112,67.581722 108.411895,64 104.000754,64 L23.9992458,64 L23.9992458,64 Z M23.9992458,96 C19.5813843,96 16,99.5907123 16,104 C16,108.418278 19.5881049,112 23.9992458,112 L104.000754,112 C108.418616,112 112,108.409288 112,104 C112,99.581722 108.411895,96 104.000754,96 L23.9992458,96 L23.9992458,96 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/storyview-pop": {
            "title": "$:/core/images/storyview-pop",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-storyview-pop tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00697327,0 C3.58484404,0 0,3.59075293 0,8.00697327 L0,119.993027 C0,124.415156 3.59075293,128 8.00697327,128 L119.993027,128 C124.415156,128 128,124.409247 128,119.993027 L128,8.00697327 C128,3.58484404 124.409247,0 119.993027,0 L8.00697327,0 L8.00697327,0 Z M23.9992458,16 C19.5813843,16 16,19.5776607 16,23.9924054 L16,40.0075946 C16,44.4216782 19.5881049,48 23.9992458,48 L104.000754,48 C108.418616,48 112,44.4223393 112,40.0075946 L112,23.9924054 C112,19.5783218 108.411895,16 104.000754,16 L23.9992458,16 L23.9992458,16 Z M16.0098166,56 C11.586117,56 8,59.5776607 8,63.9924054 L8,80.0075946 C8,84.4216782 11.5838751,88 16.0098166,88 L111.990183,88 C116.413883,88 120,84.4223393 120,80.0075946 L120,63.9924054 C120,59.5783218 116.416125,56 111.990183,56 L16.0098166,56 L16.0098166,56 Z M23.9992458,96 C19.5813843,96 16,99.5907123 16,104 C16,108.418278 19.5881049,112 23.9992458,112 L104.000754,112 C108.418616,112 112,108.409288 112,104 C112,99.581722 108.411895,96 104.000754,96 L23.9992458,96 L23.9992458,96 Z M23.9992458,64 C19.5813843,64 16,67.5907123 16,72 C16,76.418278 19.5881049,80 23.9992458,80 L104.000754,80 C108.418616,80 112,76.4092877 112,72 C112,67.581722 108.411895,64 104.000754,64 L23.9992458,64 L23.9992458,64 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/storyview-zoomin": {
            "title": "$:/core/images/storyview-zoomin",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-storyview-zoomin tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M8.00697327,0 C3.58484404,0 0,3.59075293 0,8.00697327 L0,119.993027 C0,124.415156 3.59075293,128 8.00697327,128 L119.993027,128 C124.415156,128 128,124.409247 128,119.993027 L128,8.00697327 C128,3.58484404 124.409247,0 119.993027,0 L8.00697327,0 L8.00697327,0 Z M23.9992458,16 C19.5813843,16 16,19.578055 16,24.0085154 L16,71.9914846 C16,76.4144655 19.5881049,80 23.9992458,80 L104.000754,80 C108.418616,80 112,76.421945 112,71.9914846 L112,24.0085154 C112,19.5855345 108.411895,16 104.000754,16 L23.9992458,16 L23.9992458,16 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/strikethrough": {
            "title": "$:/core/images/strikethrough",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-strikethrough tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M92.793842,38.7255689 L108.215529,38.7255689 C107.987058,31.985687 106.70193,26.1883331 104.360107,21.3333333 C102.018284,16.4783336 98.8197436,12.4516001 94.7643909,9.25301205 C90.7090382,6.05442399 85.9969032,3.71263572 80.6278447,2.22757697 C75.2587862,0.742518233 69.4328739,0 63.1499331,0 C57.552404,0 52.0977508,0.713959839 46.7858099,2.14190094 C41.473869,3.56984203 36.7331757,5.74027995 32.5635877,8.65327979 C28.3939997,11.5662796 25.0526676,15.2788708 22.5394913,19.7911647 C20.026315,24.3034585 18.7697456,29.6438781 18.7697456,35.8125837 C18.7697456,41.4101128 19.883523,46.0651309 22.1111111,49.7777778 C24.3386992,53.4904246 27.3087722,56.5176144 31.021419,58.8594378 C34.7340659,61.2012612 38.9321497,63.0861151 43.6157965,64.5140562 C48.2994433,65.9419973 53.068695,67.1985666 57.9236948,68.2838019 C62.7786945,69.3690371 67.5479462,70.4256977 72.231593,71.4538153 C76.9152398,72.4819329 81.1133237,73.8241773 84.8259705,75.480589 C88.5386174,77.1370007 91.5086903,79.2788802 93.7362784,81.9062918 C95.9638666,84.5337035 97.0776439,87.9607107 97.0776439,92.1874163 C97.0776439,96.6425926 96.1637753,100.298067 94.3360107,103.153949 C92.5082461,106.009831 90.109341,108.265944 87.1392236,109.922356 C84.1691061,111.578768 80.827774,112.749662 77.1151272,113.435074 C73.4024803,114.120485 69.7184476,114.463186 66.0629183,114.463186 C61.4935068,114.463186 57.0383974,113.892018 52.6974565,112.749665 C48.3565156,111.607312 44.5582492,109.836692 41.3025435,107.437751 C38.0468378,105.03881 35.4194656,101.983062 33.4203481,98.270415 C31.4212305,94.5577681 30.4216867,90.1312171 30.4216867,84.9906292 L15,84.9906292 C15,92.4159229 16.3422445,98.8415614 19.0267738,104.267738 C21.711303,109.693914 25.3667774,114.149023 29.9933066,117.633199 C34.6198357,121.117376 39.9888137,123.71619 46.1004016,125.429719 C52.2119895,127.143248 58.6947448,128 65.5488621,128 C71.1463912,128 76.7723948,127.343157 82.4270415,126.029451 C88.0816882,124.715745 93.1936407,122.602424 97.7630522,119.689424 C102.332464,116.776425 106.073613,113.006717 108.986613,108.380187 C111.899613,103.753658 113.356091,98.1847715 113.356091,91.6733601 C113.356091,85.6188899 112.242314,80.5926126 110.014726,76.5943775 C107.787137,72.5961424 104.817065,69.2833688 101.104418,66.6559572 C97.3917708,64.0285455 93.193687,61.9437828 88.5100402,60.4016064 C83.8263934,58.85943 79.0571416,57.5171855 74.2021419,56.3748327 C69.3471422,55.2324798 64.5778904,54.1758192 59.8942436,53.2048193 C55.2105968,52.2338193 51.012513,51.0058084 47.2998661,49.5207497 C43.5872193,48.0356909 40.6171463,46.1222786 38.3895582,43.7804552 C36.1619701,41.4386318 35.0481928,38.3828836 35.0481928,34.6131191 C35.0481928,30.6148841 35.8192694,27.273552 37.3614458,24.5890228 C38.9036222,21.9044935 40.9598265,19.762614 43.5301205,18.1633199 C46.1004145,16.5640259 49.041929,15.4216902 52.3547523,14.7362784 C55.6675757,14.0508667 59.0374661,13.708166 62.4645248,13.708166 C70.9179361,13.708166 77.8576257,15.6786952 83.2838019,19.6198126 C88.709978,23.56093 91.8799597,29.9294518 92.793842,38.7255689 L92.793842,38.7255689 Z\"></path>\n        <rect x=\"5\" y=\"54\" width=\"118\" height=\"16\"></rect>\n    </g>\n</svg>"
        },
        "$:/core/images/subscript": {
            "title": "$:/core/images/subscript",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-subscript tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M2.27170276,16 L22.1825093,16 L43.8305003,49.6746527 L66.4138983,16 L85.1220387,16 L53.5854592,61.9685735 L87.3937414,111.411516 L67.0820462,111.411516 L43.295982,74.9306422 L19.1090291,111.411516 L0,111.411516 L33.8082822,61.9685735 L2.27170276,16 Z M127.910914,128.411516 L85.3276227,128.411516 C85.3870139,123.24448 86.6342108,118.730815 89.0692508,114.870386 C91.5042907,111.009956 94.8301491,107.654403 99.0469256,104.803624 C101.066227,103.318844 103.174584,101.878629 105.372059,100.482935 C107.569534,99.0872413 109.588805,97.5876355 111.429933,95.9840726 C113.271061,94.3805097 114.785514,92.6433426 115.973338,90.7725192 C117.161163,88.9016958 117.784761,86.7487964 117.844152,84.3137564 C117.844152,83.1853233 117.710524,81.9826691 117.443264,80.7057579 C117.176003,79.4288467 116.656338,78.2410402 115.884252,77.1423026 C115.112166,76.0435651 114.04314,75.123015 112.677142,74.3806248 C111.311144,73.6382345 109.529434,73.267045 107.331959,73.267045 C105.312658,73.267045 103.634881,73.6679297 102.298579,74.4697112 C100.962276,75.2714926 99.8932503,76.3702137 99.0914688,77.7659073 C98.2896874,79.161601 97.6957841,80.8096826 97.3097412,82.7102016 C96.9236982,84.6107206 96.7009845,86.6596869 96.6415933,88.857162 L86.4857457,88.857162 C86.4857457,85.4124713 86.9460207,82.2202411 87.8665846,79.2803758 C88.7871485,76.3405105 90.1679736,73.801574 92.0091014,71.6634901 C93.8502292,69.5254062 96.092214,67.8476295 98.7351233,66.6301095 C101.378033,65.4125895 104.451482,64.8038386 107.955564,64.8038386 C111.756602,64.8038386 114.933984,65.4274371 117.487807,66.6746527 C120.041629,67.9218683 122.105443,69.4957119 123.67931,71.3962309 C125.253178,73.2967499 126.366746,75.3605638 127.02005,77.5877345 C127.673353,79.8149053 128,81.9381095 128,83.9574109 C128,86.4518421 127.613963,88.7086746 126.841877,90.727976 C126.069791,92.7472774 125.03046,94.6032252 123.723854,96.2958749 C122.417247,97.9885247 120.932489,99.5475208 119.269534,100.97291 C117.60658,102.398299 115.884261,103.734582 114.102524,104.981797 C112.320788,106.229013 110.539078,107.416819 108.757341,108.545253 C106.975605,109.673686 105.327523,110.802102 103.813047,111.930535 C102.298571,113.058968 100.977136,114.231927 99.8487031,115.449447 C98.7202699,116.666967 97.9481956,117.958707 97.5324571,119.324705 L127.910914,119.324705 L127.910914,128.411516 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/superscript": {
            "title": "$:/core/images/superscript",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-superscript tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M2.27170276,16 L22.1825093,16 L43.8305003,49.6746527 L66.4138983,16 L85.1220387,16 L53.5854592,61.9685735 L87.3937414,111.411516 L67.0820462,111.411516 L43.295982,74.9306422 L19.1090291,111.411516 L0,111.411516 L33.8082822,61.9685735 L2.27170276,16 Z M127.910914,63.4115159 L85.3276227,63.4115159 C85.3870139,58.2444799 86.6342108,53.7308149 89.0692508,49.8703857 C91.5042907,46.0099565 94.8301491,42.654403 99.0469256,39.8036245 C101.066227,38.318844 103.174584,36.8786285 105.372059,35.4829349 C107.569534,34.0872413 109.588805,32.5876355 111.429933,30.9840726 C113.271061,29.3805097 114.785514,27.6433426 115.973338,25.7725192 C117.161163,23.9016958 117.784761,21.7487964 117.844152,19.3137564 C117.844152,18.1853233 117.710524,16.9826691 117.443264,15.7057579 C117.176003,14.4288467 116.656338,13.2410402 115.884252,12.1423026 C115.112166,11.0435651 114.04314,10.123015 112.677142,9.38062477 C111.311144,8.63823453 109.529434,8.26704499 107.331959,8.26704499 C105.312658,8.26704499 103.634881,8.6679297 102.298579,9.46971115 C100.962276,10.2714926 99.8932503,11.3702137 99.0914688,12.7659073 C98.2896874,14.161601 97.6957841,15.8096826 97.3097412,17.7102016 C96.9236982,19.6107206 96.7009845,21.6596869 96.6415933,23.857162 L86.4857457,23.857162 C86.4857457,20.4124713 86.9460207,17.2202411 87.8665846,14.2803758 C88.7871485,11.3405105 90.1679736,8.80157397 92.0091014,6.6634901 C93.8502292,4.52540622 96.092214,2.84762946 98.7351233,1.63010947 C101.378033,0.412589489 104.451482,-0.196161372 107.955564,-0.196161372 C111.756602,-0.196161372 114.933984,0.427437071 117.487807,1.67465266 C120.041629,2.92186826 122.105443,4.49571195 123.67931,6.39623095 C125.253178,8.29674995 126.366746,10.3605638 127.02005,12.5877345 C127.673353,14.8149053 128,16.9381095 128,18.9574109 C128,21.4518421 127.613963,23.7086746 126.841877,25.727976 C126.069791,27.7472774 125.03046,29.6032252 123.723854,31.2958749 C122.417247,32.9885247 120.932489,34.5475208 119.269534,35.97291 C117.60658,37.3982993 115.884261,38.7345816 114.102524,39.9817972 C112.320788,41.2290128 110.539078,42.4168194 108.757341,43.5452525 C106.975605,44.6736857 105.327523,45.8021019 103.813047,46.9305351 C102.298571,48.0589682 100.977136,49.2319272 99.8487031,50.4494472 C98.7202699,51.6669672 97.9481956,52.9587068 97.5324571,54.3247048 L127.910914,54.3247048 L127.910914,63.4115159 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/tag-button": {
            "title": "$:/core/images/tag-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-tag-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M18.1643182,47.6600756 L18.1677196,51.7651887 C18.1708869,55.5878829 20.3581578,60.8623899 23.0531352,63.5573673 L84.9021823,125.406414 C87.5996731,128.103905 91.971139,128.096834 94.6717387,125.396234 L125.766905,94.3010679 C128.473612,91.5943612 128.472063,87.2264889 125.777085,84.5315115 L63.9280381,22.6824644 C61.2305472,19.9849735 55.9517395,17.801995 52.1318769,17.8010313 L25.0560441,17.7942007 C21.2311475,17.7932358 18.1421354,20.8872832 18.1452985,24.7049463 L18.1535504,34.6641936 C18.2481119,34.6754562 18.3439134,34.6864294 18.4409623,34.6971263 C22.1702157,35.1081705 26.9295004,34.6530132 31.806204,33.5444844 C32.1342781,33.0700515 32.5094815,32.6184036 32.9318197,32.1960654 C35.6385117,29.4893734 39.5490441,28.718649 42.94592,29.8824694 C43.0432142,29.8394357 43.1402334,29.7961748 43.2369683,29.7526887 L43.3646982,30.0368244 C44.566601,30.5115916 45.6933052,31.2351533 46.6655958,32.2074439 C50.4612154,36.0030635 50.4663097,42.1518845 46.6769742,45.94122 C43.0594074,49.5587868 37.2914155,49.7181264 33.4734256,46.422636 C28.1082519,47.5454734 22.7987486,48.0186448 18.1643182,47.6600756 Z\"></path>\n        <path d=\"M47.6333528,39.5324628 L47.6562932,39.5834939 C37.9670934,43.9391617 26.0718874,46.3819521 17.260095,45.4107025 C5.27267473,44.0894301 -1.02778744,36.4307276 2.44271359,24.0779512 C5.56175386,12.9761516 14.3014034,4.36129832 24.0466405,1.54817001 C34.7269254,-1.53487574 43.7955833,3.51606438 43.7955834,14.7730751 L35.1728168,14.7730752 C35.1728167,9.91428944 32.0946059,8.19982862 26.4381034,9.83267419 C19.5270911,11.8276553 13.046247,18.2159574 10.7440788,26.4102121 C8.82861123,33.2280582 11.161186,36.0634845 18.2047888,36.8398415 C25.3302805,37.6252244 35.7353482,35.4884477 44.1208333,31.7188498 L44.1475077,31.7781871 C44.159701,31.7725635 44.1718402,31.7671479 44.1839238,31.7619434 C45.9448098,31.0035157 50.4503245,38.3109156 47.7081571,39.5012767 C47.6834429,39.512005 47.6585061,39.5223987 47.6333528,39.5324628 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/theme-button": {
            "title": "$:/core/images/theme-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-theme-button tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M55.854113,66.9453198 C54.3299482,65.1432292 53.0133883,63.518995 51.9542746,62.1263761 C40.8899947,47.578055 35.3091807,55.2383404 28.9941893,62.1263758 C22.6791979,69.0144112 30.6577916,74.5954741 24.6646171,79.4611023 C18.6714426,84.3267304 19.0414417,86.0133155 8.92654943,77.1119468 C-1.18834284,68.2105781 -1.88793412,65.7597832 2.7553553,60.6807286 C7.39864472,55.601674 11.2794845,63.5989423 20.7646627,54.5728325 C30.2498409,45.5467226 22.2819131,37.5470737 22.2819131,37.5470737 C22.2819131,37.5470737 42.0310399,-2.82433362 68.4206088,0.157393922 C94.8101776,3.13912147 58.4373806,-3.70356506 49.3898693,27.958066 C45.5161782,41.5139906 50.1107906,38.3197672 57.4560458,44.0453955 C59.1625767,45.3756367 63.8839488,48.777453 70.127165,53.3625321 C63.9980513,59.2416709 58.9704753,64.0315459 55.854113,66.9453198 Z M67.4952439,79.8919946 C83.5082212,96.9282402 105.237121,117.617674 112.611591,120.312493 C123.044132,124.12481 128.000001,117.170903 128,105.522947 C127.999999,98.3705516 104.170675,78.980486 84.0760493,63.7529565 C76.6683337,70.9090328 70.7000957,76.7055226 67.4952439,79.8919946 Z\"></path>\n        <path d=\"M58.2852966,138.232794 L58.2852966,88.3943645 C56.318874,88.3923153 54.7254089,86.7952906 54.7254089,84.8344788 C54.7254089,82.8684071 56.3175932,81.2745911 58.2890859,81.2745911 L79.6408336,81.2745911 C81.608998,81.2745911 83.2045105,82.8724076 83.2045105,84.8344788 C83.2045105,86.7992907 81.614366,88.3923238 79.6446228,88.3943645 L79.6446228,88.3943646 L79.6446228,138.232794 C79.6446228,144.131009 74.8631748,148.912457 68.9649597,148.912457 C63.0667446,148.912457 58.2852966,144.131009 58.2852966,138.232794 Z M65.405072,-14.8423767 L72.5248474,-14.8423767 L76.0847351,-0.690681892 L72.5248474,6.51694947 L72.5248474,81.2745911 L65.405072,81.2745911 L65.405072,6.51694947 L61.8451843,-0.690681892 L65.405072,-14.8423767 Z\" transform=\"translate(68.964960, 67.035040) rotate(45.000000) translate(-68.964960, -67.035040) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/tip": {
            "title": "$:/core/images/tip",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-tip tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64,128.241818 C99.346224,128.241818 128,99.5880417 128,64.2418177 C128,28.8955937 99.346224,0.241817675 64,0.241817675 C28.653776,0.241817675 0,28.8955937 0,64.2418177 C0,99.5880417 28.653776,128.241818 64,128.241818 Z M75.9358659,91.4531941 C75.3115438,95.581915 70.2059206,98.8016748 64,98.8016748 C57.7940794,98.8016748 52.6884562,95.581915 52.0641341,91.4531941 C54.3299053,94.0502127 58.8248941,95.8192805 64,95.8192805 C69.1751059,95.8192805 73.6700947,94.0502127 75.9358659,91.4531941 L75.9358659,91.4531941 Z M75.9358659,95.9453413 C75.3115438,100.074062 70.2059206,103.293822 64,103.293822 C57.7940794,103.293822 52.6884562,100.074062 52.0641341,95.9453413 C54.3299053,98.5423599 58.8248941,100.311428 64,100.311428 C69.1751059,100.311428 73.6700947,98.5423599 75.9358659,95.9453413 L75.9358659,95.9453413 Z M75.9358659,100.40119 C75.3115438,104.529911 70.2059206,107.74967 64,107.74967 C57.7940794,107.74967 52.6884562,104.529911 52.0641341,100.40119 C54.3299053,102.998208 58.8248941,104.767276 64,104.767276 C69.1751059,104.767276 73.6700947,102.998208 75.9358659,100.40119 L75.9358659,100.40119 Z M75.9358659,104.893337 C75.3115438,109.022058 70.2059206,112.241818 64,112.241818 C57.7940794,112.241818 52.6884562,109.022058 52.0641341,104.893337 C54.3299053,107.490356 58.8248941,109.259423 64,109.259423 C69.1751059,109.259423 73.6700947,107.490356 75.9358659,104.893337 L75.9358659,104.893337 Z M64.3010456,24.2418177 C75.9193117,24.2418188 88.0000013,32.0619847 88,48.4419659 C87.9999987,64.8219472 75.9193018,71.7540963 75.9193021,83.5755932 C75.9193022,89.4486648 70.0521957,92.8368862 63.9999994,92.8368862 C57.947803,92.8368862 51.9731007,89.8295115 51.9731007,83.5755932 C51.9731007,71.1469799 39.9999998,65.4700602 40,48.4419647 C40.0000002,31.4138691 52.6827796,24.2418166 64.3010456,24.2418177 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/twitter": {
            "title": "$:/core/images/twitter",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-twitter tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M41.6263422,115.803477 C27.0279663,115.803477 13.4398394,111.540813 1.99987456,104.234833 C4.02221627,104.472643 6.08004574,104.594302 8.16644978,104.594302 C20.277456,104.594302 31.4238403,100.47763 40.270894,93.5715185 C28.9590538,93.3635501 19.4123842,85.9189246 16.1230832,75.6885328 C17.7011365,75.9892376 19.320669,76.1503787 20.9862896,76.1503787 C23.344152,76.1503787 25.6278127,75.8359011 27.7971751,75.247346 C15.9709927,72.8821073 7.06079851,62.4745062 7.06079851,49.9982394 C7.06079851,49.8898938 7.06079851,49.7820074 7.06264203,49.67458 C10.5482779,51.6032228 14.5339687,52.7615103 18.7717609,52.8951059 C11.8355159,48.277565 7.2714207,40.3958845 7.2714207,31.4624258 C7.2714207,26.7434257 8.54621495,22.3200804 10.7713439,18.5169676 C23.5211299,34.0957738 42.568842,44.3472839 64.0532269,45.4210985 C63.6126256,43.5365285 63.3835682,41.5711584 63.3835682,39.5529928 C63.3835682,25.3326379 74.95811,13.8034766 89.2347917,13.8034766 C96.6697089,13.8034766 103.387958,16.930807 108.103682,21.9353619 C113.991886,20.780288 119.52429,18.6372496 124.518847,15.6866694 C122.588682,21.6993889 118.490075,26.7457211 113.152623,29.9327334 C118.381769,29.3102055 123.363882,27.926045 127.999875,25.8780385 C124.534056,31.0418981 120.151087,35.5772616 115.100763,39.2077561 C115.150538,40.3118708 115.175426,41.4224128 115.175426,42.538923 C115.175426,76.5663154 89.1744164,115.803477 41.6263422,115.803477\"></path>\n    </g>\n</svg>\n"
        },
        "$:/core/images/underline": {
            "title": "$:/core/images/underline",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-underline tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M7,117.421488 L121.247934,117.421488 L121.247934,128 L7,128 L7,117.421488 Z M104.871212,98.8958333 L104.871212,0 L88.6117424,0 L88.6117424,55.8560606 C88.6117424,60.3194668 88.0060035,64.432115 86.7945076,68.1941288 C85.5830116,71.9561425 83.7657949,75.239885 81.342803,78.0454545 C78.9198111,80.8510241 75.8911167,83.0189317 72.2566288,84.5492424 C68.6221409,86.0795531 64.3182067,86.844697 59.344697,86.844697 C53.0959284,86.844697 48.1862552,85.0593613 44.6155303,81.4886364 C41.0448054,77.9179114 39.2594697,73.0720003 39.2594697,66.9507576 L39.2594697,0 L23,0 L23,65.0378788 C23,70.3939662 23.5419769,75.2717583 24.625947,79.6714015 C25.709917,84.0710447 27.5908957,87.864883 30.2689394,91.0530303 C32.9469831,94.2411776 36.4538925,96.6960141 40.7897727,98.4176136 C45.125653,100.139213 50.545422,101 57.0492424,101 C64.3182182,101 70.630655,99.5653553 75.9867424,96.6960227 C81.3428298,93.8266902 85.742407,89.33147 89.1856061,83.2102273 L89.5681818,83.2102273 L89.5681818,98.8958333 L104.871212,98.8958333 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/unfold-all-button": {
            "title": "$:/core/images/unfold-all-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-unfold-all tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <rect x=\"0\" y=\"64\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M85.598226,8.34884273 C84.1490432,6.89863875 82.1463102,6 79.9340286,6 L47.9482224,6 C43.5292967,6 39.9411255,9.581722 39.9411255,14 C39.9411255,18.4092877 43.5260249,22 47.9482224,22 L71.9411255,22 L71.9411255,45.9929031 C71.9411255,50.4118288 75.5228475,54 79.9411255,54 C84.3504132,54 87.9411255,50.4151006 87.9411255,45.9929031 L87.9411255,14.0070969 C87.9411255,11.7964515 87.0447363,9.79371715 85.5956548,8.34412458 Z\" transform=\"translate(63.941125, 30.000000) scale(1, -1) rotate(-45.000000) translate(-63.941125, -30.000000) \"></path>\n        <path d=\"M85.6571005,72.2899682 C84.2079177,70.8397642 82.2051847,69.9411255 79.9929031,69.9411255 L48.0070969,69.9411255 C43.5881712,69.9411255 40,73.5228475 40,77.9411255 C40,82.3504132 43.5848994,85.9411255 48.0070969,85.9411255 L72,85.9411255 L72,109.934029 C72,114.352954 75.581722,117.941125 80,117.941125 C84.4092877,117.941125 88,114.356226 88,109.934029 L88,77.9482224 C88,75.737577 87.1036108,73.7348426 85.6545293,72.2852501 Z\" transform=\"translate(64.000000, 93.941125) scale(1, -1) rotate(-45.000000) translate(-64.000000, -93.941125) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/unfold-button": {
            "title": "$:/core/images/unfold-button",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-unfold tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <rect x=\"0\" y=\"0\" width=\"128\" height=\"16\" rx=\"8\"></rect>\n        <path d=\"M85.598226,11.3488427 C84.1490432,9.89863875 82.1463102,9 79.9340286,9 L47.9482224,9 C43.5292967,9 39.9411255,12.581722 39.9411255,17 C39.9411255,21.4092877 43.5260249,25 47.9482224,25 L71.9411255,25 L71.9411255,48.9929031 C71.9411255,53.4118288 75.5228475,57 79.9411255,57 C84.3504132,57 87.9411255,53.4151006 87.9411255,48.9929031 L87.9411255,17.0070969 C87.9411255,14.7964515 87.0447363,12.7937171 85.5956548,11.3441246 Z\" transform=\"translate(63.941125, 33.000000) scale(1, -1) rotate(-45.000000) translate(-63.941125, -33.000000) \"></path>\n        <path d=\"M85.6571005,53.4077172 C84.2079177,51.9575133 82.2051847,51.0588745 79.9929031,51.0588745 L48.0070969,51.0588745 C43.5881712,51.0588745 40,54.6405965 40,59.0588745 C40,63.4681622 43.5848994,67.0588745 48.0070969,67.0588745 L72,67.0588745 L72,91.0517776 C72,95.4707033 75.581722,99.0588745 80,99.0588745 C84.4092877,99.0588745 88,95.4739751 88,91.0517776 L88,59.0659714 C88,56.855326 87.1036108,54.8525917 85.6545293,53.4029991 Z\" transform=\"translate(64.000000, 75.058875) scale(1, -1) rotate(-45.000000) translate(-64.000000, -75.058875) \"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/unlocked-padlock": {
            "title": "$:/core/images/unlocked-padlock",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-unlocked-padlock tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M48.6266053,64 L105,64 L105,96.0097716 C105,113.673909 90.6736461,128 73.001193,128 L55.998807,128 C38.3179793,128 24,113.677487 24,96.0097716 L24,64 L30.136303,64 C19.6806213,51.3490406 2.77158986,28.2115132 25.8366966,8.85759246 C50.4723026,-11.8141335 71.6711028,13.2108337 81.613302,25.0594855 C91.5555012,36.9081373 78.9368488,47.4964439 69.1559674,34.9513593 C59.375086,22.4062748 47.9893192,10.8049522 35.9485154,20.9083862 C23.9077117,31.0118202 34.192312,43.2685325 44.7624679,55.8655518 C47.229397,58.805523 48.403443,61.5979188 48.6266053,64 Z M67.7315279,92.3641717 C70.8232551,91.0923621 73,88.0503841 73,84.5 C73,79.8055796 69.1944204,76 64.5,76 C59.8055796,76 56,79.8055796 56,84.5 C56,87.947435 58.0523387,90.9155206 61.0018621,92.2491029 L55.9067479,115.020857 L72.8008958,115.020857 L67.7315279,92.3641717 L67.7315279,92.3641717 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/up-arrow": {
            "created": "20150316000544368",
            "modified": "20150316000831867",
            "tags": "$:/tags/Image",
            "title": "$:/core/images/up-arrow",
            "text": "<svg class=\"tc-image-up-arrow tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n<path transform=\"rotate(-135, 63.8945, 64.1752)\" d=\"m109.07576,109.35336c-1.43248,1.43361 -3.41136,2.32182 -5.59717,2.32182l-79.16816,0c-4.36519,0 -7.91592,-3.5444 -7.91592,-7.91666c0,-4.36337 3.54408,-7.91667 7.91592,-7.91667l71.25075,0l0,-71.25074c0,-4.3652 3.54442,-7.91592 7.91667,-7.91592c4.36336,0 7.91667,3.54408 7.91667,7.91592l0,79.16815c0,2.1825 -0.88602,4.16136 -2.3185,5.59467l-0.00027,-0.00056l0.00001,-0.00001z\" />\n</svg>\n \n"
        },
        "$:/core/images/video": {
            "title": "$:/core/images/video",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-video tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M64,12 C29.0909091,12 8.72727273,14.9166667 5.81818182,17.8333333 C2.90909091,20.75 1.93784382e-15,41.1666667 0,64.5 C1.93784382e-15,87.8333333 2.90909091,108.25 5.81818182,111.166667 C8.72727273,114.083333 29.0909091,117 64,117 C98.9090909,117 119.272727,114.083333 122.181818,111.166667 C125.090909,108.25 128,87.8333333 128,64.5 C128,41.1666667 125.090909,20.75 122.181818,17.8333333 C119.272727,14.9166667 98.9090909,12 64,12 Z M54.9161194,44.6182253 C51.102648,42.0759111 48.0112186,43.7391738 48.0112186,48.3159447 L48.0112186,79.6840553 C48.0112186,84.2685636 51.109784,85.9193316 54.9161194,83.3817747 L77.0838806,68.6032672 C80.897352,66.0609529 80.890216,61.9342897 77.0838806,59.3967328 L54.9161194,44.6182253 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/core/images/warning": {
            "title": "$:/core/images/warning",
            "tags": "$:/tags/Image",
            "text": "<svg class=\"tc-image-warning tc-image-button\" width=\"22pt\" height=\"22pt\" viewBox=\"0 0 128 128\">\n    <g fill-rule=\"evenodd\">\n        <path d=\"M57.0717968,11 C60.1509982,5.66666667 67.8490018,5.66666667 70.9282032,11 L126.353829,107 C129.433031,112.333333 125.584029,119 119.425626,119 L8.57437416,119 C2.41597129,119 -1.43303051,112.333333 1.64617093,107 L57.0717968,11 Z M64,37 C59.581722,37 56,40.5820489 56,44.9935776 L56,73.0064224 C56,77.4211534 59.5907123,81 64,81 C68.418278,81 72,77.4179511 72,73.0064224 L72,44.9935776 C72,40.5788466 68.4092877,37 64,37 Z M64,104 C68.418278,104 72,100.418278 72,96 C72,91.581722 68.418278,88 64,88 C59.581722,88 56,91.581722 56,96 C56,100.418278 59.581722,104 64,104 Z\"></path>\n    </g>\n</svg>"
        },
        "$:/language/Buttons/AdvancedSearch/Caption": {
            "title": "$:/language/Buttons/AdvancedSearch/Caption",
            "text": "advanced search"
        },
        "$:/language/Buttons/AdvancedSearch/Hint": {
            "title": "$:/language/Buttons/AdvancedSearch/Hint",
            "text": "Advanced search"
        },
        "$:/language/Buttons/Cancel/Caption": {
            "title": "$:/language/Buttons/Cancel/Caption",
            "text": "cancel"
        },
        "$:/language/Buttons/Cancel/Hint": {
            "title": "$:/language/Buttons/Cancel/Hint",
            "text": "Discard changes to this tiddler"
        },
        "$:/language/Buttons/Clone/Caption": {
            "title": "$:/language/Buttons/Clone/Caption",
            "text": "clone"
        },
        "$:/language/Buttons/Clone/Hint": {
            "title": "$:/language/Buttons/Clone/Hint",
            "text": "Clone this tiddler"
        },
        "$:/language/Buttons/Close/Caption": {
            "title": "$:/language/Buttons/Close/Caption",
            "text": "close"
        },
        "$:/language/Buttons/Close/Hint": {
            "title": "$:/language/Buttons/Close/Hint",
            "text": "Close this tiddler"
        },
        "$:/language/Buttons/CloseAll/Caption": {
            "title": "$:/language/Buttons/CloseAll/Caption",
            "text": "close all"
        },
        "$:/language/Buttons/CloseAll/Hint": {
            "title": "$:/language/Buttons/CloseAll/Hint",
            "text": "Close all tiddlers"
        },
        "$:/language/Buttons/CloseOthers/Caption": {
            "title": "$:/language/Buttons/CloseOthers/Caption",
            "text": "close others"
        },
        "$:/language/Buttons/CloseOthers/Hint": {
            "title": "$:/language/Buttons/CloseOthers/Hint",
            "text": "Close other tiddlers"
        },
        "$:/language/Buttons/ControlPanel/Caption": {
            "title": "$:/language/Buttons/ControlPanel/Caption",
            "text": "control panel"
        },
        "$:/language/Buttons/ControlPanel/Hint": {
            "title": "$:/language/Buttons/ControlPanel/Hint",
            "text": "Open control panel"
        },
        "$:/language/Buttons/Delete/Caption": {
            "title": "$:/language/Buttons/Delete/Caption",
            "text": "delete"
        },
        "$:/language/Buttons/Delete/Hint": {
            "title": "$:/language/Buttons/Delete/Hint",
            "text": "Delete this tiddler"
        },
        "$:/language/Buttons/Edit/Caption": {
            "title": "$:/language/Buttons/Edit/Caption",
            "text": "edit"
        },
        "$:/language/Buttons/Edit/Hint": {
            "title": "$:/language/Buttons/Edit/Hint",
            "text": "Edit this tiddler"
        },
        "$:/language/Buttons/Encryption/Caption": {
            "title": "$:/language/Buttons/Encryption/Caption",
            "text": "encryption"
        },
        "$:/language/Buttons/Encryption/Hint": {
            "title": "$:/language/Buttons/Encryption/Hint",
            "text": "Set or clear a password for saving this wiki"
        },
        "$:/language/Buttons/Encryption/ClearPassword/Caption": {
            "title": "$:/language/Buttons/Encryption/ClearPassword/Caption",
            "text": "clear password"
        },
        "$:/language/Buttons/Encryption/ClearPassword/Hint": {
            "title": "$:/language/Buttons/Encryption/ClearPassword/Hint",
            "text": "Clear the password and save this wiki without encryption"
        },
        "$:/language/Buttons/Encryption/SetPassword/Caption": {
            "title": "$:/language/Buttons/Encryption/SetPassword/Caption",
            "text": "set password"
        },
        "$:/language/Buttons/Encryption/SetPassword/Hint": {
            "title": "$:/language/Buttons/Encryption/SetPassword/Hint",
            "text": "Set a password for saving this wiki with encryption"
        },
        "$:/language/Buttons/ExportPage/Caption": {
            "title": "$:/language/Buttons/ExportPage/Caption",
            "text": "export all"
        },
        "$:/language/Buttons/ExportPage/Hint": {
            "title": "$:/language/Buttons/ExportPage/Hint",
            "text": "Export all tiddlers"
        },
        "$:/language/Buttons/ExportTiddler/Caption": {
            "title": "$:/language/Buttons/ExportTiddler/Caption",
            "text": "export tiddler"
        },
        "$:/language/Buttons/ExportTiddler/Hint": {
            "title": "$:/language/Buttons/ExportTiddler/Hint",
            "text": "Export tiddler"
        },
        "$:/language/Buttons/ExportTiddlers/Caption": {
            "title": "$:/language/Buttons/ExportTiddlers/Caption",
            "text": "export tiddlers"
        },
        "$:/language/Buttons/ExportTiddlers/Hint": {
            "title": "$:/language/Buttons/ExportTiddlers/Hint",
            "text": "Export tiddlers"
        },
        "$:/language/Buttons/Fold/Caption": {
            "title": "$:/language/Buttons/Fold/Caption",
            "text": "fold tiddler"
        },
        "$:/language/Buttons/Fold/Hint": {
            "title": "$:/language/Buttons/Fold/Hint",
            "text": "Fold the body of this tiddler"
        },
        "$:/language/Buttons/Fold/FoldBar/Caption": {
            "title": "$:/language/Buttons/Fold/FoldBar/Caption",
            "text": "fold-bar"
        },
        "$:/language/Buttons/Fold/FoldBar/Hint": {
            "title": "$:/language/Buttons/Fold/FoldBar/Hint",
            "text": "Optional bars to fold and unfold tiddlers"
        },
        "$:/language/Buttons/Unfold/Caption": {
            "title": "$:/language/Buttons/Unfold/Caption",
            "text": "unfold tiddler"
        },
        "$:/language/Buttons/Unfold/Hint": {
            "title": "$:/language/Buttons/Unfold/Hint",
            "text": "Unfold the body of this tiddler"
        },
        "$:/language/Buttons/FoldOthers/Caption": {
            "title": "$:/language/Buttons/FoldOthers/Caption",
            "text": "fold other tiddlers"
        },
        "$:/language/Buttons/FoldOthers/Hint": {
            "title": "$:/language/Buttons/FoldOthers/Hint",
            "text": "Fold the bodies of other opened tiddlers"
        },
        "$:/language/Buttons/FoldAll/Caption": {
            "title": "$:/language/Buttons/FoldAll/Caption",
            "text": "fold all tiddlers"
        },
        "$:/language/Buttons/FoldAll/Hint": {
            "title": "$:/language/Buttons/FoldAll/Hint",
            "text": "Fold the bodies of all opened tiddlers"
        },
        "$:/language/Buttons/UnfoldAll/Caption": {
            "title": "$:/language/Buttons/UnfoldAll/Caption",
            "text": "unfold all tiddlers"
        },
        "$:/language/Buttons/UnfoldAll/Hint": {
            "title": "$:/language/Buttons/UnfoldAll/Hint",
            "text": "Unfold the bodies of all opened tiddlers"
        },
        "$:/language/Buttons/FullScreen/Caption": {
            "title": "$:/language/Buttons/FullScreen/Caption",
            "text": "full-screen"
        },
        "$:/language/Buttons/FullScreen/Hint": {
            "title": "$:/language/Buttons/FullScreen/Hint",
            "text": "Enter or leave full-screen mode"
        },
        "$:/language/Buttons/Help/Caption": {
            "title": "$:/language/Buttons/Help/Caption",
            "text": "help"
        },
        "$:/language/Buttons/Help/Hint": {
            "title": "$:/language/Buttons/Help/Hint",
            "text": "Show help panel"
        },
        "$:/language/Buttons/Import/Caption": {
            "title": "$:/language/Buttons/Import/Caption",
            "text": "import"
        },
        "$:/language/Buttons/Import/Hint": {
            "title": "$:/language/Buttons/Import/Hint",
            "text": "Import many types of file including text, image, TiddlyWiki or JSON"
        },
        "$:/language/Buttons/Info/Caption": {
            "title": "$:/language/Buttons/Info/Caption",
            "text": "info"
        },
        "$:/language/Buttons/Info/Hint": {
            "title": "$:/language/Buttons/Info/Hint",
            "text": "Show information for this tiddler"
        },
        "$:/language/Buttons/Home/Caption": {
            "title": "$:/language/Buttons/Home/Caption",
            "text": "home"
        },
        "$:/language/Buttons/Home/Hint": {
            "title": "$:/language/Buttons/Home/Hint",
            "text": "Open the default tiddlers"
        },
        "$:/language/Buttons/Language/Caption": {
            "title": "$:/language/Buttons/Language/Caption",
            "text": "language"
        },
        "$:/language/Buttons/Language/Hint": {
            "title": "$:/language/Buttons/Language/Hint",
            "text": "Choose the user interface language"
        },
        "$:/language/Buttons/More/Caption": {
            "title": "$:/language/Buttons/More/Caption",
            "text": "more"
        },
        "$:/language/Buttons/More/Hint": {
            "title": "$:/language/Buttons/More/Hint",
            "text": "More actions"
        },
        "$:/language/Buttons/NewHere/Caption": {
            "title": "$:/language/Buttons/NewHere/Caption",
            "text": "new here"
        },
        "$:/language/Buttons/NewHere/Hint": {
            "title": "$:/language/Buttons/NewHere/Hint",
            "text": "Create a new tiddler tagged with this one"
        },
        "$:/language/Buttons/NewJournal/Caption": {
            "title": "$:/language/Buttons/NewJournal/Caption",
            "text": "new journal"
        },
        "$:/language/Buttons/NewJournal/Hint": {
            "title": "$:/language/Buttons/NewJournal/Hint",
            "text": "Create a new journal tiddler"
        },
        "$:/language/Buttons/NewJournalHere/Caption": {
            "title": "$:/language/Buttons/NewJournalHere/Caption",
            "text": "new journal here"
        },
        "$:/language/Buttons/NewJournalHere/Hint": {
            "title": "$:/language/Buttons/NewJournalHere/Hint",
            "text": "Create a new journal tiddler tagged with this one"
        },
        "$:/language/Buttons/NewImage/Caption": {
            "title": "$:/language/Buttons/NewImage/Caption",
            "text": "new image"
        },
        "$:/language/Buttons/NewImage/Hint": {
            "title": "$:/language/Buttons/NewImage/Hint",
            "text": "Create a new image tiddler"
        },
        "$:/language/Buttons/NewMarkdown/Caption": {
            "title": "$:/language/Buttons/NewMarkdown/Caption",
            "text": "new Markdown tiddler"
        },
        "$:/language/Buttons/NewMarkdown/Hint": {
            "title": "$:/language/Buttons/NewMarkdown/Hint",
            "text": "Create a new Markdown tiddler"
        },
        "$:/language/Buttons/NewTiddler/Caption": {
            "title": "$:/language/Buttons/NewTiddler/Caption",
            "text": "new tiddler"
        },
        "$:/language/Buttons/NewTiddler/Hint": {
            "title": "$:/language/Buttons/NewTiddler/Hint",
            "text": "Create a new tiddler"
        },
        "$:/language/Buttons/OpenWindow/Caption": {
            "title": "$:/language/Buttons/OpenWindow/Caption",
            "text": "open in new window"
        },
        "$:/language/Buttons/OpenWindow/Hint": {
            "title": "$:/language/Buttons/OpenWindow/Hint",
            "text": "Open tiddler in new window"
        },
        "$:/language/Buttons/Palette/Caption": {
            "title": "$:/language/Buttons/Palette/Caption",
            "text": "palette"
        },
        "$:/language/Buttons/Palette/Hint": {
            "title": "$:/language/Buttons/Palette/Hint",
            "text": "Choose the colour palette"
        },
        "$:/language/Buttons/Permalink/Caption": {
            "title": "$:/language/Buttons/Permalink/Caption",
            "text": "permalink"
        },
        "$:/language/Buttons/Permalink/Hint": {
            "title": "$:/language/Buttons/Permalink/Hint",
            "text": "Set browser address bar to a direct link to this tiddler"
        },
        "$:/language/Buttons/Permaview/Caption": {
            "title": "$:/language/Buttons/Permaview/Caption",
            "text": "permaview"
        },
        "$:/language/Buttons/Permaview/Hint": {
            "title": "$:/language/Buttons/Permaview/Hint",
            "text": "Set browser address bar to a direct link to all the tiddlers in this story"
        },
        "$:/language/Buttons/Refresh/Caption": {
            "title": "$:/language/Buttons/Refresh/Caption",
            "text": "refresh"
        },
        "$:/language/Buttons/Refresh/Hint": {
            "title": "$:/language/Buttons/Refresh/Hint",
            "text": "Perform a full refresh of the wiki"
        },
        "$:/language/Buttons/Save/Caption": {
            "title": "$:/language/Buttons/Save/Caption",
            "text": "ok"
        },
        "$:/language/Buttons/Save/Hint": {
            "title": "$:/language/Buttons/Save/Hint",
            "text": "Confirm changes to this tiddler"
        },
        "$:/language/Buttons/SaveWiki/Caption": {
            "title": "$:/language/Buttons/SaveWiki/Caption",
            "text": "save changes"
        },
        "$:/language/Buttons/SaveWiki/Hint": {
            "title": "$:/language/Buttons/SaveWiki/Hint",
            "text": "Save changes"
        },
        "$:/language/Buttons/StoryView/Caption": {
            "title": "$:/language/Buttons/StoryView/Caption",
            "text": "storyview"
        },
        "$:/language/Buttons/StoryView/Hint": {
            "title": "$:/language/Buttons/StoryView/Hint",
            "text": "Choose the story visualisation"
        },
        "$:/language/Buttons/HideSideBar/Caption": {
            "title": "$:/language/Buttons/HideSideBar/Caption",
            "text": "hide sidebar"
        },
        "$:/language/Buttons/HideSideBar/Hint": {
            "title": "$:/language/Buttons/HideSideBar/Hint",
            "text": "Hide sidebar"
        },
        "$:/language/Buttons/ShowSideBar/Caption": {
            "title": "$:/language/Buttons/ShowSideBar/Caption",
            "text": "show sidebar"
        },
        "$:/language/Buttons/ShowSideBar/Hint": {
            "title": "$:/language/Buttons/ShowSideBar/Hint",
            "text": "Show sidebar"
        },
        "$:/language/Buttons/TagManager/Caption": {
            "title": "$:/language/Buttons/TagManager/Caption",
            "text": "tag manager"
        },
        "$:/language/Buttons/TagManager/Hint": {
            "title": "$:/language/Buttons/TagManager/Hint",
            "text": "Open tag manager"
        },
        "$:/language/Buttons/Theme/Caption": {
            "title": "$:/language/Buttons/Theme/Caption",
            "text": "theme"
        },
        "$:/language/Buttons/Theme/Hint": {
            "title": "$:/language/Buttons/Theme/Hint",
            "text": "Choose the display theme"
        },
        "$:/language/Buttons/Bold/Caption": {
            "title": "$:/language/Buttons/Bold/Caption",
            "text": "bold"
        },
        "$:/language/Buttons/Bold/Hint": {
            "title": "$:/language/Buttons/Bold/Hint",
            "text": "Apply bold formatting to selection"
        },
        "$:/language/Buttons/Clear/Caption": {
            "title": "$:/language/Buttons/Clear/Caption",
            "text": "clear"
        },
        "$:/language/Buttons/Clear/Hint": {
            "title": "$:/language/Buttons/Clear/Hint",
            "text": "Clear image to solid colour"
        },
        "$:/language/Buttons/EditorHeight/Caption": {
            "title": "$:/language/Buttons/EditorHeight/Caption",
            "text": "editor height"
        },
        "$:/language/Buttons/EditorHeight/Caption/Auto": {
            "title": "$:/language/Buttons/EditorHeight/Caption/Auto",
            "text": "Automatically adjust height to fit content"
        },
        "$:/language/Buttons/EditorHeight/Caption/Fixed": {
            "title": "$:/language/Buttons/EditorHeight/Caption/Fixed",
            "text": "Fixed height:"
        },
        "$:/language/Buttons/EditorHeight/Hint": {
            "title": "$:/language/Buttons/EditorHeight/Hint",
            "text": "Choose the height of the text editor"
        },
        "$:/language/Buttons/Excise/Caption": {
            "title": "$:/language/Buttons/Excise/Caption",
            "text": "excise"
        },
        "$:/language/Buttons/Excise/Caption/Excise": {
            "title": "$:/language/Buttons/Excise/Caption/Excise",
            "text": "Perform excision"
        },
        "$:/language/Buttons/Excise/Caption/MacroName": {
            "title": "$:/language/Buttons/Excise/Caption/MacroName",
            "text": "Macro name:"
        },
        "$:/language/Buttons/Excise/Caption/NewTitle": {
            "title": "$:/language/Buttons/Excise/Caption/NewTitle",
            "text": "Title of new tiddler:"
        },
        "$:/language/Buttons/Excise/Caption/Replace": {
            "title": "$:/language/Buttons/Excise/Caption/Replace",
            "text": "Replace excised text with:"
        },
        "$:/language/Buttons/Excise/Caption/Replace/Macro": {
            "title": "$:/language/Buttons/Excise/Caption/Replace/Macro",
            "text": "macro"
        },
        "$:/language/Buttons/Excise/Caption/Replace/Link": {
            "title": "$:/language/Buttons/Excise/Caption/Replace/Link",
            "text": "link"
        },
        "$:/language/Buttons/Excise/Caption/Replace/Transclusion": {
            "title": "$:/language/Buttons/Excise/Caption/Replace/Transclusion",
            "text": "transclusion"
        },
        "$:/language/Buttons/Excise/Caption/Tag": {
            "title": "$:/language/Buttons/Excise/Caption/Tag",
            "text": "Tag new tiddler with the title of this tiddler"
        },
        "$:/language/Buttons/Excise/Caption/TiddlerExists": {
            "title": "$:/language/Buttons/Excise/Caption/TiddlerExists",
            "text": "Warning: tiddler already exists"
        },
        "$:/language/Buttons/Excise/Hint": {
            "title": "$:/language/Buttons/Excise/Hint",
            "text": "Excise the selected text into a new tiddler"
        },
        "$:/language/Buttons/Heading1/Caption": {
            "title": "$:/language/Buttons/Heading1/Caption",
            "text": "heading 1"
        },
        "$:/language/Buttons/Heading1/Hint": {
            "title": "$:/language/Buttons/Heading1/Hint",
            "text": "Apply heading level 1 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading2/Caption": {
            "title": "$:/language/Buttons/Heading2/Caption",
            "text": "heading 2"
        },
        "$:/language/Buttons/Heading2/Hint": {
            "title": "$:/language/Buttons/Heading2/Hint",
            "text": "Apply heading level 2 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading3/Caption": {
            "title": "$:/language/Buttons/Heading3/Caption",
            "text": "heading 3"
        },
        "$:/language/Buttons/Heading3/Hint": {
            "title": "$:/language/Buttons/Heading3/Hint",
            "text": "Apply heading level 3 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading4/Caption": {
            "title": "$:/language/Buttons/Heading4/Caption",
            "text": "heading 4"
        },
        "$:/language/Buttons/Heading4/Hint": {
            "title": "$:/language/Buttons/Heading4/Hint",
            "text": "Apply heading level 4 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading5/Caption": {
            "title": "$:/language/Buttons/Heading5/Caption",
            "text": "heading 5"
        },
        "$:/language/Buttons/Heading5/Hint": {
            "title": "$:/language/Buttons/Heading5/Hint",
            "text": "Apply heading level 5 formatting to lines containing selection"
        },
        "$:/language/Buttons/Heading6/Caption": {
            "title": "$:/language/Buttons/Heading6/Caption",
            "text": "heading 6"
        },
        "$:/language/Buttons/Heading6/Hint": {
            "title": "$:/language/Buttons/Heading6/Hint",
            "text": "Apply heading level 6 formatting to lines containing selection"
        },
        "$:/language/Buttons/Italic/Caption": {
            "title": "$:/language/Buttons/Italic/Caption",
            "text": "italic"
        },
        "$:/language/Buttons/Italic/Hint": {
            "title": "$:/language/Buttons/Italic/Hint",
            "text": "Apply italic formatting to selection"
        },
        "$:/language/Buttons/LineWidth/Caption": {
            "title": "$:/language/Buttons/LineWidth/Caption",
            "text": "line width"
        },
        "$:/language/Buttons/LineWidth/Hint": {
            "title": "$:/language/Buttons/LineWidth/Hint",
            "text": "Set line width for painting"
        },
        "$:/language/Buttons/Link/Caption": {
            "title": "$:/language/Buttons/Link/Caption",
            "text": "link"
        },
        "$:/language/Buttons/Link/Hint": {
            "title": "$:/language/Buttons/Link/Hint",
            "text": "Create wikitext link"
        },
        "$:/language/Buttons/ListBullet/Caption": {
            "title": "$:/language/Buttons/ListBullet/Caption",
            "text": "bulleted list"
        },
        "$:/language/Buttons/ListBullet/Hint": {
            "title": "$:/language/Buttons/ListBullet/Hint",
            "text": "Apply bulleted list formatting to lines containing selection"
        },
        "$:/language/Buttons/ListNumber/Caption": {
            "title": "$:/language/Buttons/ListNumber/Caption",
            "text": "numbered list"
        },
        "$:/language/Buttons/ListNumber/Hint": {
            "title": "$:/language/Buttons/ListNumber/Hint",
            "text": "Apply numbered list formatting to lines containing selection"
        },
        "$:/language/Buttons/MonoBlock/Caption": {
            "title": "$:/language/Buttons/MonoBlock/Caption",
            "text": "monospaced block"
        },
        "$:/language/Buttons/MonoBlock/Hint": {
            "title": "$:/language/Buttons/MonoBlock/Hint",
            "text": "Apply monospaced block formatting to lines containing selection"
        },
        "$:/language/Buttons/MonoLine/Caption": {
            "title": "$:/language/Buttons/MonoLine/Caption",
            "text": "monospaced"
        },
        "$:/language/Buttons/MonoLine/Hint": {
            "title": "$:/language/Buttons/MonoLine/Hint",
            "text": "Apply monospaced character formatting to selection"
        },
        "$:/language/Buttons/Opacity/Caption": {
            "title": "$:/language/Buttons/Opacity/Caption",
            "text": "opacity"
        },
        "$:/language/Buttons/Opacity/Hint": {
            "title": "$:/language/Buttons/Opacity/Hint",
            "text": "Set painting opacity"
        },
        "$:/language/Buttons/Paint/Caption": {
            "title": "$:/language/Buttons/Paint/Caption",
            "text": "paint colour"
        },
        "$:/language/Buttons/Paint/Hint": {
            "title": "$:/language/Buttons/Paint/Hint",
            "text": "Set painting colour"
        },
        "$:/language/Buttons/Picture/Caption": {
            "title": "$:/language/Buttons/Picture/Caption",
            "text": "picture"
        },
        "$:/language/Buttons/Picture/Hint": {
            "title": "$:/language/Buttons/Picture/Hint",
            "text": "Insert picture"
        },
        "$:/language/Buttons/Preview/Caption": {
            "title": "$:/language/Buttons/Preview/Caption",
            "text": "preview"
        },
        "$:/language/Buttons/Preview/Hint": {
            "title": "$:/language/Buttons/Preview/Hint",
            "text": "Show preview pane"
        },
        "$:/language/Buttons/PreviewType/Caption": {
            "title": "$:/language/Buttons/PreviewType/Caption",
            "text": "preview type"
        },
        "$:/language/Buttons/PreviewType/Hint": {
            "title": "$:/language/Buttons/PreviewType/Hint",
            "text": "Choose preview type"
        },
        "$:/language/Buttons/Quote/Caption": {
            "title": "$:/language/Buttons/Quote/Caption",
            "text": "quote"
        },
        "$:/language/Buttons/Quote/Hint": {
            "title": "$:/language/Buttons/Quote/Hint",
            "text": "Apply quoted text formatting to lines containing selection"
        },
        "$:/language/Buttons/Size/Caption": {
            "title": "$:/language/Buttons/Size/Caption",
            "text": "image size"
        },
        "$:/language/Buttons/Size/Caption/Height": {
            "title": "$:/language/Buttons/Size/Caption/Height",
            "text": "Height:"
        },
        "$:/language/Buttons/Size/Caption/Resize": {
            "title": "$:/language/Buttons/Size/Caption/Resize",
            "text": "Resize image"
        },
        "$:/language/Buttons/Size/Caption/Width": {
            "title": "$:/language/Buttons/Size/Caption/Width",
            "text": "Width:"
        },
        "$:/language/Buttons/Size/Hint": {
            "title": "$:/language/Buttons/Size/Hint",
            "text": "Set image size"
        },
        "$:/language/Buttons/Stamp/Caption": {
            "title": "$:/language/Buttons/Stamp/Caption",
            "text": "stamp"
        },
        "$:/language/Buttons/Stamp/Caption/New": {
            "title": "$:/language/Buttons/Stamp/Caption/New",
            "text": "Add your own"
        },
        "$:/language/Buttons/Stamp/Hint": {
            "title": "$:/language/Buttons/Stamp/Hint",
            "text": "Insert a preconfigured snippet of text"
        },
        "$:/language/Buttons/Stamp/New/Title": {
            "title": "$:/language/Buttons/Stamp/New/Title",
            "text": "Name as shown in menu"
        },
        "$:/language/Buttons/Stamp/New/Text": {
            "title": "$:/language/Buttons/Stamp/New/Text",
            "text": "Text of snippet. (Remember to add a descriptive title in the caption field)."
        },
        "$:/language/Buttons/Strikethrough/Caption": {
            "title": "$:/language/Buttons/Strikethrough/Caption",
            "text": "strikethrough"
        },
        "$:/language/Buttons/Strikethrough/Hint": {
            "title": "$:/language/Buttons/Strikethrough/Hint",
            "text": "Apply strikethrough formatting to selection"
        },
        "$:/language/Buttons/Subscript/Caption": {
            "title": "$:/language/Buttons/Subscript/Caption",
            "text": "subscript"
        },
        "$:/language/Buttons/Subscript/Hint": {
            "title": "$:/language/Buttons/Subscript/Hint",
            "text": "Apply subscript formatting to selection"
        },
        "$:/language/Buttons/Superscript/Caption": {
            "title": "$:/language/Buttons/Superscript/Caption",
            "text": "superscript"
        },
        "$:/language/Buttons/Superscript/Hint": {
            "title": "$:/language/Buttons/Superscript/Hint",
            "text": "Apply superscript formatting to selection"
        },
        "$:/language/Buttons/Underline/Caption": {
            "title": "$:/language/Buttons/Underline/Caption",
            "text": "underline"
        },
        "$:/language/Buttons/Underline/Hint": {
            "title": "$:/language/Buttons/Underline/Hint",
            "text": "Apply underline formatting to selection"
        },
        "$:/language/ControlPanel/Advanced/Caption": {
            "title": "$:/language/ControlPanel/Advanced/Caption",
            "text": "Advanced"
        },
        "$:/language/ControlPanel/Advanced/Hint": {
            "title": "$:/language/ControlPanel/Advanced/Hint",
            "text": "Internal information about this TiddlyWiki"
        },
        "$:/language/ControlPanel/Appearance/Caption": {
            "title": "$:/language/ControlPanel/Appearance/Caption",
            "text": "Appearance"
        },
        "$:/language/ControlPanel/Appearance/Hint": {
            "title": "$:/language/ControlPanel/Appearance/Hint",
            "text": "Ways to customise the appearance of your TiddlyWiki."
        },
        "$:/language/ControlPanel/Basics/AnimDuration/Prompt": {
            "title": "$:/language/ControlPanel/Basics/AnimDuration/Prompt",
            "text": "Animation duration:"
        },
        "$:/language/ControlPanel/Basics/Caption": {
            "title": "$:/language/ControlPanel/Basics/Caption",
            "text": "Basics"
        },
        "$:/language/ControlPanel/Basics/DefaultTiddlers/BottomHint": {
            "title": "$:/language/ControlPanel/Basics/DefaultTiddlers/BottomHint",
            "text": "Use &#91;&#91;double square brackets&#93;&#93; for titles with spaces. Or you can choose to <$button set=\"$:/DefaultTiddlers\" setTo=\"[list[$:/StoryList]]\">retain story ordering</$button>"
        },
        "$:/language/ControlPanel/Basics/DefaultTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/DefaultTiddlers/Prompt",
            "text": "Default tiddlers:"
        },
        "$:/language/ControlPanel/Basics/DefaultTiddlers/TopHint": {
            "title": "$:/language/ControlPanel/Basics/DefaultTiddlers/TopHint",
            "text": "Choose which tiddlers are displayed at startup:"
        },
        "$:/language/ControlPanel/Basics/Language/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Language/Prompt",
            "text": "Hello! Current language:"
        },
        "$:/language/ControlPanel/Basics/NewJournal/Title/Prompt": {
            "title": "$:/language/ControlPanel/Basics/NewJournal/Title/Prompt",
            "text": "Title of new journal tiddlers"
        },
        "$:/language/ControlPanel/Basics/NewJournal/Tags/Prompt": {
            "title": "$:/language/ControlPanel/Basics/NewJournal/Tags/Prompt",
            "text": "Tags for new journal tiddlers"
        },
        "$:/language/ControlPanel/Basics/OverriddenShadowTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/OverriddenShadowTiddlers/Prompt",
            "text": "Number of overridden shadow tiddlers:"
        },
        "$:/language/ControlPanel/Basics/ShadowTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/ShadowTiddlers/Prompt",
            "text": "Number of shadow tiddlers:"
        },
        "$:/language/ControlPanel/Basics/Subtitle/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Subtitle/Prompt",
            "text": "Subtitle:"
        },
        "$:/language/ControlPanel/Basics/SystemTiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/SystemTiddlers/Prompt",
            "text": "Number of system tiddlers:"
        },
        "$:/language/ControlPanel/Basics/Tags/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Tags/Prompt",
            "text": "Number of tags:"
        },
        "$:/language/ControlPanel/Basics/Tiddlers/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Tiddlers/Prompt",
            "text": "Number of tiddlers:"
        },
        "$:/language/ControlPanel/Basics/Title/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Title/Prompt",
            "text": "Title of this ~TiddlyWiki:"
        },
        "$:/language/ControlPanel/Basics/Username/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Username/Prompt",
            "text": "Username for signing edits:"
        },
        "$:/language/ControlPanel/Basics/Version/Prompt": {
            "title": "$:/language/ControlPanel/Basics/Version/Prompt",
            "text": "~TiddlyWiki version:"
        },
        "$:/language/ControlPanel/EditorTypes/Caption": {
            "title": "$:/language/ControlPanel/EditorTypes/Caption",
            "text": "Editor Types"
        },
        "$:/language/ControlPanel/EditorTypes/Editor/Caption": {
            "title": "$:/language/ControlPanel/EditorTypes/Editor/Caption",
            "text": "Editor"
        },
        "$:/language/ControlPanel/EditorTypes/Hint": {
            "title": "$:/language/ControlPanel/EditorTypes/Hint",
            "text": "These tiddlers determine which editor is used to edit specific tiddler types."
        },
        "$:/language/ControlPanel/EditorTypes/Type/Caption": {
            "title": "$:/language/ControlPanel/EditorTypes/Type/Caption",
            "text": "Type"
        },
        "$:/language/ControlPanel/Info/Caption": {
            "title": "$:/language/ControlPanel/Info/Caption",
            "text": "Info"
        },
        "$:/language/ControlPanel/Info/Hint": {
            "title": "$:/language/ControlPanel/Info/Hint",
            "text": "Information about this TiddlyWiki"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Add/Prompt": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Add/Prompt",
            "text": "Type shortcut here"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Add/Caption": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Add/Caption",
            "text": "add shortcut"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Caption": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Caption",
            "text": "Keyboard Shortcuts"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Hint": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Hint",
            "text": "Manage keyboard shortcut assignments"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/NoShortcuts/Caption": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/NoShortcuts/Caption",
            "text": "No keyboard shortcuts assigned"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Remove/Hint": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Remove/Hint",
            "text": "remove keyboard shortcut"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/All": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/All",
            "text": "All platforms"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/Mac": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/Mac",
            "text": "Macintosh platform only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonMac": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonMac",
            "text": "Non-Macintosh platforms only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/Linux": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/Linux",
            "text": "Linux platform only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonLinux": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonLinux",
            "text": "Non-Linux platforms only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/Windows": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/Windows",
            "text": "Windows platform only"
        },
        "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonWindows": {
            "title": "$:/language/ControlPanel/KeyboardShortcuts/Platform/NonWindows",
            "text": "Non-Windows platforms only"
        },
        "$:/language/ControlPanel/LoadedModules/Caption": {
            "title": "$:/language/ControlPanel/LoadedModules/Caption",
            "text": "Loaded Modules"
        },
        "$:/language/ControlPanel/LoadedModules/Hint": {
            "title": "$:/language/ControlPanel/LoadedModules/Hint",
            "text": "These are the currently loaded tiddler modules linked to their source tiddlers. Any italicised modules lack a source tiddler, typically because they were setup during the boot process."
        },
        "$:/language/ControlPanel/Palette/Caption": {
            "title": "$:/language/ControlPanel/Palette/Caption",
            "text": "Palette"
        },
        "$:/language/ControlPanel/Palette/Editor/Clone/Caption": {
            "title": "$:/language/ControlPanel/Palette/Editor/Clone/Caption",
            "text": "clone"
        },
        "$:/language/ControlPanel/Palette/Editor/Clone/Prompt": {
            "title": "$:/language/ControlPanel/Palette/Editor/Clone/Prompt",
            "text": "It is recommended that you clone this shadow palette before editing it"
        },
        "$:/language/ControlPanel/Palette/Editor/Prompt/Modified": {
            "title": "$:/language/ControlPanel/Palette/Editor/Prompt/Modified",
            "text": "This shadow palette has been modified"
        },
        "$:/language/ControlPanel/Palette/Editor/Prompt": {
            "title": "$:/language/ControlPanel/Palette/Editor/Prompt",
            "text": "Editing"
        },
        "$:/language/ControlPanel/Palette/Editor/Reset/Caption": {
            "title": "$:/language/ControlPanel/Palette/Editor/Reset/Caption",
            "text": "reset"
        },
        "$:/language/ControlPanel/Palette/HideEditor/Caption": {
            "title": "$:/language/ControlPanel/Palette/HideEditor/Caption",
            "text": "hide editor"
        },
        "$:/language/ControlPanel/Palette/Prompt": {
            "title": "$:/language/ControlPanel/Palette/Prompt",
            "text": "Current palette:"
        },
        "$:/language/ControlPanel/Palette/ShowEditor/Caption": {
            "title": "$:/language/ControlPanel/Palette/ShowEditor/Caption",
            "text": "show editor"
        },
        "$:/language/ControlPanel/Parsing/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Caption",
            "text": "Parsing"
        },
        "$:/language/ControlPanel/Parsing/Hint": {
            "title": "$:/language/ControlPanel/Parsing/Hint",
            "text": "Here you can globally disable individual wiki parser rules. Take care as disabling some parser rules can prevent ~TiddlyWiki functioning correctly (you can restore normal operation with [[safe mode|http://tiddlywiki.com/#SafeMode]] )"
        },
        "$:/language/ControlPanel/Parsing/Block/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Block/Caption",
            "text": "Block Parse Rules"
        },
        "$:/language/ControlPanel/Parsing/Inline/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Inline/Caption",
            "text": "Inline Parse Rules"
        },
        "$:/language/ControlPanel/Parsing/Pragma/Caption": {
            "title": "$:/language/ControlPanel/Parsing/Pragma/Caption",
            "text": "Pragma Parse Rules"
        },
        "$:/language/ControlPanel/Plugins/Add/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Add/Caption",
            "text": "Get more plugins"
        },
        "$:/language/ControlPanel/Plugins/Add/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Add/Hint",
            "text": "Install plugins from the official library"
        },
        "$:/language/ControlPanel/Plugins/AlreadyInstalled/Hint": {
            "title": "$:/language/ControlPanel/Plugins/AlreadyInstalled/Hint",
            "text": "This plugin is already installed at version <$text text=<<installedVersion>>/>"
        },
        "$:/language/ControlPanel/Plugins/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Caption",
            "text": "Plugins"
        },
        "$:/language/ControlPanel/Plugins/Disable/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Disable/Caption",
            "text": "disable"
        },
        "$:/language/ControlPanel/Plugins/Disable/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Disable/Hint",
            "text": "Disable this plugin when reloading page"
        },
        "$:/language/ControlPanel/Plugins/Disabled/Status": {
            "title": "$:/language/ControlPanel/Plugins/Disabled/Status",
            "text": "(disabled)"
        },
        "$:/language/ControlPanel/Plugins/Empty/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Empty/Hint",
            "text": "None"
        },
        "$:/language/ControlPanel/Plugins/Enable/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Enable/Caption",
            "text": "enable"
        },
        "$:/language/ControlPanel/Plugins/Enable/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Enable/Hint",
            "text": "Enable this plugin when reloading page"
        },
        "$:/language/ControlPanel/Plugins/Install/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Install/Caption",
            "text": "install"
        },
        "$:/language/ControlPanel/Plugins/Installed/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Installed/Hint",
            "text": "Currently installed plugins:"
        },
        "$:/language/ControlPanel/Plugins/Languages/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Languages/Caption",
            "text": "Languages"
        },
        "$:/language/ControlPanel/Plugins/Languages/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Languages/Hint",
            "text": "Language pack plugins"
        },
        "$:/language/ControlPanel/Plugins/NoInfoFound/Hint": {
            "title": "$:/language/ControlPanel/Plugins/NoInfoFound/Hint",
            "text": "No ''\"<$text text=<<currentTab>>/>\"'' found"
        },
        "$:/language/ControlPanel/Plugins/NoInformation/Hint": {
            "title": "$:/language/ControlPanel/Plugins/NoInformation/Hint",
            "text": "No information provided"
        },
        "$:/language/ControlPanel/Plugins/NotInstalled/Hint": {
            "title": "$:/language/ControlPanel/Plugins/NotInstalled/Hint",
            "text": "This plugin is not currently installed"
        },
        "$:/language/ControlPanel/Plugins/OpenPluginLibrary": {
            "title": "$:/language/ControlPanel/Plugins/OpenPluginLibrary",
            "text": "open plugin library"
        },
        "$:/language/ControlPanel/Plugins/Plugins/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Plugins/Caption",
            "text": "Plugins"
        },
        "$:/language/ControlPanel/Plugins/Plugins/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Plugins/Hint",
            "text": "Plugins"
        },
        "$:/language/ControlPanel/Plugins/Reinstall/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Reinstall/Caption",
            "text": "reinstall"
        },
        "$:/language/ControlPanel/Plugins/Themes/Caption": {
            "title": "$:/language/ControlPanel/Plugins/Themes/Caption",
            "text": "Themes"
        },
        "$:/language/ControlPanel/Plugins/Themes/Hint": {
            "title": "$:/language/ControlPanel/Plugins/Themes/Hint",
            "text": "Theme plugins"
        },
        "$:/language/ControlPanel/Saving/Caption": {
            "title": "$:/language/ControlPanel/Saving/Caption",
            "text": "Saving"
        },
        "$:/language/ControlPanel/Saving/Heading": {
            "title": "$:/language/ControlPanel/Saving/Heading",
            "text": "Saving"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Advanced/Heading": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Advanced/Heading",
            "text": "Advanced Settings"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/BackupDir": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/BackupDir",
            "text": "Backup Directory"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Backups": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Backups",
            "text": "Backups"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Description": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Description",
            "text": "These settings are only used when saving to http://tiddlyspot.com or a compatible remote server"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Filename": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Filename",
            "text": "Upload Filename"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Heading": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Heading",
            "text": "~TiddlySpot"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Hint": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Hint",
            "text": "//The server URL defaults to `http://<wikiname>.tiddlyspot.com/store.cgi` and can be changed to use a custom server address, e.g. `http://example.com/store.php`.//"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/Password": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/Password",
            "text": "Password"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/ServerURL": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/ServerURL",
            "text": "Server URL"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/UploadDir": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/UploadDir",
            "text": "Upload Directory"
        },
        "$:/language/ControlPanel/Saving/TiddlySpot/UserName": {
            "title": "$:/language/ControlPanel/Saving/TiddlySpot/UserName",
            "text": "Wiki Name"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Caption": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Caption",
            "text": "Autosave"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Disabled/Description": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Disabled/Description",
            "text": "Do not save changes automatically"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Enabled/Description": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Enabled/Description",
            "text": "Save changes automatically"
        },
        "$:/language/ControlPanel/Settings/AutoSave/Hint": {
            "title": "$:/language/ControlPanel/Settings/AutoSave/Hint",
            "text": "Automatically save changes during editing"
        },
        "$:/language/ControlPanel/Settings/CamelCase/Caption": {
            "title": "$:/language/ControlPanel/Settings/CamelCase/Caption",
            "text": "Camel Case Wiki Links"
        },
        "$:/language/ControlPanel/Settings/CamelCase/Hint": {
            "title": "$:/language/ControlPanel/Settings/CamelCase/Hint",
            "text": "You can globally disable automatic linking of ~CamelCase phrases. Requires reload to take effect"
        },
        "$:/language/ControlPanel/Settings/CamelCase/Description": {
            "title": "$:/language/ControlPanel/Settings/CamelCase/Description",
            "text": "Enable automatic ~CamelCase linking"
        },
        "$:/language/ControlPanel/Settings/Caption": {
            "title": "$:/language/ControlPanel/Settings/Caption",
            "text": "Settings"
        },
        "$:/language/ControlPanel/Settings/EditorToolbar/Caption": {
            "title": "$:/language/ControlPanel/Settings/EditorToolbar/Caption",
            "text": "Editor Toolbar"
        },
        "$:/language/ControlPanel/Settings/EditorToolbar/Hint": {
            "title": "$:/language/ControlPanel/Settings/EditorToolbar/Hint",
            "text": "Enable or disable the editor toolbar:"
        },
        "$:/language/ControlPanel/Settings/EditorToolbar/Description": {
            "title": "$:/language/ControlPanel/Settings/EditorToolbar/Description",
            "text": "Show editor toolbar"
        },
        "$:/language/ControlPanel/Settings/Hint": {
            "title": "$:/language/ControlPanel/Settings/Hint",
            "text": "These settings let you customise the behaviour of TiddlyWiki."
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Caption": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Caption",
            "text": "Navigation Address Bar"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Hint": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Hint",
            "text": "Behaviour of the browser address bar when navigating to a tiddler:"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/No/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/No/Description",
            "text": "Do not update the address bar"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Permalink/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Permalink/Description",
            "text": "Include the target tiddler"
        },
        "$:/language/ControlPanel/Settings/NavigationAddressBar/Permaview/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationAddressBar/Permaview/Description",
            "text": "Include the target tiddler and the current story sequence"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/Caption": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/Caption",
            "text": "Navigation History"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/Hint": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/Hint",
            "text": "Update browser history when navigating to a tiddler:"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/No/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/No/Description",
            "text": "Do not update history"
        },
        "$:/language/ControlPanel/Settings/NavigationHistory/Yes/Description": {
            "title": "$:/language/ControlPanel/Settings/NavigationHistory/Yes/Description",
            "text": "Update history"
        },
        "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Caption": {
            "title": "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Caption",
            "text": "Performance Instrumentation"
        },
        "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Hint": {
            "title": "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Hint",
            "text": "Displays performance statistics in the browser developer console. Requires reload to take effect"
        },
        "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Description": {
            "title": "$:/language/ControlPanel/Settings/PerformanceInstrumentation/Description",
            "text": "Enable performance instrumentation"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Caption": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Caption",
            "text": "Toolbar Button Style"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Hint": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Hint",
            "text": "Choose the style for toolbar buttons:"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Borderless": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Borderless",
            "text": "Borderless"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Boxed": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Boxed",
            "text": "Boxed"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Rounded": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Rounded",
            "text": "Rounded"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Caption": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Caption",
            "text": "Toolbar Buttons"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Hint": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Hint",
            "text": "Default toolbar button appearance:"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Icons/Description": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Icons/Description",
            "text": "Include icon"
        },
        "$:/language/ControlPanel/Settings/ToolbarButtons/Text/Description": {
            "title": "$:/language/ControlPanel/Settings/ToolbarButtons/Text/Description",
            "text": "Include text"
        },
        "$:/language/ControlPanel/Settings/DefaultSidebarTab/Caption": {
            "title": "$:/language/ControlPanel/Settings/DefaultSidebarTab/Caption",
            "text": "Default Sidebar Tab"
        },
        "$:/language/ControlPanel/Settings/DefaultSidebarTab/Hint": {
            "title": "$:/language/ControlPanel/Settings/DefaultSidebarTab/Hint",
            "text": "Specify which sidebar tab is displayed by default"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/Caption": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/Caption",
            "text": "Tiddler Opening Behaviour"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/InsideRiver/Hint": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/InsideRiver/Hint",
            "text": "Navigation from //within// the story river"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OutsideRiver/Hint": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OutsideRiver/Hint",
            "text": "Navigation from //outside// the story river"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAbove": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAbove",
            "text": "Open above the current tiddler"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenBelow": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenBelow",
            "text": "Open below the current tiddler"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtTop": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtTop",
            "text": "Open at the top of the story river"
        },
        "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtBottom": {
            "title": "$:/language/ControlPanel/Settings/LinkToBehaviour/OpenAtBottom",
            "text": "Open at the bottom of the story river"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/Caption": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/Caption",
            "text": "Tiddler Titles"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/Hint": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/Hint",
            "text": "Optionally display tiddler titles as links"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/No/Description": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/No/Description",
            "text": "Do not display tiddler titles as links"
        },
        "$:/language/ControlPanel/Settings/TitleLinks/Yes/Description": {
            "title": "$:/language/ControlPanel/Settings/TitleLinks/Yes/Description",
            "text": "Display tiddler titles as links"
        },
        "$:/language/ControlPanel/Settings/MissingLinks/Caption": {
            "title": "$:/language/ControlPanel/Settings/MissingLinks/Caption",
            "text": "Wiki Links"
        },
        "$:/language/ControlPanel/Settings/MissingLinks/Hint": {
            "title": "$:/language/ControlPanel/Settings/MissingLinks/Hint",
            "text": "Choose whether to link to tiddlers that do not exist yet"
        },
        "$:/language/ControlPanel/Settings/MissingLinks/Description": {
            "title": "$:/language/ControlPanel/Settings/MissingLinks/Description",
            "text": "Enable links to missing tiddlers"
        },
        "$:/language/ControlPanel/StoryView/Caption": {
            "title": "$:/language/ControlPanel/StoryView/Caption",
            "text": "Story View"
        },
        "$:/language/ControlPanel/StoryView/Prompt": {
            "title": "$:/language/ControlPanel/StoryView/Prompt",
            "text": "Current view:"
        },
        "$:/language/ControlPanel/Theme/Caption": {
            "title": "$:/language/ControlPanel/Theme/Caption",
            "text": "Theme"
        },
        "$:/language/ControlPanel/Theme/Prompt": {
            "title": "$:/language/ControlPanel/Theme/Prompt",
            "text": "Current theme:"
        },
        "$:/language/ControlPanel/TiddlerFields/Caption": {
            "title": "$:/language/ControlPanel/TiddlerFields/Caption",
            "text": "Tiddler Fields"
        },
        "$:/language/ControlPanel/TiddlerFields/Hint": {
            "title": "$:/language/ControlPanel/TiddlerFields/Hint",
            "text": "This is the full set of TiddlerFields in use in this wiki (including system tiddlers but excluding shadow tiddlers)."
        },
        "$:/language/ControlPanel/Toolbars/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/Caption",
            "text": "Toolbars"
        },
        "$:/language/ControlPanel/Toolbars/EditToolbar/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/EditToolbar/Caption",
            "text": "Edit Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/EditToolbar/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/EditToolbar/Hint",
            "text": "Choose which buttons are displayed for tiddlers in edit mode"
        },
        "$:/language/ControlPanel/Toolbars/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/Hint",
            "text": "Select which toolbar buttons are displayed"
        },
        "$:/language/ControlPanel/Toolbars/PageControls/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/PageControls/Caption",
            "text": "Page Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/PageControls/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/PageControls/Hint",
            "text": "Choose which buttons are displayed on the main page toolbar"
        },
        "$:/language/ControlPanel/Toolbars/EditorToolbar/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/EditorToolbar/Caption",
            "text": "Editor Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/EditorToolbar/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/EditorToolbar/Hint",
            "text": "Choose which buttons are displayed in the editor toolbar. Note that some buttons will only appear when editing tiddlers of a certain type"
        },
        "$:/language/ControlPanel/Toolbars/ViewToolbar/Caption": {
            "title": "$:/language/ControlPanel/Toolbars/ViewToolbar/Caption",
            "text": "View Toolbar"
        },
        "$:/language/ControlPanel/Toolbars/ViewToolbar/Hint": {
            "title": "$:/language/ControlPanel/Toolbars/ViewToolbar/Hint",
            "text": "Choose which buttons are displayed for tiddlers in view mode"
        },
        "$:/language/ControlPanel/Tools/Download/Full/Caption": {
            "title": "$:/language/ControlPanel/Tools/Download/Full/Caption",
            "text": "Download full wiki"
        },
        "$:/language/Date/DaySuffix/1": {
            "title": "$:/language/Date/DaySuffix/1",
            "text": "st"
        },
        "$:/language/Date/DaySuffix/2": {
            "title": "$:/language/Date/DaySuffix/2",
            "text": "nd"
        },
        "$:/language/Date/DaySuffix/3": {
            "title": "$:/language/Date/DaySuffix/3",
            "text": "rd"
        },
        "$:/language/Date/DaySuffix/4": {
            "title": "$:/language/Date/DaySuffix/4",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/5": {
            "title": "$:/language/Date/DaySuffix/5",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/6": {
            "title": "$:/language/Date/DaySuffix/6",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/7": {
            "title": "$:/language/Date/DaySuffix/7",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/8": {
            "title": "$:/language/Date/DaySuffix/8",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/9": {
            "title": "$:/language/Date/DaySuffix/9",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/10": {
            "title": "$:/language/Date/DaySuffix/10",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/11": {
            "title": "$:/language/Date/DaySuffix/11",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/12": {
            "title": "$:/language/Date/DaySuffix/12",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/13": {
            "title": "$:/language/Date/DaySuffix/13",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/14": {
            "title": "$:/language/Date/DaySuffix/14",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/15": {
            "title": "$:/language/Date/DaySuffix/15",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/16": {
            "title": "$:/language/Date/DaySuffix/16",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/17": {
            "title": "$:/language/Date/DaySuffix/17",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/18": {
            "title": "$:/language/Date/DaySuffix/18",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/19": {
            "title": "$:/language/Date/DaySuffix/19",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/20": {
            "title": "$:/language/Date/DaySuffix/20",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/21": {
            "title": "$:/language/Date/DaySuffix/21",
            "text": "st"
        },
        "$:/language/Date/DaySuffix/22": {
            "title": "$:/language/Date/DaySuffix/22",
            "text": "nd"
        },
        "$:/language/Date/DaySuffix/23": {
            "title": "$:/language/Date/DaySuffix/23",
            "text": "rd"
        },
        "$:/language/Date/DaySuffix/24": {
            "title": "$:/language/Date/DaySuffix/24",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/25": {
            "title": "$:/language/Date/DaySuffix/25",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/26": {
            "title": "$:/language/Date/DaySuffix/26",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/27": {
            "title": "$:/language/Date/DaySuffix/27",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/28": {
            "title": "$:/language/Date/DaySuffix/28",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/29": {
            "title": "$:/language/Date/DaySuffix/29",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/30": {
            "title": "$:/language/Date/DaySuffix/30",
            "text": "th"
        },
        "$:/language/Date/DaySuffix/31": {
            "title": "$:/language/Date/DaySuffix/31",
            "text": "st"
        },
        "$:/language/Date/Long/Day/0": {
            "title": "$:/language/Date/Long/Day/0",
            "text": "Sunday"
        },
        "$:/language/Date/Long/Day/1": {
            "title": "$:/language/Date/Long/Day/1",
            "text": "Monday"
        },
        "$:/language/Date/Long/Day/2": {
            "title": "$:/language/Date/Long/Day/2",
            "text": "Tuesday"
        },
        "$:/language/Date/Long/Day/3": {
            "title": "$:/language/Date/Long/Day/3",
            "text": "Wednesday"
        },
        "$:/language/Date/Long/Day/4": {
            "title": "$:/language/Date/Long/Day/4",
            "text": "Thursday"
        },
        "$:/language/Date/Long/Day/5": {
            "title": "$:/language/Date/Long/Day/5",
            "text": "Friday"
        },
        "$:/language/Date/Long/Day/6": {
            "title": "$:/language/Date/Long/Day/6",
            "text": "Saturday"
        },
        "$:/language/Date/Long/Month/1": {
            "title": "$:/language/Date/Long/Month/1",
            "text": "January"
        },
        "$:/language/Date/Long/Month/2": {
            "title": "$:/language/Date/Long/Month/2",
            "text": "February"
        },
        "$:/language/Date/Long/Month/3": {
            "title": "$:/language/Date/Long/Month/3",
            "text": "March"
        },
        "$:/language/Date/Long/Month/4": {
            "title": "$:/language/Date/Long/Month/4",
            "text": "April"
        },
        "$:/language/Date/Long/Month/5": {
            "title": "$:/language/Date/Long/Month/5",
            "text": "May"
        },
        "$:/language/Date/Long/Month/6": {
            "title": "$:/language/Date/Long/Month/6",
            "text": "June"
        },
        "$:/language/Date/Long/Month/7": {
            "title": "$:/language/Date/Long/Month/7",
            "text": "July"
        },
        "$:/language/Date/Long/Month/8": {
            "title": "$:/language/Date/Long/Month/8",
            "text": "August"
        },
        "$:/language/Date/Long/Month/9": {
            "title": "$:/language/Date/Long/Month/9",
            "text": "September"
        },
        "$:/language/Date/Long/Month/10": {
            "title": "$:/language/Date/Long/Month/10",
            "text": "October"
        },
        "$:/language/Date/Long/Month/11": {
            "title": "$:/language/Date/Long/Month/11",
            "text": "November"
        },
        "$:/language/Date/Long/Month/12": {
            "title": "$:/language/Date/Long/Month/12",
            "text": "December"
        },
        "$:/language/Date/Period/am": {
            "title": "$:/language/Date/Period/am",
            "text": "am"
        },
        "$:/language/Date/Period/pm": {
            "title": "$:/language/Date/Period/pm",
            "text": "pm"
        },
        "$:/language/Date/Short/Day/0": {
            "title": "$:/language/Date/Short/Day/0",
            "text": "Sun"
        },
        "$:/language/Date/Short/Day/1": {
            "title": "$:/language/Date/Short/Day/1",
            "text": "Mon"
        },
        "$:/language/Date/Short/Day/2": {
            "title": "$:/language/Date/Short/Day/2",
            "text": "Tue"
        },
        "$:/language/Date/Short/Day/3": {
            "title": "$:/language/Date/Short/Day/3",
            "text": "Wed"
        },
        "$:/language/Date/Short/Day/4": {
            "title": "$:/language/Date/Short/Day/4",
            "text": "Thu"
        },
        "$:/language/Date/Short/Day/5": {
            "title": "$:/language/Date/Short/Day/5",
            "text": "Fri"
        },
        "$:/language/Date/Short/Day/6": {
            "title": "$:/language/Date/Short/Day/6",
            "text": "Sat"
        },
        "$:/language/Date/Short/Month/1": {
            "title": "$:/language/Date/Short/Month/1",
            "text": "Jan"
        },
        "$:/language/Date/Short/Month/2": {
            "title": "$:/language/Date/Short/Month/2",
            "text": "Feb"
        },
        "$:/language/Date/Short/Month/3": {
            "title": "$:/language/Date/Short/Month/3",
            "text": "Mar"
        },
        "$:/language/Date/Short/Month/4": {
            "title": "$:/language/Date/Short/Month/4",
            "text": "Apr"
        },
        "$:/language/Date/Short/Month/5": {
            "title": "$:/language/Date/Short/Month/5",
            "text": "May"
        },
        "$:/language/Date/Short/Month/6": {
            "title": "$:/language/Date/Short/Month/6",
            "text": "Jun"
        },
        "$:/language/Date/Short/Month/7": {
            "title": "$:/language/Date/Short/Month/7",
            "text": "Jul"
        },
        "$:/language/Date/Short/Month/8": {
            "title": "$:/language/Date/Short/Month/8",
            "text": "Aug"
        },
        "$:/language/Date/Short/Month/9": {
            "title": "$:/language/Date/Short/Month/9",
            "text": "Sep"
        },
        "$:/language/Date/Short/Month/10": {
            "title": "$:/language/Date/Short/Month/10",
            "text": "Oct"
        },
        "$:/language/Date/Short/Month/11": {
            "title": "$:/language/Date/Short/Month/11",
            "text": "Nov"
        },
        "$:/language/Date/Short/Month/12": {
            "title": "$:/language/Date/Short/Month/12",
            "text": "Dec"
        },
        "$:/language/RelativeDate/Future/Days": {
            "title": "$:/language/RelativeDate/Future/Days",
            "text": "<<period>> days from now"
        },
        "$:/language/RelativeDate/Future/Hours": {
            "title": "$:/language/RelativeDate/Future/Hours",
            "text": "<<period>> hours from now"
        },
        "$:/language/RelativeDate/Future/Minutes": {
            "title": "$:/language/RelativeDate/Future/Minutes",
            "text": "<<period>> minutes from now"
        },
        "$:/language/RelativeDate/Future/Months": {
            "title": "$:/language/RelativeDate/Future/Months",
            "text": "<<period>> months from now"
        },
        "$:/language/RelativeDate/Future/Second": {
            "title": "$:/language/RelativeDate/Future/Second",
            "text": "1 second from now"
        },
        "$:/language/RelativeDate/Future/Seconds": {
            "title": "$:/language/RelativeDate/Future/Seconds",
            "text": "<<period>> seconds from now"
        },
        "$:/language/RelativeDate/Future/Years": {
            "title": "$:/language/RelativeDate/Future/Years",
            "text": "<<period>> years from now"
        },
        "$:/language/RelativeDate/Past/Days": {
            "title": "$:/language/RelativeDate/Past/Days",
            "text": "<<period>> days ago"
        },
        "$:/language/RelativeDate/Past/Hours": {
            "title": "$:/language/RelativeDate/Past/Hours",
            "text": "<<period>> hours ago"
        },
        "$:/language/RelativeDate/Past/Minutes": {
            "title": "$:/language/RelativeDate/Past/Minutes",
            "text": "<<period>> minutes ago"
        },
        "$:/language/RelativeDate/Past/Months": {
            "title": "$:/language/RelativeDate/Past/Months",
            "text": "<<period>> months ago"
        },
        "$:/language/RelativeDate/Past/Second": {
            "title": "$:/language/RelativeDate/Past/Second",
            "text": "1 second ago"
        },
        "$:/language/RelativeDate/Past/Seconds": {
            "title": "$:/language/RelativeDate/Past/Seconds",
            "text": "<<period>> seconds ago"
        },
        "$:/language/RelativeDate/Past/Years": {
            "title": "$:/language/RelativeDate/Past/Years",
            "text": "<<period>> years ago"
        },
        "$:/language/Docs/ModuleTypes/animation": {
            "title": "$:/language/Docs/ModuleTypes/animation",
            "text": "Animations that may be used with the RevealWidget."
        },
        "$:/language/Docs/ModuleTypes/command": {
            "title": "$:/language/Docs/ModuleTypes/command",
            "text": "Commands that can be executed under Node.js."
        },
        "$:/language/Docs/ModuleTypes/config": {
            "title": "$:/language/Docs/ModuleTypes/config",
            "text": "Data to be inserted into `$tw.config`."
        },
        "$:/language/Docs/ModuleTypes/filteroperator": {
            "title": "$:/language/Docs/ModuleTypes/filteroperator",
            "text": "Individual filter operator methods."
        },
        "$:/language/Docs/ModuleTypes/global": {
            "title": "$:/language/Docs/ModuleTypes/global",
            "text": "Global data to be inserted into `$tw`."
        },
        "$:/language/Docs/ModuleTypes/isfilteroperator": {
            "title": "$:/language/Docs/ModuleTypes/isfilteroperator",
            "text": "Operands for the ''is'' filter operator."
        },
        "$:/language/Docs/ModuleTypes/macro": {
            "title": "$:/language/Docs/ModuleTypes/macro",
            "text": "JavaScript macro definitions."
        },
        "$:/language/Docs/ModuleTypes/parser": {
            "title": "$:/language/Docs/ModuleTypes/parser",
            "text": "Parsers for different content types."
        },
        "$:/language/Docs/ModuleTypes/saver": {
            "title": "$:/language/Docs/ModuleTypes/saver",
            "text": "Savers handle different methods for saving files from the browser."
        },
        "$:/language/Docs/ModuleTypes/startup": {
            "title": "$:/language/Docs/ModuleTypes/startup",
            "text": "Startup functions."
        },
        "$:/language/Docs/ModuleTypes/storyview": {
            "title": "$:/language/Docs/ModuleTypes/storyview",
            "text": "Story views customise the animation and behaviour of list widgets."
        },
        "$:/language/Docs/ModuleTypes/tiddlerdeserializer": {
            "title": "$:/language/Docs/ModuleTypes/tiddlerdeserializer",
            "text": "Converts different content types into tiddlers."
        },
        "$:/language/Docs/ModuleTypes/tiddlerfield": {
            "title": "$:/language/Docs/ModuleTypes/tiddlerfield",
            "text": "Defines the behaviour of an individual tiddler field."
        },
        "$:/language/Docs/ModuleTypes/tiddlermethod": {
            "title": "$:/language/Docs/ModuleTypes/tiddlermethod",
            "text": "Adds methods to the `$tw.Tiddler` prototype."
        },
        "$:/language/Docs/ModuleTypes/upgrader": {
            "title": "$:/language/Docs/ModuleTypes/upgrader",
            "text": "Applies upgrade processing to tiddlers during an upgrade/import."
        },
        "$:/language/Docs/ModuleTypes/utils": {
            "title": "$:/language/Docs/ModuleTypes/utils",
            "text": "Adds methods to `$tw.utils`."
        },
        "$:/language/Docs/ModuleTypes/utils-node": {
            "title": "$:/language/Docs/ModuleTypes/utils-node",
            "text": "Adds Node.js-specific methods to `$tw.utils`."
        },
        "$:/language/Docs/ModuleTypes/widget": {
            "title": "$:/language/Docs/ModuleTypes/widget",
            "text": "Widgets encapsulate DOM rendering and refreshing."
        },
        "$:/language/Docs/ModuleTypes/wikimethod": {
            "title": "$:/language/Docs/ModuleTypes/wikimethod",
            "text": "Adds methods to `$tw.Wiki`."
        },
        "$:/language/Docs/ModuleTypes/wikirule": {
            "title": "$:/language/Docs/ModuleTypes/wikirule",
            "text": "Individual parser rules for the main WikiText parser."
        },
        "$:/language/Docs/PaletteColours/alert-background": {
            "title": "$:/language/Docs/PaletteColours/alert-background",
            "text": "Alert background"
        },
        "$:/language/Docs/PaletteColours/alert-border": {
            "title": "$:/language/Docs/PaletteColours/alert-border",
            "text": "Alert border"
        },
        "$:/language/Docs/PaletteColours/alert-highlight": {
            "title": "$:/language/Docs/PaletteColours/alert-highlight",
            "text": "Alert highlight"
        },
        "$:/language/Docs/PaletteColours/alert-muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/alert-muted-foreground",
            "text": "Alert muted foreground"
        },
        "$:/language/Docs/PaletteColours/background": {
            "title": "$:/language/Docs/PaletteColours/background",
            "text": "General background"
        },
        "$:/language/Docs/PaletteColours/blockquote-bar": {
            "title": "$:/language/Docs/PaletteColours/blockquote-bar",
            "text": "Blockquote bar"
        },
        "$:/language/Docs/PaletteColours/button-background": {
            "title": "$:/language/Docs/PaletteColours/button-background",
            "text": "Default button background"
        },
        "$:/language/Docs/PaletteColours/button-border": {
            "title": "$:/language/Docs/PaletteColours/button-border",
            "text": "Default button border"
        },
        "$:/language/Docs/PaletteColours/button-foreground": {
            "title": "$:/language/Docs/PaletteColours/button-foreground",
            "text": "Default button foreground"
        },
        "$:/language/Docs/PaletteColours/dirty-indicator": {
            "title": "$:/language/Docs/PaletteColours/dirty-indicator",
            "text": "Unsaved changes indicator"
        },
        "$:/language/Docs/PaletteColours/code-background": {
            "title": "$:/language/Docs/PaletteColours/code-background",
            "text": "Code background"
        },
        "$:/language/Docs/PaletteColours/code-border": {
            "title": "$:/language/Docs/PaletteColours/code-border",
            "text": "Code border"
        },
        "$:/language/Docs/PaletteColours/code-foreground": {
            "title": "$:/language/Docs/PaletteColours/code-foreground",
            "text": "Code foreground"
        },
        "$:/language/Docs/PaletteColours/download-background": {
            "title": "$:/language/Docs/PaletteColours/download-background",
            "text": "Download button background"
        },
        "$:/language/Docs/PaletteColours/download-foreground": {
            "title": "$:/language/Docs/PaletteColours/download-foreground",
            "text": "Download button foreground"
        },
        "$:/language/Docs/PaletteColours/dragger-background": {
            "title": "$:/language/Docs/PaletteColours/dragger-background",
            "text": "Dragger background"
        },
        "$:/language/Docs/PaletteColours/dragger-foreground": {
            "title": "$:/language/Docs/PaletteColours/dragger-foreground",
            "text": "Dragger foreground"
        },
        "$:/language/Docs/PaletteColours/dropdown-background": {
            "title": "$:/language/Docs/PaletteColours/dropdown-background",
            "text": "Dropdown background"
        },
        "$:/language/Docs/PaletteColours/dropdown-border": {
            "title": "$:/language/Docs/PaletteColours/dropdown-border",
            "text": "Dropdown border"
        },
        "$:/language/Docs/PaletteColours/dropdown-tab-background-selected": {
            "title": "$:/language/Docs/PaletteColours/dropdown-tab-background-selected",
            "text": "Dropdown tab background for selected tabs"
        },
        "$:/language/Docs/PaletteColours/dropdown-tab-background": {
            "title": "$:/language/Docs/PaletteColours/dropdown-tab-background",
            "text": "Dropdown tab background"
        },
        "$:/language/Docs/PaletteColours/dropzone-background": {
            "title": "$:/language/Docs/PaletteColours/dropzone-background",
            "text": "Dropzone background"
        },
        "$:/language/Docs/PaletteColours/external-link-background-hover": {
            "title": "$:/language/Docs/PaletteColours/external-link-background-hover",
            "text": "External link background hover"
        },
        "$:/language/Docs/PaletteColours/external-link-background-visited": {
            "title": "$:/language/Docs/PaletteColours/external-link-background-visited",
            "text": "External link background visited"
        },
        "$:/language/Docs/PaletteColours/external-link-background": {
            "title": "$:/language/Docs/PaletteColours/external-link-background",
            "text": "External link background"
        },
        "$:/language/Docs/PaletteColours/external-link-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/external-link-foreground-hover",
            "text": "External link foreground hover"
        },
        "$:/language/Docs/PaletteColours/external-link-foreground-visited": {
            "title": "$:/language/Docs/PaletteColours/external-link-foreground-visited",
            "text": "External link foreground visited"
        },
        "$:/language/Docs/PaletteColours/external-link-foreground": {
            "title": "$:/language/Docs/PaletteColours/external-link-foreground",
            "text": "External link foreground"
        },
        "$:/language/Docs/PaletteColours/foreground": {
            "title": "$:/language/Docs/PaletteColours/foreground",
            "text": "General foreground"
        },
        "$:/language/Docs/PaletteColours/message-background": {
            "title": "$:/language/Docs/PaletteColours/message-background",
            "text": "Message box background"
        },
        "$:/language/Docs/PaletteColours/message-border": {
            "title": "$:/language/Docs/PaletteColours/message-border",
            "text": "Message box border"
        },
        "$:/language/Docs/PaletteColours/message-foreground": {
            "title": "$:/language/Docs/PaletteColours/message-foreground",
            "text": "Message box foreground"
        },
        "$:/language/Docs/PaletteColours/modal-backdrop": {
            "title": "$:/language/Docs/PaletteColours/modal-backdrop",
            "text": "Modal backdrop"
        },
        "$:/language/Docs/PaletteColours/modal-background": {
            "title": "$:/language/Docs/PaletteColours/modal-background",
            "text": "Modal background"
        },
        "$:/language/Docs/PaletteColours/modal-border": {
            "title": "$:/language/Docs/PaletteColours/modal-border",
            "text": "Modal border"
        },
        "$:/language/Docs/PaletteColours/modal-footer-background": {
            "title": "$:/language/Docs/PaletteColours/modal-footer-background",
            "text": "Modal footer background"
        },
        "$:/language/Docs/PaletteColours/modal-footer-border": {
            "title": "$:/language/Docs/PaletteColours/modal-footer-border",
            "text": "Modal footer border"
        },
        "$:/language/Docs/PaletteColours/modal-header-border": {
            "title": "$:/language/Docs/PaletteColours/modal-header-border",
            "text": "Modal header border"
        },
        "$:/language/Docs/PaletteColours/muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/muted-foreground",
            "text": "General muted foreground"
        },
        "$:/language/Docs/PaletteColours/notification-background": {
            "title": "$:/language/Docs/PaletteColours/notification-background",
            "text": "Notification background"
        },
        "$:/language/Docs/PaletteColours/notification-border": {
            "title": "$:/language/Docs/PaletteColours/notification-border",
            "text": "Notification border"
        },
        "$:/language/Docs/PaletteColours/page-background": {
            "title": "$:/language/Docs/PaletteColours/page-background",
            "text": "Page background"
        },
        "$:/language/Docs/PaletteColours/pre-background": {
            "title": "$:/language/Docs/PaletteColours/pre-background",
            "text": "Preformatted code background"
        },
        "$:/language/Docs/PaletteColours/pre-border": {
            "title": "$:/language/Docs/PaletteColours/pre-border",
            "text": "Preformatted code border"
        },
        "$:/language/Docs/PaletteColours/primary": {
            "title": "$:/language/Docs/PaletteColours/primary",
            "text": "General primary"
        },
        "$:/language/Docs/PaletteColours/sidebar-button-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-button-foreground",
            "text": "Sidebar button foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-controls-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/sidebar-controls-foreground-hover",
            "text": "Sidebar controls foreground hover"
        },
        "$:/language/Docs/PaletteColours/sidebar-controls-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-controls-foreground",
            "text": "Sidebar controls foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-foreground-shadow": {
            "title": "$:/language/Docs/PaletteColours/sidebar-foreground-shadow",
            "text": "Sidebar foreground shadow"
        },
        "$:/language/Docs/PaletteColours/sidebar-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-foreground",
            "text": "Sidebar foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-muted-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/sidebar-muted-foreground-hover",
            "text": "Sidebar muted foreground hover"
        },
        "$:/language/Docs/PaletteColours/sidebar-muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-muted-foreground",
            "text": "Sidebar muted foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-background-selected": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-background-selected",
            "text": "Sidebar tab background for selected tabs"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-background": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-background",
            "text": "Sidebar tab background"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-border-selected": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-border-selected",
            "text": "Sidebar tab border for selected tabs"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-border": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-border",
            "text": "Sidebar tab border"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-divider": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-divider",
            "text": "Sidebar tab divider"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-foreground-selected": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-foreground-selected",
            "text": "Sidebar tab foreground for selected tabs"
        },
        "$:/language/Docs/PaletteColours/sidebar-tab-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tab-foreground",
            "text": "Sidebar tab foreground"
        },
        "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground-hover",
            "text": "Sidebar tiddler link foreground hover"
        },
        "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground": {
            "title": "$:/language/Docs/PaletteColours/sidebar-tiddler-link-foreground",
            "text": "Sidebar tiddler link foreground"
        },
        "$:/language/Docs/PaletteColours/site-title-foreground": {
            "title": "$:/language/Docs/PaletteColours/site-title-foreground",
            "text": "Site title foreground"
        },
        "$:/language/Docs/PaletteColours/static-alert-foreground": {
            "title": "$:/language/Docs/PaletteColours/static-alert-foreground",
            "text": "Static alert foreground"
        },
        "$:/language/Docs/PaletteColours/tab-background-selected": {
            "title": "$:/language/Docs/PaletteColours/tab-background-selected",
            "text": "Tab background for selected tabs"
        },
        "$:/language/Docs/PaletteColours/tab-background": {
            "title": "$:/language/Docs/PaletteColours/tab-background",
            "text": "Tab background"
        },
        "$:/language/Docs/PaletteColours/tab-border-selected": {
            "title": "$:/language/Docs/PaletteColours/tab-border-selected",
            "text": "Tab border for selected tabs"
        },
        "$:/language/Docs/PaletteColours/tab-border": {
            "title": "$:/language/Docs/PaletteColours/tab-border",
            "text": "Tab border"
        },
        "$:/language/Docs/PaletteColours/tab-divider": {
            "title": "$:/language/Docs/PaletteColours/tab-divider",
            "text": "Tab divider"
        },
        "$:/language/Docs/PaletteColours/tab-foreground-selected": {
            "title": "$:/language/Docs/PaletteColours/tab-foreground-selected",
            "text": "Tab foreground for selected tabs"
        },
        "$:/language/Docs/PaletteColours/tab-foreground": {
            "title": "$:/language/Docs/PaletteColours/tab-foreground",
            "text": "Tab foreground"
        },
        "$:/language/Docs/PaletteColours/table-border": {
            "title": "$:/language/Docs/PaletteColours/table-border",
            "text": "Table border"
        },
        "$:/language/Docs/PaletteColours/table-footer-background": {
            "title": "$:/language/Docs/PaletteColours/table-footer-background",
            "text": "Table footer background"
        },
        "$:/language/Docs/PaletteColours/table-header-background": {
            "title": "$:/language/Docs/PaletteColours/table-header-background",
            "text": "Table header background"
        },
        "$:/language/Docs/PaletteColours/tag-background": {
            "title": "$:/language/Docs/PaletteColours/tag-background",
            "text": "Tag background"
        },
        "$:/language/Docs/PaletteColours/tag-foreground": {
            "title": "$:/language/Docs/PaletteColours/tag-foreground",
            "text": "Tag foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-background",
            "text": "Tiddler background"
        },
        "$:/language/Docs/PaletteColours/tiddler-border": {
            "title": "$:/language/Docs/PaletteColours/tiddler-border",
            "text": "Tiddler border"
        },
        "$:/language/Docs/PaletteColours/tiddler-controls-foreground-hover": {
            "title": "$:/language/Docs/PaletteColours/tiddler-controls-foreground-hover",
            "text": "Tiddler controls foreground hover"
        },
        "$:/language/Docs/PaletteColours/tiddler-controls-foreground-selected": {
            "title": "$:/language/Docs/PaletteColours/tiddler-controls-foreground-selected",
            "text": "Tiddler controls foreground for selected controls"
        },
        "$:/language/Docs/PaletteColours/tiddler-controls-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-controls-foreground",
            "text": "Tiddler controls foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-background",
            "text": "Tiddler editor background"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-border-image": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-border-image",
            "text": "Tiddler editor border image"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-border": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-border",
            "text": "Tiddler editor border"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-fields-even": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-fields-even",
            "text": "Tiddler editor background for even fields"
        },
        "$:/language/Docs/PaletteColours/tiddler-editor-fields-odd": {
            "title": "$:/language/Docs/PaletteColours/tiddler-editor-fields-odd",
            "text": "Tiddler editor background for odd fields"
        },
        "$:/language/Docs/PaletteColours/tiddler-info-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-info-background",
            "text": "Tiddler info panel background"
        },
        "$:/language/Docs/PaletteColours/tiddler-info-border": {
            "title": "$:/language/Docs/PaletteColours/tiddler-info-border",
            "text": "Tiddler info panel border"
        },
        "$:/language/Docs/PaletteColours/tiddler-info-tab-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-info-tab-background",
            "text": "Tiddler info panel tab background"
        },
        "$:/language/Docs/PaletteColours/tiddler-link-background": {
            "title": "$:/language/Docs/PaletteColours/tiddler-link-background",
            "text": "Tiddler link background"
        },
        "$:/language/Docs/PaletteColours/tiddler-link-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-link-foreground",
            "text": "Tiddler link foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-subtitle-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-subtitle-foreground",
            "text": "Tiddler subtitle foreground"
        },
        "$:/language/Docs/PaletteColours/tiddler-title-foreground": {
            "title": "$:/language/Docs/PaletteColours/tiddler-title-foreground",
            "text": "Tiddler title foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-new-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-new-button",
            "text": "Toolbar 'new tiddler' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-options-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-options-button",
            "text": "Toolbar 'options' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-save-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-save-button",
            "text": "Toolbar 'save' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-info-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-info-button",
            "text": "Toolbar 'info' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-edit-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-edit-button",
            "text": "Toolbar 'edit' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-close-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-close-button",
            "text": "Toolbar 'close' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-delete-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-delete-button",
            "text": "Toolbar 'delete' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-cancel-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-cancel-button",
            "text": "Toolbar 'cancel' button foreground"
        },
        "$:/language/Docs/PaletteColours/toolbar-done-button": {
            "title": "$:/language/Docs/PaletteColours/toolbar-done-button",
            "text": "Toolbar 'done' button foreground"
        },
        "$:/language/Docs/PaletteColours/untagged-background": {
            "title": "$:/language/Docs/PaletteColours/untagged-background",
            "text": "Untagged pill background"
        },
        "$:/language/Docs/PaletteColours/very-muted-foreground": {
            "title": "$:/language/Docs/PaletteColours/very-muted-foreground",
            "text": "Very muted foreground"
        },
        "$:/language/EditTemplate/Body/External/Hint": {
            "title": "$:/language/EditTemplate/Body/External/Hint",
            "text": "This is an external tiddler stored outside of the main TiddlyWiki file. You can edit the tags and fields but cannot directly edit the content itself"
        },
        "$:/language/EditTemplate/Body/Placeholder": {
            "title": "$:/language/EditTemplate/Body/Placeholder",
            "text": "Type the text for this tiddler"
        },
        "$:/language/EditTemplate/Body/Preview/Type/Output": {
            "title": "$:/language/EditTemplate/Body/Preview/Type/Output",
            "text": "output"
        },
        "$:/language/EditTemplate/Field/Remove/Caption": {
            "title": "$:/language/EditTemplate/Field/Remove/Caption",
            "text": "remove field"
        },
        "$:/language/EditTemplate/Field/Remove/Hint": {
            "title": "$:/language/EditTemplate/Field/Remove/Hint",
            "text": "Remove field"
        },
        "$:/language/EditTemplate/Fields/Add/Button": {
            "title": "$:/language/EditTemplate/Fields/Add/Button",
            "text": "add"
        },
        "$:/language/EditTemplate/Fields/Add/Name/Placeholder": {
            "title": "$:/language/EditTemplate/Fields/Add/Name/Placeholder",
            "text": "field name"
        },
        "$:/language/EditTemplate/Fields/Add/Prompt": {
            "title": "$:/language/EditTemplate/Fields/Add/Prompt",
            "text": "Add a new field:"
        },
        "$:/language/EditTemplate/Fields/Add/Value/Placeholder": {
            "title": "$:/language/EditTemplate/Fields/Add/Value/Placeholder",
            "text": "field value"
        },
        "$:/language/EditTemplate/Fields/Add/Dropdown/System": {
            "title": "$:/language/EditTemplate/Fields/Add/Dropdown/System",
            "text": "System fields"
        },
        "$:/language/EditTemplate/Fields/Add/Dropdown/User": {
            "title": "$:/language/EditTemplate/Fields/Add/Dropdown/User",
            "text": "User fields"
        },
        "$:/language/EditTemplate/Shadow/Warning": {
            "title": "$:/language/EditTemplate/Shadow/Warning",
            "text": "This is a shadow tiddler. Any changes you make will override the default version from the plugin <<pluginLink>>"
        },
        "$:/language/EditTemplate/Shadow/OverriddenWarning": {
            "title": "$:/language/EditTemplate/Shadow/OverriddenWarning",
            "text": "This is a modified shadow tiddler. You can revert to the default version in the plugin <<pluginLink>> by deleting this tiddler"
        },
        "$:/language/EditTemplate/Tags/Add/Button": {
            "title": "$:/language/EditTemplate/Tags/Add/Button",
            "text": "add"
        },
        "$:/language/EditTemplate/Tags/Add/Placeholder": {
            "title": "$:/language/EditTemplate/Tags/Add/Placeholder",
            "text": "tag name"
        },
        "$:/language/EditTemplate/Tags/Dropdown/Caption": {
            "title": "$:/language/EditTemplate/Tags/Dropdown/Caption",
            "text": "tag list"
        },
        "$:/language/EditTemplate/Tags/Dropdown/Hint": {
            "title": "$:/language/EditTemplate/Tags/Dropdown/Hint",
            "text": "Show tag list"
        },
        "$:/language/EditTemplate/Title/BadCharacterWarning": {
            "title": "$:/language/EditTemplate/Title/BadCharacterWarning",
            "text": "Warning: avoid using any of the characters <<bad-chars>> in tiddler titles"
        },
        "$:/language/EditTemplate/Type/Dropdown/Caption": {
            "title": "$:/language/EditTemplate/Type/Dropdown/Caption",
            "text": "content type list"
        },
        "$:/language/EditTemplate/Type/Dropdown/Hint": {
            "title": "$:/language/EditTemplate/Type/Dropdown/Hint",
            "text": "Show content type list"
        },
        "$:/language/EditTemplate/Type/Delete/Caption": {
            "title": "$:/language/EditTemplate/Type/Delete/Caption",
            "text": "delete content type"
        },
        "$:/language/EditTemplate/Type/Delete/Hint": {
            "title": "$:/language/EditTemplate/Type/Delete/Hint",
            "text": "Delete content type"
        },
        "$:/language/EditTemplate/Type/Placeholder": {
            "title": "$:/language/EditTemplate/Type/Placeholder",
            "text": "content type"
        },
        "$:/language/EditTemplate/Type/Prompt": {
            "title": "$:/language/EditTemplate/Type/Prompt",
            "text": "Type:"
        },
        "$:/language/Exporters/StaticRiver": {
            "title": "$:/language/Exporters/StaticRiver",
            "text": "Static HTML"
        },
        "$:/language/Exporters/JsonFile": {
            "title": "$:/language/Exporters/JsonFile",
            "text": "JSON file"
        },
        "$:/language/Exporters/CsvFile": {
            "title": "$:/language/Exporters/CsvFile",
            "text": "CSV file"
        },
        "$:/language/Exporters/TidFile": {
            "title": "$:/language/Exporters/TidFile",
            "text": "\".tid\" file"
        },
        "$:/language/Docs/Fields/_canonical_uri": {
            "title": "$:/language/Docs/Fields/_canonical_uri",
            "text": "The full URI of an external image tiddler"
        },
        "$:/language/Docs/Fields/bag": {
            "title": "$:/language/Docs/Fields/bag",
            "text": "The name of the bag from which a tiddler came"
        },
        "$:/language/Docs/Fields/caption": {
            "title": "$:/language/Docs/Fields/caption",
            "text": "The text to be displayed on a tab or button"
        },
        "$:/language/Docs/Fields/color": {
            "title": "$:/language/Docs/Fields/color",
            "text": "The CSS color value associated with a tiddler"
        },
        "$:/language/Docs/Fields/component": {
            "title": "$:/language/Docs/Fields/component",
            "text": "The name of the component responsible for an [[alert tiddler|AlertMechanism]]"
        },
        "$:/language/Docs/Fields/current-tiddler": {
            "title": "$:/language/Docs/Fields/current-tiddler",
            "text": "Used to cache the top tiddler in a [[history list|HistoryMechanism]]"
        },
        "$:/language/Docs/Fields/created": {
            "title": "$:/language/Docs/Fields/created",
            "text": "The date a tiddler was created"
        },
        "$:/language/Docs/Fields/creator": {
            "title": "$:/language/Docs/Fields/creator",
            "text": "The name of the person who created a tiddler"
        },
        "$:/language/Docs/Fields/dependents": {
            "title": "$:/language/Docs/Fields/dependents",
            "text": "For a plugin, lists the dependent plugin titles"
        },
        "$:/language/Docs/Fields/description": {
            "title": "$:/language/Docs/Fields/description",
            "text": "The descriptive text for a plugin, or a modal dialogue"
        },
        "$:/language/Docs/Fields/draft.of": {
            "title": "$:/language/Docs/Fields/draft.of",
            "text": "For draft tiddlers, contains the title of the tiddler of which this is a draft"
        },
        "$:/language/Docs/Fields/draft.title": {
            "title": "$:/language/Docs/Fields/draft.title",
            "text": "For draft tiddlers, contains the proposed new title of the tiddler"
        },
        "$:/language/Docs/Fields/footer": {
            "title": "$:/language/Docs/Fields/footer",
            "text": "The footer text for a wizard"
        },
        "$:/language/Docs/Fields/hack-to-give-us-something-to-compare-against": {
            "title": "$:/language/Docs/Fields/hack-to-give-us-something-to-compare-against",
            "text": "A temporary storage field used in [[$:/core/templates/static.content]]"
        },
        "$:/language/Docs/Fields/icon": {
            "title": "$:/language/Docs/Fields/icon",
            "text": "The title of the tiddler containing the icon associated with a tiddler"
        },
        "$:/language/Docs/Fields/library": {
            "title": "$:/language/Docs/Fields/library",
            "text": "If set to \"yes\" indicates that a tiddler should be saved as a JavaScript library"
        },
        "$:/language/Docs/Fields/list": {
            "title": "$:/language/Docs/Fields/list",
            "text": "An ordered list of tiddler titles associated with a tiddler"
        },
        "$:/language/Docs/Fields/list-before": {
            "title": "$:/language/Docs/Fields/list-before",
            "text": "If set, the title of a tiddler before which this tiddler should be added to the ordered list of tiddler titles, or at the start of the list if this field is present but empty"
        },
        "$:/language/Docs/Fields/list-after": {
            "title": "$:/language/Docs/Fields/list-after",
            "text": "If set, the title of the tiddler after which this tiddler should be added to the ordered list of tiddler titles"
        },
        "$:/language/Docs/Fields/modified": {
            "title": "$:/language/Docs/Fields/modified",
            "text": "The date and time at which a tiddler was last modified"
        },
        "$:/language/Docs/Fields/modifier": {
            "title": "$:/language/Docs/Fields/modifier",
            "text": "The tiddler title associated with the person who last modified a tiddler"
        },
        "$:/language/Docs/Fields/name": {
            "title": "$:/language/Docs/Fields/name",
            "text": "The human readable name associated with a plugin tiddler"
        },
        "$:/language/Docs/Fields/plugin-priority": {
            "title": "$:/language/Docs/Fields/plugin-priority",
            "text": "A numerical value indicating the priority of a plugin tiddler"
        },
        "$:/language/Docs/Fields/plugin-type": {
            "title": "$:/language/Docs/Fields/plugin-type",
            "text": "The type of plugin in a plugin tiddler"
        },
        "$:/language/Docs/Fields/revision": {
            "title": "$:/language/Docs/Fields/revision",
            "text": "The revision of the tiddler held at the server"
        },
        "$:/language/Docs/Fields/released": {
            "title": "$:/language/Docs/Fields/released",
            "text": "Date of a TiddlyWiki release"
        },
        "$:/language/Docs/Fields/source": {
            "title": "$:/language/Docs/Fields/source",
            "text": "The source URL associated with a tiddler"
        },
        "$:/language/Docs/Fields/subtitle": {
            "title": "$:/language/Docs/Fields/subtitle",
            "text": "The subtitle text for a wizard"
        },
        "$:/language/Docs/Fields/tags": {
            "title": "$:/language/Docs/Fields/tags",
            "text": "A list of tags associated with a tiddler"
        },
        "$:/language/Docs/Fields/text": {
            "title": "$:/language/Docs/Fields/text",
            "text": "The body text of a tiddler"
        },
        "$:/language/Docs/Fields/title": {
            "title": "$:/language/Docs/Fields/title",
            "text": "The unique name of a tiddler"
        },
        "$:/language/Docs/Fields/type": {
            "title": "$:/language/Docs/Fields/type",
            "text": "The content type of a tiddler"
        },
        "$:/language/Docs/Fields/version": {
            "title": "$:/language/Docs/Fields/version",
            "text": "Version information for a plugin"
        },
        "$:/language/Filters/AllTiddlers": {
            "title": "$:/language/Filters/AllTiddlers",
            "text": "All tiddlers except system tiddlers"
        },
        "$:/language/Filters/RecentSystemTiddlers": {
            "title": "$:/language/Filters/RecentSystemTiddlers",
            "text": "Recently modified tiddlers, including system tiddlers"
        },
        "$:/language/Filters/RecentTiddlers": {
            "title": "$:/language/Filters/RecentTiddlers",
            "text": "Recently modified tiddlers"
        },
        "$:/language/Filters/AllTags": {
            "title": "$:/language/Filters/AllTags",
            "text": "All tags except system tags"
        },
        "$:/language/Filters/Missing": {
            "title": "$:/language/Filters/Missing",
            "text": "Missing tiddlers"
        },
        "$:/language/Filters/Drafts": {
            "title": "$:/language/Filters/Drafts",
            "text": "Draft tiddlers"
        },
        "$:/language/Filters/Orphans": {
            "title": "$:/language/Filters/Orphans",
            "text": "Orphan tiddlers"
        },
        "$:/language/Filters/SystemTiddlers": {
            "title": "$:/language/Filters/SystemTiddlers",
            "text": "System tiddlers"
        },
        "$:/language/Filters/ShadowTiddlers": {
            "title": "$:/language/Filters/ShadowTiddlers",
            "text": "Shadow tiddlers"
        },
        "$:/language/Filters/OverriddenShadowTiddlers": {
            "title": "$:/language/Filters/OverriddenShadowTiddlers",
            "text": "Overridden shadow tiddlers"
        },
        "$:/language/Filters/SystemTags": {
            "title": "$:/language/Filters/SystemTags",
            "text": "System tags"
        },
        "$:/language/Filters/TypedTiddlers": {
            "title": "$:/language/Filters/TypedTiddlers",
            "text": "Non wiki-text tiddlers"
        },
        "GettingStarted": {
            "title": "GettingStarted",
            "text": "\\define lingo-base() $:/language/ControlPanel/Basics/\nWelcome to ~TiddlyWiki and the ~TiddlyWiki community\n\nBefore you start storing important information in ~TiddlyWiki it is important to make sure that you can reliably save changes. See http://tiddlywiki.com/#GettingStarted for details\n\n!! Set up this ~TiddlyWiki\n\n<div class=\"tc-control-panel\">\n\n|<$link to=\"$:/SiteTitle\"><<lingo Title/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteTitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/SiteSubtitle\"><<lingo Subtitle/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteSubtitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/DefaultTiddlers\"><<lingo DefaultTiddlers/Prompt>></$link> |<<lingo DefaultTiddlers/TopHint>><br> <$edit tag=\"textarea\" tiddler=\"$:/DefaultTiddlers\"/><br>//<<lingo DefaultTiddlers/BottomHint>>// |\n</div>\n\nSee the [[control panel|$:/ControlPanel]] for more options.\n"
        },
        "$:/language/Help/build": {
            "title": "$:/language/Help/build",
            "description": "Automatically run configured commands",
            "text": "Build the specified build targets for the current wiki. If no build targets are specified then all available targets will be built.\n\n```\n--build <target> [<target> ...]\n```\n\nBuild targets are defined in the `tiddlywiki.info` file of a wiki folder.\n\n"
        },
        "$:/language/Help/clearpassword": {
            "title": "$:/language/Help/clearpassword",
            "description": "Clear a password for subsequent crypto operations",
            "text": "Clear the password for subsequent crypto operations\n\n```\n--clearpassword\n```\n"
        },
        "$:/language/Help/default": {
            "title": "$:/language/Help/default",
            "text": "\\define commandTitle()\n$:/language/Help/$(command)$\n\\end\n```\nusage: tiddlywiki [<wikifolder>] [--<command> [<args>...]...]\n```\n\nAvailable commands:\n\n<ul>\n<$list filter=\"[commands[]sort[title]]\" variable=\"command\">\n<li><$link to=<<commandTitle>>><$macrocall $name=\"command\" $type=\"text/plain\" $output=\"text/plain\"/></$link>: <$transclude tiddler=<<commandTitle>> field=\"description\"/></li>\n</$list>\n</ul>\n\nTo get detailed help on a command:\n\n```\ntiddlywiki --help <command>\n```\n"
        },
        "$:/language/Help/editions": {
            "title": "$:/language/Help/editions",
            "description": "Lists the available editions of TiddlyWiki",
            "text": "Lists the names and descriptions of the available editions. You can create a new wiki of a specified edition with the `--init` command.\n\n```\n--editions\n```\n"
        },
        "$:/language/Help/help": {
            "title": "$:/language/Help/help",
            "description": "Display help for TiddlyWiki commands",
            "text": "Displays help text for a command:\n\n```\n--help [<command>]\n```\n\nIf the command name is omitted then a list of available commands is displayed.\n"
        },
        "$:/language/Help/init": {
            "title": "$:/language/Help/init",
            "description": "Initialise a new wiki folder",
            "text": "Initialise an empty [[WikiFolder|WikiFolders]] with a copy of the specified edition.\n\n```\n--init <edition> [<edition> ...]\n```\n\nFor example:\n\n```\ntiddlywiki ./MyWikiFolder --init empty\n```\n\nNote:\n\n* The wiki folder directory will be created if necessary\n* The \"edition\" defaults to ''empty''\n* The init command will fail if the wiki folder is not empty\n* The init command removes any `includeWikis` definitions in the edition's `tiddlywiki.info` file\n* When multiple editions are specified, editions initialised later will overwrite any files shared with earlier editions (so, the final `tiddlywiki.info` file will be copied from the last edition)\n* `--editions` returns a list of available editions\n"
        },
        "$:/language/Help/load": {
            "title": "$:/language/Help/load",
            "description": "Load tiddlers from a file",
            "text": "Load tiddlers from 2.x.x TiddlyWiki files (`.html`), `.tiddler`, `.tid`, `.json` or other files\n\n```\n--load <filepath>\n```\n\nTo load tiddlers from an encrypted TiddlyWiki file you should first specify the password with the PasswordCommand. For example:\n\n```\ntiddlywiki ./MyWiki --password pa55w0rd --load my_encrypted_wiki.html\n```\n\nNote that TiddlyWiki will not load an older version of an already loaded plugin.\n"
        },
        "$:/language/Help/makelibrary": {
            "title": "$:/language/Help/makelibrary",
            "description": "Construct library plugin required by upgrade process",
            "text": "Constructs the `$:/UpgradeLibrary` tiddler for the upgrade process.\n\nThe upgrade library is formatted as an ordinary plugin tiddler with the plugin type `library`. It contains a copy of each of the plugins, themes and language packs available within the TiddlyWiki5 repository.\n\nThis command is intended for internal use; it is only relevant to users constructing a custom upgrade procedure.\n\n```\n--makelibrary <title>\n```\n\nThe title argument defaults to `$:/UpgradeLibrary`.\n"
        },
        "$:/language/Help/notfound": {
            "title": "$:/language/Help/notfound",
            "text": "No such help item"
        },
        "$:/language/Help/output": {
            "title": "$:/language/Help/output",
            "description": "Set the base output directory for subsequent commands",
            "text": "Sets the base output directory for subsequent commands. The default output directory is the `output` subdirectory of the edition directory.\n\n```\n--output <pathname>\n```\n\nIf the specified pathname is relative then it is resolved relative to the current working directory. For example `--output .` sets the output directory to the current working directory.\n\n"
        },
        "$:/language/Help/password": {
            "title": "$:/language/Help/password",
            "description": "Set a password for subsequent crypto operations",
            "text": "Set a password for subsequent crypto operations\n\n```\n--password <password>\n```\n\n''Note'': This should not be used for serving TiddlyWiki with password protection. Instead, see the password option under the [[ServerCommand]].\n"
        },
        "$:/language/Help/rendertiddler": {
            "title": "$:/language/Help/rendertiddler",
            "description": "Render an individual tiddler as a specified ContentType",
            "text": "Render an individual tiddler as a specified ContentType, defaulting to `text/html` and save it to the specified filename. Optionally a template can be specified, in which case the template tiddler is rendered with the \"currentTiddler\" variable set to the tiddler that is being rendered (the first parameter value).\n\n```\n--rendertiddler <title> <filename> [<type>] [<template>]\n```\n\nBy default, the filename is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nAny missing directories in the path to the filename are automatically created.\n"
        },
        "$:/language/Help/rendertiddlers": {
            "title": "$:/language/Help/rendertiddlers",
            "description": "Render tiddlers matching a filter to a specified ContentType",
            "text": "Render a set of tiddlers matching a filter to separate files of a specified ContentType (defaults to `text/html`) and extension (defaults to `.html`).\n\n```\n--rendertiddlers <filter> <template> <pathname> [<type>] [<extension>] [\"noclean\"]\n```\n\nFor example:\n\n```\n--rendertiddlers [!is[system]] $:/core/templates/static.tiddler.html ./static text/plain\n```\n\nBy default, the pathname is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nAny files in the target directory are deleted unless the ''noclean'' flag is specified. The target directory is recursively created if it is missing.\n"
        },
        "$:/language/Help/savetiddler": {
            "title": "$:/language/Help/savetiddler",
            "description": "Saves a raw tiddler to a file",
            "text": "Saves an individual tiddler in its raw text or binary format to the specified filename.\n\n```\n--savetiddler <title> <filename>\n```\n\nBy default, the filename is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nAny missing directories in the path to the filename are automatically created.\n"
        },
        "$:/language/Help/savetiddlers": {
            "title": "$:/language/Help/savetiddlers",
            "description": "Saves a group of raw tiddlers to a directory",
            "text": "Saves a group of tiddlers in their raw text or binary format to the specified directory.\n\n```\n--savetiddlers <filter> <pathname> [\"noclean\"]\n```\n\nBy default, the pathname is resolved relative to the `output` subdirectory of the edition directory. The `--output` command can be used to direct output to a different directory.\n\nThe output directory is cleared of existing files before saving the specified files. The deletion can be disabled by specifying the ''noclean'' flag.\n\nAny missing directories in the pathname are automatically created.\n"
        },
        "$:/language/Help/server": {
            "title": "$:/language/Help/server",
            "description": "Provides an HTTP server interface to TiddlyWiki",
            "text": "The server built in to TiddlyWiki5 is very simple. Although compatible with TiddlyWeb it doesn't support many of the features needed for robust Internet-facing usage.\n\nAt the root, it serves a rendering of a specified tiddler. Away from the root, it serves individual tiddlers encoded in JSON, and supports the basic HTTP operations for `GET`, `PUT` and `DELETE`.\n\n```\n--server <port> <roottiddler> <rendertype> <servetype> <username> <password> <host> <pathprefix>\n```\n\nThe parameters are:\n\n* ''port'' - port number to serve from (defaults to \"8080\")\n* ''roottiddler'' - the tiddler to serve at the root (defaults to \"$:/core/save/all\")\n* ''rendertype'' - the content type to which the root tiddler should be rendered (defaults to \"text/plain\")\n* ''servetype'' - the content type with which the root tiddler should be served (defaults to \"text/html\")\n* ''username'' - the default username for signing edits\n* ''password'' - optional password for basic authentication\n* ''host'' - optional hostname to serve from (defaults to \"127.0.0.1\" aka \"localhost\")\n* ''pathprefix'' - optional prefix for paths\n\nIf the password parameter is specified then the browser will prompt the user for the username and password. Note that the password is transmitted in plain text so this implementation isn't suitable for general use.\n\nFor example:\n\n```\n--server 8080 $:/core/save/all text/plain text/html MyUserName passw0rd\n```\n\nThe username and password can be specified as empty strings if you need to set the hostname or pathprefix and don't want to require a password:\n\n```\n--server 8080 $:/core/save/all text/plain text/html \"\" \"\" 192.168.0.245\n```\n\nTo run multiple TiddlyWiki servers at the same time you'll need to put each one on a different port.\n"
        },
        "$:/language/Help/setfield": {
            "title": "$:/language/Help/setfield",
            "description": "Prepares external tiddlers for use",
            "text": "//Note that this command is experimental and may change or be replaced before being finalised//\n\nSets the specified field of a group of tiddlers to the result of wikifying a template tiddler with the `currentTiddler` variable set to the tiddler.\n\n```\n--setfield <filter> <fieldname> <templatetitle> <rendertype>\n```\n\nThe parameters are:\n\n* ''filter'' - filter identifying the tiddlers to be affected\n* ''fieldname'' - the field to modify (defaults to \"text\")\n* ''templatetitle'' - the tiddler to wikify into the specified field. If blank or missing then the specified field is deleted\n* ''rendertype'' - the text type to render (defaults to \"text/plain\"; \"text/html\" can be used to include HTML tags)\n"
        },
        "$:/language/Help/unpackplugin": {
            "title": "$:/language/Help/unpackplugin",
            "description": "Unpack the payload tiddlers from a plugin",
            "text": "Extract the payload tiddlers from a plugin, creating them as ordinary tiddlers:\n\n```\n--unpackplugin <title>\n```\n"
        },
        "$:/language/Help/verbose": {
            "title": "$:/language/Help/verbose",
            "description": "Triggers verbose output mode",
            "text": "Triggers verbose output, useful for debugging\n\n```\n--verbose\n```\n"
        },
        "$:/language/Help/version": {
            "title": "$:/language/Help/version",
            "description": "Displays the version number of TiddlyWiki",
            "text": "Displays the version number of TiddlyWiki.\n\n```\n--version\n```\n"
        },
        "$:/languages/en-GB/icon": {
            "title": "$:/languages/en-GB/icon",
            "type": "image/svg+xml",
            "text": "<svg xmlns=\"http://www.w3.org/2000/svg\" viewBox=\"0 0 60 30\" width=\"1200\" height=\"600\">\n<clipPath id=\"t\">\n\t<path d=\"M30,15 h30 v15 z v15 h-30 z h-30 v-15 z v-15 h30 z\"/>\n</clipPath>\n<path d=\"M0,0 v30 h60 v-30 z\" fill=\"#00247d\"/>\n<path d=\"M0,0 L60,30 M60,0 L0,30\" stroke=\"#fff\" stroke-width=\"6\"/>\n<path d=\"M0,0 L60,30 M60,0 L0,30\" clip-path=\"url(#t)\" stroke=\"#cf142b\" stroke-width=\"4\"/>\n<path d=\"M30,0 v30 M0,15 h60\" stroke=\"#fff\" stroke-width=\"10\"/>\n<path d=\"M30,0 v30 M0,15 h60\" stroke=\"#cf142b\" stroke-width=\"6\"/>\n</svg>\n"
        },
        "$:/language/Import/Imported/Hint": {
            "title": "$:/language/Import/Imported/Hint",
            "text": "The following tiddlers were imported:"
        },
        "$:/language/Import/Listing/Cancel/Caption": {
            "title": "$:/language/Import/Listing/Cancel/Caption",
            "text": "Cancel"
        },
        "$:/language/Import/Listing/Hint": {
            "title": "$:/language/Import/Listing/Hint",
            "text": "These tiddlers are ready to import:"
        },
        "$:/language/Import/Listing/Import/Caption": {
            "title": "$:/language/Import/Listing/Import/Caption",
            "text": "Import"
        },
        "$:/language/Import/Listing/Select/Caption": {
            "title": "$:/language/Import/Listing/Select/Caption",
            "text": "Select"
        },
        "$:/language/Import/Listing/Status/Caption": {
            "title": "$:/language/Import/Listing/Status/Caption",
            "text": "Status"
        },
        "$:/language/Import/Listing/Title/Caption": {
            "title": "$:/language/Import/Listing/Title/Caption",
            "text": "Title"
        },
        "$:/language/Import/Upgrader/Plugins/Suppressed/Incompatible": {
            "title": "$:/language/Import/Upgrader/Plugins/Suppressed/Incompatible",
            "text": "Blocked incompatible or obsolete plugin"
        },
        "$:/language/Import/Upgrader/Plugins/Suppressed/Version": {
            "title": "$:/language/Import/Upgrader/Plugins/Suppressed/Version",
            "text": "Blocked plugin (due to incoming <<incoming>> being older than existing <<existing>>)"
        },
        "$:/language/Import/Upgrader/Plugins/Upgraded": {
            "title": "$:/language/Import/Upgrader/Plugins/Upgraded",
            "text": "Upgraded plugin from <<incoming>> to <<upgraded>>"
        },
        "$:/language/Import/Upgrader/State/Suppressed": {
            "title": "$:/language/Import/Upgrader/State/Suppressed",
            "text": "Blocked temporary state tiddler"
        },
        "$:/language/Import/Upgrader/System/Suppressed": {
            "title": "$:/language/Import/Upgrader/System/Suppressed",
            "text": "Blocked system tiddler"
        },
        "$:/language/Import/Upgrader/ThemeTweaks/Created": {
            "title": "$:/language/Import/Upgrader/ThemeTweaks/Created",
            "text": "Migrated theme tweak from <$text text=<<from>>/>"
        },
        "$:/language/AboveStory/ClassicPlugin/Warning": {
            "title": "$:/language/AboveStory/ClassicPlugin/Warning",
            "text": "It looks like you are trying to load a plugin designed for ~TiddlyWiki Classic. Please note that [[these plugins do not work with TiddlyWiki version 5.x.x|http://tiddlywiki.com/#TiddlyWikiClassic]]. ~TiddlyWiki Classic plugins detected:"
        },
        "$:/language/BinaryWarning/Prompt": {
            "title": "$:/language/BinaryWarning/Prompt",
            "text": "This tiddler contains binary data"
        },
        "$:/language/ClassicWarning/Hint": {
            "title": "$:/language/ClassicWarning/Hint",
            "text": "This tiddler is written in TiddlyWiki Classic wiki text format, which is not fully compatible with TiddlyWiki version 5. See http://tiddlywiki.com/static/Upgrading.html for more details."
        },
        "$:/language/ClassicWarning/Upgrade/Caption": {
            "title": "$:/language/ClassicWarning/Upgrade/Caption",
            "text": "upgrade"
        },
        "$:/language/CloseAll/Button": {
            "title": "$:/language/CloseAll/Button",
            "text": "close all"
        },
        "$:/language/ColourPicker/Recent": {
            "title": "$:/language/ColourPicker/Recent",
            "text": "Recent:"
        },
        "$:/language/ConfirmCancelTiddler": {
            "title": "$:/language/ConfirmCancelTiddler",
            "text": "Do you wish to discard changes to the tiddler \"<$text text=<<title>>/>\"?"
        },
        "$:/language/ConfirmDeleteTiddler": {
            "title": "$:/language/ConfirmDeleteTiddler",
            "text": "Do you wish to delete the tiddler \"<$text text=<<title>>/>\"?"
        },
        "$:/language/ConfirmOverwriteTiddler": {
            "title": "$:/language/ConfirmOverwriteTiddler",
            "text": "Do you wish to overwrite the tiddler \"<$text text=<<title>>/>\"?"
        },
        "$:/language/ConfirmEditShadowTiddler": {
            "title": "$:/language/ConfirmEditShadowTiddler",
            "text": "You are about to edit a ShadowTiddler. Any changes will override the default system making future upgrades non-trivial. Are you sure you want to edit \"<$text text=<<title>>/>\"?"
        },
        "$:/language/Count": {
            "title": "$:/language/Count",
            "text": "count"
        },
        "$:/language/DefaultNewTiddlerTitle": {
            "title": "$:/language/DefaultNewTiddlerTitle",
            "text": "New Tiddler"
        },
        "$:/language/DropMessage": {
            "title": "$:/language/DropMessage",
            "text": "Drop here (or use the 'Escape' key to cancel)"
        },
        "$:/language/Encryption/Cancel": {
            "title": "$:/language/Encryption/Cancel",
            "text": "Cancel"
        },
        "$:/language/Encryption/ConfirmClearPassword": {
            "title": "$:/language/Encryption/ConfirmClearPassword",
            "text": "Do you wish to clear the password? This will remove the encryption applied when saving this wiki"
        },
        "$:/language/Encryption/PromptSetPassword": {
            "title": "$:/language/Encryption/PromptSetPassword",
            "text": "Set a new password for this TiddlyWiki"
        },
        "$:/language/Encryption/Username": {
            "title": "$:/language/Encryption/Username",
            "text": "Username"
        },
        "$:/language/Encryption/Password": {
            "title": "$:/language/Encryption/Password",
            "text": "Password"
        },
        "$:/language/Encryption/RepeatPassword": {
            "title": "$:/language/Encryption/RepeatPassword",
            "text": "Repeat password"
        },
        "$:/language/Encryption/PasswordNoMatch": {
            "title": "$:/language/Encryption/PasswordNoMatch",
            "text": "Passwords do not match"
        },
        "$:/language/Encryption/SetPassword": {
            "title": "$:/language/Encryption/SetPassword",
            "text": "Set password"
        },
        "$:/language/Error/Caption": {
            "title": "$:/language/Error/Caption",
            "text": "Error"
        },
        "$:/language/Error/Filter": {
            "title": "$:/language/Error/Filter",
            "text": "Filter error"
        },
        "$:/language/Error/FilterSyntax": {
            "title": "$:/language/Error/FilterSyntax",
            "text": "Syntax error in filter expression"
        },
        "$:/language/Error/IsFilterOperator": {
            "title": "$:/language/Error/IsFilterOperator",
            "text": "Filter Error: Unknown operand for the 'is' filter operator"
        },
        "$:/language/Error/LoadingPluginLibrary": {
            "title": "$:/language/Error/LoadingPluginLibrary",
            "text": "Error loading plugin library"
        },
        "$:/language/Error/RecursiveTransclusion": {
            "title": "$:/language/Error/RecursiveTransclusion",
            "text": "Recursive transclusion error in transclude widget"
        },
        "$:/language/Error/RetrievingSkinny": {
            "title": "$:/language/Error/RetrievingSkinny",
            "text": "Error retrieving skinny tiddler list"
        },
        "$:/language/Error/SavingToTWEdit": {
            "title": "$:/language/Error/SavingToTWEdit",
            "text": "Error saving to TWEdit"
        },
        "$:/language/Error/WhileSaving": {
            "title": "$:/language/Error/WhileSaving",
            "text": "Error while saving"
        },
        "$:/language/Error/XMLHttpRequest": {
            "title": "$:/language/Error/XMLHttpRequest",
            "text": "XMLHttpRequest error code"
        },
        "$:/language/InternalJavaScriptError/Title": {
            "title": "$:/language/InternalJavaScriptError/Title",
            "text": "Internal JavaScript Error"
        },
        "$:/language/InternalJavaScriptError/Hint": {
            "title": "$:/language/InternalJavaScriptError/Hint",
            "text": "Well, this is embarrassing. It is recommended that you restart TiddlyWiki by refreshing your browser"
        },
        "$:/language/InvalidFieldName": {
            "title": "$:/language/InvalidFieldName",
            "text": "Illegal characters in field name \"<$text text=<<fieldName>>/>\". Fields can only contain lowercase letters, digits and the characters underscore (`_`), hyphen (`-`) and period (`.`)"
        },
        "$:/language/LazyLoadingWarning": {
            "title": "$:/language/LazyLoadingWarning",
            "text": "<p>Loading external text from ''<$text text={{!!_canonical_uri}}/>''</p><p>If this message doesn't disappear you may be using a browser that doesn't support external text in this configuration. See http://tiddlywiki.com/#ExternalText</p>"
        },
        "$:/language/LoginToTiddlySpace": {
            "title": "$:/language/LoginToTiddlySpace",
            "text": "Login to TiddlySpace"
        },
        "$:/language/MissingTiddler/Hint": {
            "title": "$:/language/MissingTiddler/Hint",
            "text": "Missing tiddler \"<$text text=<<currentTiddler>>/>\" - click {{$:/core/images/edit-button}} to create"
        },
        "$:/language/No": {
            "title": "$:/language/No",
            "text": "No"
        },
        "$:/language/OfficialPluginLibrary": {
            "title": "$:/language/OfficialPluginLibrary",
            "text": "Official ~TiddlyWiki Plugin Library"
        },
        "$:/language/OfficialPluginLibrary/Hint": {
            "title": "$:/language/OfficialPluginLibrary/Hint",
            "text": "The official ~TiddlyWiki plugin library at tiddlywiki.com. Plugins, themes and language packs are maintained by the core team."
        },
        "$:/language/PluginReloadWarning": {
            "title": "$:/language/PluginReloadWarning",
            "text": "Please save {{$:/core/ui/Buttons/save-wiki}} and reload {{$:/core/ui/Buttons/refresh}} to allow changes to plugins to take effect"
        },
        "$:/language/RecentChanges/DateFormat": {
            "title": "$:/language/RecentChanges/DateFormat",
            "text": "DDth MMM YYYY"
        },
        "$:/language/SystemTiddler/Tooltip": {
            "title": "$:/language/SystemTiddler/Tooltip",
            "text": "This is a system tiddler"
        },
        "$:/language/TagManager/Colour/Heading": {
            "title": "$:/language/TagManager/Colour/Heading",
            "text": "Colour"
        },
        "$:/language/TagManager/Count/Heading": {
            "title": "$:/language/TagManager/Count/Heading",
            "text": "Count"
        },
        "$:/language/TagManager/Icon/Heading": {
            "title": "$:/language/TagManager/Icon/Heading",
            "text": "Icon"
        },
        "$:/language/TagManager/Info/Heading": {
            "title": "$:/language/TagManager/Info/Heading",
            "text": "Info"
        },
        "$:/language/TagManager/Tag/Heading": {
            "title": "$:/language/TagManager/Tag/Heading",
            "text": "Tag"
        },
        "$:/language/Tiddler/DateFormat": {
            "title": "$:/language/Tiddler/DateFormat",
            "text": "DDth MMM YYYY at hh12:0mmam"
        },
        "$:/language/UnsavedChangesWarning": {
            "title": "$:/language/UnsavedChangesWarning",
            "text": "You have unsaved changes in TiddlyWiki"
        },
        "$:/language/Yes": {
            "title": "$:/language/Yes",
            "text": "Yes"
        },
        "$:/language/Modals/Download": {
            "title": "$:/language/Modals/Download",
            "type": "text/vnd.tiddlywiki",
            "subtitle": "Download changes",
            "footer": "<$button message=\"tm-close-tiddler\">Close</$button>",
            "help": "http://tiddlywiki.com/static/DownloadingChanges.html",
            "text": "Your browser only supports manual saving.\n\nTo save your modified wiki, right click on the download link below and select \"Download file\" or \"Save file\", and then choose the folder and filename.\n\n//You can marginally speed things up by clicking the link with the control key (Windows) or the options/alt key (Mac OS X). You will not be prompted for the folder or filename, but your browser is likely to give it an unrecognisable name -- you may need to rename the file to include an `.html` extension before you can do anything useful with it.//\n\nOn smartphones that do not allow files to be downloaded you can instead bookmark the link, and then sync your bookmarks to a desktop computer from where the wiki can be saved normally.\n"
        },
        "$:/language/Modals/SaveInstructions": {
            "title": "$:/language/Modals/SaveInstructions",
            "type": "text/vnd.tiddlywiki",
            "subtitle": "Save your work",
            "footer": "<$button message=\"tm-close-tiddler\">Close</$button>",
            "help": "http://tiddlywiki.com/static/SavingChanges.html",
            "text": "Your changes to this wiki need to be saved as a ~TiddlyWiki HTML file.\n\n!!! Desktop browsers\n\n# Select ''Save As'' from the ''File'' menu\n# Choose a filename and location\n#* Some browsers also require you to explicitly specify the file saving format as ''Webpage, HTML only'' or similar\n# Close this tab\n\n!!! Smartphone browsers\n\n# Create a bookmark to this page\n#* If you've got iCloud or Google Sync set up then the bookmark will automatically sync to your desktop where you can open it and save it as above\n# Close this tab\n\n//If you open the bookmark again in Mobile Safari you will see this message again. If you want to go ahead and use the file, just click the ''close'' button below//\n"
        },
        "$:/config/NewJournal/Title": {
            "title": "$:/config/NewJournal/Title",
            "text": "DDth MMM YYYY"
        },
        "$:/config/NewJournal/Tags": {
            "title": "$:/config/NewJournal/Tags",
            "text": "Journal"
        },
        "$:/language/Notifications/Save/Done": {
            "title": "$:/language/Notifications/Save/Done",
            "text": "Saved wiki"
        },
        "$:/language/Notifications/Save/Starting": {
            "title": "$:/language/Notifications/Save/Starting",
            "text": "Starting to save wiki"
        },
        "$:/language/Search/DefaultResults/Caption": {
            "title": "$:/language/Search/DefaultResults/Caption",
            "text": "List"
        },
        "$:/language/Search/Filter/Caption": {
            "title": "$:/language/Search/Filter/Caption",
            "text": "Filter"
        },
        "$:/language/Search/Filter/Hint": {
            "title": "$:/language/Search/Filter/Hint",
            "text": "Search via a [[filter expression|http://tiddlywiki.com/static/Filters.html]]"
        },
        "$:/language/Search/Filter/Matches": {
            "title": "$:/language/Search/Filter/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/Matches": {
            "title": "$:/language/Search/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/Matches/All": {
            "title": "$:/language/Search/Matches/All",
            "text": "All matches:"
        },
        "$:/language/Search/Matches/Title": {
            "title": "$:/language/Search/Matches/Title",
            "text": "Title matches:"
        },
        "$:/language/Search/Search": {
            "title": "$:/language/Search/Search",
            "text": "Search"
        },
        "$:/language/Search/Shadows/Caption": {
            "title": "$:/language/Search/Shadows/Caption",
            "text": "Shadows"
        },
        "$:/language/Search/Shadows/Hint": {
            "title": "$:/language/Search/Shadows/Hint",
            "text": "Search for shadow tiddlers"
        },
        "$:/language/Search/Shadows/Matches": {
            "title": "$:/language/Search/Shadows/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/Standard/Caption": {
            "title": "$:/language/Search/Standard/Caption",
            "text": "Standard"
        },
        "$:/language/Search/Standard/Hint": {
            "title": "$:/language/Search/Standard/Hint",
            "text": "Search for standard tiddlers"
        },
        "$:/language/Search/Standard/Matches": {
            "title": "$:/language/Search/Standard/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/Search/System/Caption": {
            "title": "$:/language/Search/System/Caption",
            "text": "System"
        },
        "$:/language/Search/System/Hint": {
            "title": "$:/language/Search/System/Hint",
            "text": "Search for system tiddlers"
        },
        "$:/language/Search/System/Matches": {
            "title": "$:/language/Search/System/Matches",
            "text": "//<small><<resultCount>> matches</small>//"
        },
        "$:/language/SideBar/All/Caption": {
            "title": "$:/language/SideBar/All/Caption",
            "text": "All"
        },
        "$:/language/SideBar/Contents/Caption": {
            "title": "$:/language/SideBar/Contents/Caption",
            "text": "Contents"
        },
        "$:/language/SideBar/Drafts/Caption": {
            "title": "$:/language/SideBar/Drafts/Caption",
            "text": "Drafts"
        },
        "$:/language/SideBar/Missing/Caption": {
            "title": "$:/language/SideBar/Missing/Caption",
            "text": "Missing"
        },
        "$:/language/SideBar/More/Caption": {
            "title": "$:/language/SideBar/More/Caption",
            "text": "More"
        },
        "$:/language/SideBar/Open/Caption": {
            "title": "$:/language/SideBar/Open/Caption",
            "text": "Open"
        },
        "$:/language/SideBar/Orphans/Caption": {
            "title": "$:/language/SideBar/Orphans/Caption",
            "text": "Orphans"
        },
        "$:/language/SideBar/Recent/Caption": {
            "title": "$:/language/SideBar/Recent/Caption",
            "text": "Recent"
        },
        "$:/language/SideBar/Shadows/Caption": {
            "title": "$:/language/SideBar/Shadows/Caption",
            "text": "Shadows"
        },
        "$:/language/SideBar/System/Caption": {
            "title": "$:/language/SideBar/System/Caption",
            "text": "System"
        },
        "$:/language/SideBar/Tags/Caption": {
            "title": "$:/language/SideBar/Tags/Caption",
            "text": "Tags"
        },
        "$:/language/SideBar/Tags/Untagged/Caption": {
            "title": "$:/language/SideBar/Tags/Untagged/Caption",
            "text": "untagged"
        },
        "$:/language/SideBar/Tools/Caption": {
            "title": "$:/language/SideBar/Tools/Caption",
            "text": "Tools"
        },
        "$:/language/SideBar/Types/Caption": {
            "title": "$:/language/SideBar/Types/Caption",
            "text": "Types"
        },
        "$:/SiteSubtitle": {
            "title": "$:/SiteSubtitle",
            "text": "a non-linear personal web notebook"
        },
        "$:/SiteTitle": {
            "title": "$:/SiteTitle",
            "text": "My ~TiddlyWiki"
        },
        "$:/language/Snippets/ListByTag": {
            "title": "$:/language/Snippets/ListByTag",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "List of tiddlers by tag",
            "text": "<<list-links \"[tag[task]sort[title]]\">>\n"
        },
        "$:/language/Snippets/MacroDefinition": {
            "title": "$:/language/Snippets/MacroDefinition",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "Macro definition",
            "text": "\\define macroName(param1:\"default value\",param2)\nText of the macro\n\\end\n"
        },
        "$:/language/Snippets/Table4x3": {
            "title": "$:/language/Snippets/Table4x3",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "Table with 4 columns by 3 rows",
            "text": "|! |!Alpha |!Beta |!Gamma |!Delta |\n|!One | | | | |\n|!Two | | | | |\n|!Three | | | | |\n"
        },
        "$:/language/Snippets/TableOfContents": {
            "title": "$:/language/Snippets/TableOfContents",
            "tags": "$:/tags/TextEditor/Snippet",
            "caption": "Table of Contents",
            "text": "<div class=\"tc-table-of-contents\">\n\n<<toc-selective-expandable 'TableOfContents'>>\n\n</div>"
        },
        "$:/language/ThemeTweaks/ThemeTweaks": {
            "title": "$:/language/ThemeTweaks/ThemeTweaks",
            "text": "Theme Tweaks"
        },
        "$:/language/ThemeTweaks/ThemeTweaks/Hint": {
            "title": "$:/language/ThemeTweaks/ThemeTweaks/Hint",
            "text": "You can tweak certain aspects of the ''Vanilla'' theme."
        },
        "$:/language/ThemeTweaks/Options": {
            "title": "$:/language/ThemeTweaks/Options",
            "text": "Options"
        },
        "$:/language/ThemeTweaks/Options/SidebarLayout": {
            "title": "$:/language/ThemeTweaks/Options/SidebarLayout",
            "text": "Sidebar layout"
        },
        "$:/language/ThemeTweaks/Options/SidebarLayout/Fixed-Fluid": {
            "title": "$:/language/ThemeTweaks/Options/SidebarLayout/Fixed-Fluid",
            "text": "Fixed story, fluid sidebar"
        },
        "$:/language/ThemeTweaks/Options/SidebarLayout/Fluid-Fixed": {
            "title": "$:/language/ThemeTweaks/Options/SidebarLayout/Fluid-Fixed",
            "text": "Fluid story, fixed sidebar"
        },
        "$:/language/ThemeTweaks/Options/StickyTitles": {
            "title": "$:/language/ThemeTweaks/Options/StickyTitles",
            "text": "Sticky titles"
        },
        "$:/language/ThemeTweaks/Options/StickyTitles/Hint": {
            "title": "$:/language/ThemeTweaks/Options/StickyTitles/Hint",
            "text": "Causes tiddler titles to \"stick\" to the top of the browser window. Caution: Does not work at all with Chrome, and causes some layout issues in Firefox"
        },
        "$:/language/ThemeTweaks/Options/CodeWrapping": {
            "title": "$:/language/ThemeTweaks/Options/CodeWrapping",
            "text": "Wrap long lines in code blocks"
        },
        "$:/language/ThemeTweaks/Settings": {
            "title": "$:/language/ThemeTweaks/Settings",
            "text": "Settings"
        },
        "$:/language/ThemeTweaks/Settings/FontFamily": {
            "title": "$:/language/ThemeTweaks/Settings/FontFamily",
            "text": "Font family"
        },
        "$:/language/ThemeTweaks/Settings/CodeFontFamily": {
            "title": "$:/language/ThemeTweaks/Settings/CodeFontFamily",
            "text": "Code font family"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImage": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImage",
            "text": "Page background image"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment",
            "text": "Page background image attachment"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Scroll": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Scroll",
            "text": "Scroll with tiddlers"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Fixed": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageAttachment/Fixed",
            "text": "Fixed to window"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize",
            "text": "Page background image size"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Auto": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Auto",
            "text": "Auto"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Cover": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Cover",
            "text": "Cover"
        },
        "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Contain": {
            "title": "$:/language/ThemeTweaks/Settings/BackgroundImageSize/Contain",
            "text": "Contain"
        },
        "$:/language/ThemeTweaks/Metrics": {
            "title": "$:/language/ThemeTweaks/Metrics",
            "text": "Sizes"
        },
        "$:/language/ThemeTweaks/Metrics/FontSize": {
            "title": "$:/language/ThemeTweaks/Metrics/FontSize",
            "text": "Font size"
        },
        "$:/language/ThemeTweaks/Metrics/LineHeight": {
            "title": "$:/language/ThemeTweaks/Metrics/LineHeight",
            "text": "Line height"
        },
        "$:/language/ThemeTweaks/Metrics/BodyFontSize": {
            "title": "$:/language/ThemeTweaks/Metrics/BodyFontSize",
            "text": "Font size for tiddler body"
        },
        "$:/language/ThemeTweaks/Metrics/BodyLineHeight": {
            "title": "$:/language/ThemeTweaks/Metrics/BodyLineHeight",
            "text": "Line height for tiddler body"
        },
        "$:/language/ThemeTweaks/Metrics/StoryLeft": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryLeft",
            "text": "Story left position"
        },
        "$:/language/ThemeTweaks/Metrics/StoryLeft/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryLeft/Hint",
            "text": "how far the left margin of the story river<br>(tiddler area) is from the left of the page"
        },
        "$:/language/ThemeTweaks/Metrics/StoryTop": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryTop",
            "text": "Story top position"
        },
        "$:/language/ThemeTweaks/Metrics/StoryTop/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryTop/Hint",
            "text": "how far the top margin of the story river<br>is from the top of the page"
        },
        "$:/language/ThemeTweaks/Metrics/StoryRight": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryRight",
            "text": "Story right"
        },
        "$:/language/ThemeTweaks/Metrics/StoryRight/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryRight/Hint",
            "text": "how far the left margin of the sidebar <br>is from the left of the page"
        },
        "$:/language/ThemeTweaks/Metrics/StoryWidth": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryWidth",
            "text": "Story width"
        },
        "$:/language/ThemeTweaks/Metrics/StoryWidth/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/StoryWidth/Hint",
            "text": "the overall width of the story river"
        },
        "$:/language/ThemeTweaks/Metrics/TiddlerWidth": {
            "title": "$:/language/ThemeTweaks/Metrics/TiddlerWidth",
            "text": "Tiddler width"
        },
        "$:/language/ThemeTweaks/Metrics/TiddlerWidth/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/TiddlerWidth/Hint",
            "text": "within the story river"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint",
            "text": "Sidebar breakpoint"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarBreakpoint/Hint",
            "text": "the minimum page width at which the story<br>river and sidebar will appear side by side"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarWidth": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarWidth",
            "text": "Sidebar width"
        },
        "$:/language/ThemeTweaks/Metrics/SidebarWidth/Hint": {
            "title": "$:/language/ThemeTweaks/Metrics/SidebarWidth/Hint",
            "text": "the width of the sidebar in fluid-fixed layout"
        },
        "$:/language/TiddlerInfo/Advanced/Caption": {
            "title": "$:/language/TiddlerInfo/Advanced/Caption",
            "text": "Advanced"
        },
        "$:/language/TiddlerInfo/Advanced/PluginInfo/Empty/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/PluginInfo/Empty/Hint",
            "text": "none"
        },
        "$:/language/TiddlerInfo/Advanced/PluginInfo/Heading": {
            "title": "$:/language/TiddlerInfo/Advanced/PluginInfo/Heading",
            "text": "Plugin Details"
        },
        "$:/language/TiddlerInfo/Advanced/PluginInfo/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/PluginInfo/Hint",
            "text": "This plugin contains the following shadow tiddlers:"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/Heading": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/Heading",
            "text": "Shadow Status"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/NotShadow/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/NotShadow/Hint",
            "text": "The tiddler <$link to=<<infoTiddler>>><$text text=<<infoTiddler>>/></$link> is not a shadow tiddler"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Hint",
            "text": "The tiddler <$link to=<<infoTiddler>>><$text text=<<infoTiddler>>/></$link> is a shadow tiddler"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Source": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/Shadow/Source",
            "text": "It is defined in the plugin <$link to=<<pluginTiddler>>><$text text=<<pluginTiddler>>/></$link>"
        },
        "$:/language/TiddlerInfo/Advanced/ShadowInfo/OverriddenShadow/Hint": {
            "title": "$:/language/TiddlerInfo/Advanced/ShadowInfo/OverriddenShadow/Hint",
            "text": "It is overridden by an ordinary tiddler"
        },
        "$:/language/TiddlerInfo/Fields/Caption": {
            "title": "$:/language/TiddlerInfo/Fields/Caption",
            "text": "Fields"
        },
        "$:/language/TiddlerInfo/List/Caption": {
            "title": "$:/language/TiddlerInfo/List/Caption",
            "text": "List"
        },
        "$:/language/TiddlerInfo/List/Empty": {
            "title": "$:/language/TiddlerInfo/List/Empty",
            "text": "This tiddler does not have a list"
        },
        "$:/language/TiddlerInfo/Listed/Caption": {
            "title": "$:/language/TiddlerInfo/Listed/Caption",
            "text": "Listed"
        },
        "$:/language/TiddlerInfo/Listed/Empty": {
            "title": "$:/language/TiddlerInfo/Listed/Empty",
            "text": "This tiddler is not listed by any others"
        },
        "$:/language/TiddlerInfo/References/Caption": {
            "title": "$:/language/TiddlerInfo/References/Caption",
            "text": "References"
        },
        "$:/language/TiddlerInfo/References/Empty": {
            "title": "$:/language/TiddlerInfo/References/Empty",
            "text": "No tiddlers link to this one"
        },
        "$:/language/TiddlerInfo/Tagging/Caption": {
            "title": "$:/language/TiddlerInfo/Tagging/Caption",
            "text": "Tagging"
        },
        "$:/language/TiddlerInfo/Tagging/Empty": {
            "title": "$:/language/TiddlerInfo/Tagging/Empty",
            "text": "No tiddlers are tagged with this one"
        },
        "$:/language/TiddlerInfo/Tools/Caption": {
            "title": "$:/language/TiddlerInfo/Tools/Caption",
            "text": "Tools"
        },
        "$:/language/Docs/Types/application/javascript": {
            "title": "$:/language/Docs/Types/application/javascript",
            "description": "JavaScript code",
            "name": "application/javascript",
            "group": "Developer"
        },
        "$:/language/Docs/Types/application/json": {
            "title": "$:/language/Docs/Types/application/json",
            "description": "JSON data",
            "name": "application/json",
            "group": "Developer"
        },
        "$:/language/Docs/Types/application/x-tiddler-dictionary": {
            "title": "$:/language/Docs/Types/application/x-tiddler-dictionary",
            "description": "Data dictionary",
            "name": "application/x-tiddler-dictionary",
            "group": "Developer"
        },
        "$:/language/Docs/Types/image/gif": {
            "title": "$:/language/Docs/Types/image/gif",
            "description": "GIF image",
            "name": "image/gif",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/jpeg": {
            "title": "$:/language/Docs/Types/image/jpeg",
            "description": "JPEG image",
            "name": "image/jpeg",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/png": {
            "title": "$:/language/Docs/Types/image/png",
            "description": "PNG image",
            "name": "image/png",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/svg+xml": {
            "title": "$:/language/Docs/Types/image/svg+xml",
            "description": "Structured Vector Graphics image",
            "name": "image/svg+xml",
            "group": "Image"
        },
        "$:/language/Docs/Types/image/x-icon": {
            "title": "$:/language/Docs/Types/image/x-icon",
            "description": "ICO format icon file",
            "name": "image/x-icon",
            "group": "Image"
        },
        "$:/language/Docs/Types/text/css": {
            "title": "$:/language/Docs/Types/text/css",
            "description": "Static stylesheet",
            "name": "text/css",
            "group": "Developer"
        },
        "$:/language/Docs/Types/text/html": {
            "title": "$:/language/Docs/Types/text/html",
            "description": "HTML markup",
            "name": "text/html",
            "group": "Text"
        },
        "$:/language/Docs/Types/text/plain": {
            "title": "$:/language/Docs/Types/text/plain",
            "description": "Plain text",
            "name": "text/plain",
            "group": "Text"
        },
        "$:/language/Docs/Types/text/vnd.tiddlywiki": {
            "title": "$:/language/Docs/Types/text/vnd.tiddlywiki",
            "description": "TiddlyWiki 5",
            "name": "text/vnd.tiddlywiki",
            "group": "Text"
        },
        "$:/language/Docs/Types/text/x-tiddlywiki": {
            "title": "$:/language/Docs/Types/text/x-tiddlywiki",
            "description": "TiddlyWiki Classic",
            "name": "text/x-tiddlywiki",
            "group": "Text"
        },
        "$:/languages/en-GB": {
            "title": "$:/languages/en-GB",
            "name": "en-GB",
            "description": "English (British)",
            "author": "JeremyRuston",
            "core-version": ">=5.0.0\"",
            "text": "Stub pseudo-plugin for the default language"
        },
        "$:/core/modules/commander.js": {
            "text": "/*\\\ntitle: $:/core/modules/commander.js\ntype: application/javascript\nmodule-type: global\n\nThe $tw.Commander class is a command interpreter\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nParse a sequence of commands\n\tcommandTokens: an array of command string tokens\n\twiki: reference to the wiki store object\n\tstreams: {output:, error:}, each of which has a write(string) method\n\tcallback: a callback invoked as callback(err) where err is null if there was no error\n*/\nvar Commander = function(commandTokens,callback,wiki,streams) {\n\tvar path = require(\"path\");\n\tthis.commandTokens = commandTokens;\n\tthis.nextToken = 0;\n\tthis.callback = callback;\n\tthis.wiki = wiki;\n\tthis.streams = streams;\n\tthis.outputPath = path.resolve($tw.boot.wikiPath,$tw.config.wikiOutputSubDir);\n};\n\n/*\nAdd a string of tokens to the command queue\n*/\nCommander.prototype.addCommandTokens = function(commandTokens) {\n\tvar params = commandTokens.slice(0);\n\tparams.unshift(0);\n\tparams.unshift(this.nextToken);\n\tArray.prototype.splice.apply(this.commandTokens,params);\n};\n\n/*\nExecute the sequence of commands and invoke a callback on completion\n*/\nCommander.prototype.execute = function() {\n\tthis.executeNextCommand();\n};\n\n/*\nExecute the next command in the sequence\n*/\nCommander.prototype.executeNextCommand = function() {\n\tvar self = this;\n\t// Invoke the callback if there are no more commands\n\tif(this.nextToken >= this.commandTokens.length) {\n\t\tthis.callback(null);\n\t} else {\n\t\t// Get and check the command token\n\t\tvar commandName = this.commandTokens[this.nextToken++];\n\t\tif(commandName.substr(0,2) !== \"--\") {\n\t\t\tthis.callback(\"Missing command: \" + commandName);\n\t\t} else {\n\t\t\tcommandName = commandName.substr(2); // Trim off the --\n\t\t\t// Accumulate the parameters to the command\n\t\t\tvar params = [];\n\t\t\twhile(this.nextToken < this.commandTokens.length && \n\t\t\t\tthis.commandTokens[this.nextToken].substr(0,2) !== \"--\") {\n\t\t\t\tparams.push(this.commandTokens[this.nextToken++]);\n\t\t\t}\n\t\t\t// Get the command info\n\t\t\tvar command = $tw.commands[commandName],\n\t\t\t\tc,err;\n\t\t\tif(!command) {\n\t\t\t\tthis.callback(\"Unknown command: \" + commandName);\n\t\t\t} else {\n\t\t\t\tif(this.verbose) {\n\t\t\t\t\tthis.streams.output.write(\"Executing command: \" + commandName + \" \" + params.join(\" \") + \"\\n\");\n\t\t\t\t}\n\t\t\t\tif(command.info.synchronous) {\n\t\t\t\t\t// Synchronous command\n\t\t\t\t\tc = new command.Command(params,this);\n\t\t\t\t\terr = c.execute();\n\t\t\t\t\tif(err) {\n\t\t\t\t\t\tthis.callback(err);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis.executeNextCommand();\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Asynchronous command\n\t\t\t\t\tc = new command.Command(params,this,function(err) {\n\t\t\t\t\t\tif(err) {\n\t\t\t\t\t\t\tself.callback(err);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tself.executeNextCommand();\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\terr = c.execute();\n\t\t\t\t\tif(err) {\n\t\t\t\t\t\tthis.callback(err);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n};\n\nCommander.initCommands = function(moduleType) {\n\tmoduleType = moduleType || \"command\";\n\t$tw.commands = {};\n\t$tw.modules.forEachModuleOfType(moduleType,function(title,module) {\n\t\tvar c = $tw.commands[module.info.name] = {};\n\t\t// Add the methods defined by the module\n\t\tfor(var f in module) {\n\t\t\tif($tw.utils.hop(module,f)) {\n\t\t\t\tc[f] = 
module[f];\n\t\t\t}\n\t\t}\n\t});\n};\n\nexports.Commander = Commander;\n\n})();\n",
            "title": "$:/core/modules/commander.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/commands/build.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/build.js\ntype: application/javascript\nmodule-type: command\n\nCommand to build a build target\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"build\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\t// Get the build targets defined in the wiki\n\tvar buildTargets = $tw.boot.wikiInfo.build;\n\tif(!buildTargets) {\n\t\treturn \"No build targets defined\";\n\t}\n\t// Loop through each of the specified targets\n\tvar targets;\n\tif(this.params.length > 0) {\n\t\ttargets = this.params;\n\t} else {\n\t\ttargets = Object.keys(buildTargets);\n\t}\n\tfor(var targetIndex=0; targetIndex<targets.length; targetIndex++) {\n\t\tvar target = targets[targetIndex],\n\t\t\tcommands = buildTargets[target];\n\t\tif(!commands) {\n\t\t\treturn \"Build target '\" + target + \"' not found\";\n\t\t}\n\t\t// Add the commands to the queue\n\t\tthis.commander.addCommandTokens(commands);\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/build.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/clearpassword.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/clearpassword.js\ntype: application/javascript\nmodule-type: command\n\nClear password for crypto operations\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"clearpassword\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\t$tw.crypto.setPassword(null);\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/clearpassword.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/editions.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/editions.js\ntype: application/javascript\nmodule-type: command\n\nCommand to list the available editions\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"editions\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tvar self = this;\n\t// Output the list\n\tthis.commander.streams.output.write(\"Available editions:\\n\\n\");\n\tvar editionInfo = $tw.utils.getEditionInfo();\n\t$tw.utils.each(editionInfo,function(info,name) {\n\t\tself.commander.streams.output.write(\"    \" + name + \": \" + info.description + \"\\n\");\n\t});\n\tthis.commander.streams.output.write(\"\\n\");\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/editions.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/help.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/help.js\ntype: application/javascript\nmodule-type: command\n\nHelp command\n\n\\*/\n(function(){\n\n/*jshint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"help\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tvar subhelp = this.params[0] || \"default\",\n\t\thelpBase = \"$:/language/Help/\",\n\t\ttext;\n\tif(!this.commander.wiki.getTiddler(helpBase + subhelp)) {\n\t\tsubhelp = \"notfound\";\n\t}\n\t// Wikify the help as formatted text (ie block elements generate newlines)\n\ttext = this.commander.wiki.renderTiddler(\"text/plain-formatted\",helpBase + subhelp);\n\t// Remove any leading linebreaks\n\ttext = text.replace(/^(\\r?\\n)*/g,\"\");\n\tthis.commander.streams.output.write(text);\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/help.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/init.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/init.js\ntype: application/javascript\nmodule-type: command\n\nCommand to initialise an empty wiki folder\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"init\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tvar fs = require(\"fs\"),\n\t\tpath = require(\"path\");\n\t// Check that we don't already have a valid wiki folder\n\tif($tw.boot.wikiTiddlersPath || ($tw.utils.isDirectory($tw.boot.wikiPath) && !$tw.utils.isDirectoryEmpty($tw.boot.wikiPath))) {\n\t\treturn \"Wiki folder is not empty\";\n\t}\n\t// Loop through each of the specified editions\n\tvar editions = this.params.length > 0 ? this.params : [\"empty\"];\n\tfor(var editionIndex=0; editionIndex<editions.length; editionIndex++) {\n\t\tvar editionName = editions[editionIndex];\n\t\t// Check the edition exists\n\t\tvar editionPath = $tw.findLibraryItem(editionName,$tw.getLibraryItemSearchPaths($tw.config.editionsPath,$tw.config.editionsEnvVar));\n\t\tif(!$tw.utils.isDirectory(editionPath)) {\n\t\t\treturn \"Edition '\" + editionName + \"' not found\";\n\t\t}\n\t\t// Copy the edition content\n\t\tvar err = $tw.utils.copyDirectory(editionPath,$tw.boot.wikiPath);\n\t\tif(!err) {\n\t\t\tthis.commander.streams.output.write(\"Copied edition '\" + editionName + \"' to \" + $tw.boot.wikiPath + \"\\n\");\n\t\t} else {\n\t\t\treturn err;\n\t\t}\n\t}\n\t// Tweak the tiddlywiki.info to remove any included wikis\n\tvar packagePath = $tw.boot.wikiPath + \"/tiddlywiki.info\",\n\t\tpackageJson = JSON.parse(fs.readFileSync(packagePath));\n\tdelete packageJson.includeWikis;\n\tfs.writeFileSync(packagePath,JSON.stringify(packageJson,null,$tw.config.preferences.jsonSpaces));\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/init.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/load.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/load.js\ntype: application/javascript\nmodule-type: command\n\nCommand to load tiddlers from a file\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"load\",\n\tsynchronous: false\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\");\n\tif(this.params.length < 1) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar ext = path.extname(self.params[0]);\n\tfs.readFile(this.params[0],$tw.utils.getTypeEncoding(ext),function(err,data) {\n\t\tif (err) {\n\t\t\tself.callback(err);\n\t\t} else {\n\t\t\tvar fields = {title: self.params[0]},\n\t\t\t\ttype = path.extname(self.params[0]);\n\t\t\tvar tiddlers = self.commander.wiki.deserializeTiddlers(type,data,fields);\n\t\t\tif(!tiddlers) {\n\t\t\t\tself.callback(\"No tiddlers found in file \\\"\" + self.params[0] + \"\\\"\");\n\t\t\t} else {\n\t\t\t\tfor(var t=0; t<tiddlers.length; t++) {\n\t\t\t\t\tself.commander.wiki.importTiddler(new $tw.Tiddler(tiddlers[t]));\n\t\t\t\t}\n\t\t\t\tself.callback(null);\t\n\t\t\t}\n\t\t}\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/load.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/makelibrary.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/makelibrary.js\ntype: application/javascript\nmodule-type: command\n\nCommand to pack all of the plugins in the library into a plugin tiddler of type \"library\"\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"makelibrary\",\n\tsynchronous: true\n};\n\nvar UPGRADE_LIBRARY_TITLE = \"$:/UpgradeLibrary\";\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tvar wiki = this.commander.wiki,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\tupgradeLibraryTitle = this.params[0] || UPGRADE_LIBRARY_TITLE,\n\t\ttiddlers = {};\n\t// Collect up the library plugins\n\tvar collectPlugins = function(folder) {\n\t\t\tvar pluginFolders = fs.readdirSync(folder);\n\t\t\tfor(var p=0; p<pluginFolders.length; p++) {\n\t\t\t\tif(!$tw.boot.excludeRegExp.test(pluginFolders[p])) {\n\t\t\t\t\tpluginFields = $tw.loadPluginFolder(path.resolve(folder,\"./\" + pluginFolders[p]));\n\t\t\t\t\tif(pluginFields && pluginFields.title) {\n\t\t\t\t\t\ttiddlers[pluginFields.title] = pluginFields;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tcollectPublisherPlugins = function(folder) {\n\t\t\tvar publisherFolders = fs.readdirSync(folder);\n\t\t\tfor(var t=0; t<publisherFolders.length; t++) {\n\t\t\t\tif(!$tw.boot.excludeRegExp.test(publisherFolders[t])) {\n\t\t\t\t\tcollectPlugins(path.resolve(folder,\"./\" + publisherFolders[t]));\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\tcollectPublisherPlugins(path.resolve($tw.boot.corePath,$tw.config.pluginsPath));\n\tcollectPublisherPlugins(path.resolve($tw.boot.corePath,$tw.config.themesPath));\n\tcollectPlugins(path.resolve($tw.boot.corePath,$tw.config.languagesPath));\n\t// Save the upgrade library tiddler\n\tvar pluginFields = {\n\t\ttitle: upgradeLibraryTitle,\n\t\ttype: \"application/json\",\n\t\t\"plugin-type\": \"library\",\n\t\t\"text\": JSON.stringify({tiddlers: tiddlers},null,$tw.config.preferences.jsonSpaces)\n\t};\n\twiki.addTiddler(new $tw.Tiddler(pluginFields));\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/makelibrary.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/output.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/output.js\ntype: application/javascript\nmodule-type: command\n\nCommand to set the default output location (defaults to current working directory)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"output\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tvar fs = require(\"fs\"),\n\t\tpath = require(\"path\");\n\tif(this.params.length < 1) {\n\t\treturn \"Missing output path\";\n\t}\n\tthis.commander.outputPath = path.resolve(process.cwd(),this.params[0]);\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/output.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/password.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/password.js\ntype: application/javascript\nmodule-type: command\n\nSave password for crypto operations\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"password\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 1) {\n\t\treturn \"Missing password\";\n\t}\n\t$tw.crypto.setPassword(this.params[0]);\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/password.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/rendertiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/rendertiddler.js\ntype: application/javascript\nmodule-type: command\n\nCommand to render a tiddler and save it to a file\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"rendertiddler\",\n\tsynchronous: false\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\ttitle = this.params[0],\n\t\tfilename = path.resolve(this.commander.outputPath,this.params[1]),\n\t\ttype = this.params[2] || \"text/html\",\n\t\ttemplate = this.params[3],\n\t\tvariables = {};\n\t$tw.utils.createFileDirectories(filename);\n\tif(template) {\n\t\tvariables.currentTiddler = title;\n\t\ttitle = template;\n\t}\n\tfs.writeFile(filename,this.commander.wiki.renderTiddler(type,title,{variables: variables}),\"utf8\",function(err) {\n\t\tself.callback(err);\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/rendertiddler.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/rendertiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/rendertiddlers.js\ntype: application/javascript\nmodule-type: command\n\nCommand to render several tiddlers to a folder of files\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.info = {\n\tname: \"rendertiddlers\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\twiki = this.commander.wiki,\n\t\tfilter = this.params[0],\n\t\ttemplate = this.params[1],\n\t\toutputPath = this.commander.outputPath,\n\t\tpathname = path.resolve(outputPath,this.params[2]),\t\t\n\t\ttype = this.params[3] || \"text/html\",\n\t\textension = this.params[4] || \".html\",\n\t\tdeleteDirectory = (this.params[5] || \"\").toLowerCase() !== \"noclean\",\n\t\ttiddlers = wiki.filterTiddlers(filter);\n\tif(deleteDirectory) {\n\t\t$tw.utils.deleteDirectory(pathname);\n\t}\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar parser = wiki.parseTiddler(template),\n\t\t\twidgetNode = wiki.makeWidget(parser,{variables: {currentTiddler: title}}),\n\t\t\tcontainer = $tw.fakeDocument.createElement(\"div\");\n\t\twidgetNode.render(container,null);\n\t\tvar text = type === \"text/html\" ? container.innerHTML : container.textContent,\n\t\t\texportPath = null;\n\t\tif($tw.utils.hop($tw.macros,\"tv-get-export-path\")) {\n\t\t\tvar macroPath = $tw.macros[\"tv-get-export-path\"].run.apply(self,[title]);\n\t\t\tif(macroPath) {\n\t\t\t\texportPath = path.resolve(outputPath,macroPath + extension);\n\t\t\t}\n\t\t}\n\t\tvar finalPath = exportPath || path.resolve(pathname,encodeURIComponent(title) + extension);\n\t\t$tw.utils.createFileDirectories(finalPath);\n\t\tfs.writeFileSync(finalPath,text,\"utf8\");\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/rendertiddlers.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/savelibrarytiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/savelibrarytiddlers.js\ntype: application/javascript\nmodule-type: command\n\nCommand to save the subtiddlers of a bundle tiddler as a series of JSON files\n\n--savelibrarytiddlers <tiddler> <pathname> <skinnylisting>\n\nThe tiddler identifies the bundle tiddler that contains the subtiddlers.\n\nThe pathname specifies the pathname to the folder in which the JSON files should be saved. The filename is the URL encoded title of the subtiddler.\n\nThe skinnylisting specifies the title of the tiddler to which a JSON catalogue of the subtiddlers will be saved. The JSON file contains the same data as the bundle tiddler but with the `text` field removed.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"savelibrarytiddlers\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\tcontainerTitle = this.params[0],\n\t\tfilter = this.params[1],\n\t\tbasepath = this.params[2],\n\t\tskinnyListTitle = this.params[3];\n\t// Get the container tiddler as data\n\tvar containerData = self.commander.wiki.getTiddlerDataCached(containerTitle,undefined);\n\tif(!containerData) {\n\t\treturn \"'\" + containerTitle + \"' is not a tiddler bundle\";\n\t}\n\t// Filter the list of plugins\n\tvar pluginList = [];\n\t$tw.utils.each(containerData.tiddlers,function(tiddler,title) {\n\t\tpluginList.push(title);\n\t});\n\tvar filteredPluginList;\n\tif(filter) {\n\t\tfilteredPluginList = self.commander.wiki.filterTiddlers(filter,null,self.commander.wiki.makeTiddlerIterator(pluginList));\n\t} else {\n\t\tfilteredPluginList = pluginList;\n\t}\n\t// Iterate through the plugins\n\tvar skinnyList = [];\n\t$tw.utils.each(filteredPluginList,function(title) {\n\t\tvar tiddler = containerData.tiddlers[title];\n\t\t// Save each JSON file and collect the skinny data\n\t\tvar pathname = path.resolve(self.commander.outputPath,basepath + encodeURIComponent(title) + \".json\");\n\t\t$tw.utils.createFileDirectories(pathname);\n\t\tfs.writeFileSync(pathname,JSON.stringify(tiddler,null,$tw.config.preferences.jsonSpaces),\"utf8\");\n\t\t// Collect the skinny list data\n\t\tvar pluginTiddlers = JSON.parse(tiddler.text),\n\t\t\treadmeContent = (pluginTiddlers.tiddlers[title + \"/readme\"] || {}).text,\n\t\t\ticonTiddler = pluginTiddlers.tiddlers[title + \"/icon\"] || {},\n\t\t\ticonType = iconTiddler.type,\n\t\t\ticonText = iconTiddler.text,\n\t\t\ticonContent;\n\t\tif(iconType && iconText) {\n\t\t\ticonContent = $tw.utils.makeDataUri(iconText,iconType);\n\t\t}\n\t\tskinnyList.push($tw.utils.extend({},tiddler,{text: undefined, readme: readmeContent, icon: iconContent}));\n\t});\n\t// Save the catalogue tiddler\n\tif(skinnyListTitle) {\n\t\tself.commander.wiki.setTiddlerData(skinnyListTitle,skinnyList);\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/savelibrarytiddlers.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/savetiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/savetiddler.js\ntype: application/javascript\nmodule-type: command\n\nCommand to save the content of a tiddler to a file\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"savetiddler\",\n\tsynchronous: false\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 2) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\ttitle = this.params[0],\n\t\tfilename = path.resolve(this.commander.outputPath,this.params[1]),\n\t\ttiddler = this.commander.wiki.getTiddler(title);\n\tif(tiddler) {\n\t\tvar type = tiddler.fields.type || \"text/vnd.tiddlywiki\",\n\t\t\tcontentTypeInfo = $tw.config.contentTypeInfo[type] || {encoding: \"utf8\"};\n\t\t$tw.utils.createFileDirectories(filename);\n\t\tfs.writeFile(filename,tiddler.fields.text,contentTypeInfo.encoding,function(err) {\n\t\t\tself.callback(err);\n\t\t});\n\t} else {\n\t\treturn \"Missing tiddler: \" + title;\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/savetiddler.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/savetiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/savetiddlers.js\ntype: application/javascript\nmodule-type: command\n\nCommand to save several tiddlers to a folder of files\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.info = {\n\tname: \"savetiddlers\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 1) {\n\t\treturn \"Missing filename\";\n\t}\n\tvar self = this,\n\t\tfs = require(\"fs\"),\n\t\tpath = require(\"path\"),\n\t\twiki = this.commander.wiki,\n\t\tfilter = this.params[0],\n\t\tpathname = path.resolve(this.commander.outputPath,this.params[1]),\n\t\tdeleteDirectory = (this.params[2] || \"\").toLowerCase() !== \"noclean\",\n\t\ttiddlers = wiki.filterTiddlers(filter);\n\tif(deleteDirectory) {\n\t\t$tw.utils.deleteDirectory(pathname);\n\t}\n\t$tw.utils.createDirectory(pathname);\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = self.commander.wiki.getTiddler(title),\n\t\t\ttype = tiddler.fields.type || \"text/vnd.tiddlywiki\",\n\t\t\tcontentTypeInfo = $tw.config.contentTypeInfo[type] || {encoding: \"utf8\"},\n\t\t\tfilename = path.resolve(pathname,encodeURIComponent(title));\n\t\tfs.writeFileSync(filename,tiddler.fields.text,contentTypeInfo.encoding);\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/savetiddlers.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/server.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/server.js\ntype: application/javascript\nmodule-type: command\n\nServe tiddlers over http\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nif($tw.node) {\n\tvar util = require(\"util\"),\n\t\tfs = require(\"fs\"),\n\t\turl = require(\"url\"),\n\t\tpath = require(\"path\"),\n\t\thttp = require(\"http\");\n}\n\nexports.info = {\n\tname: \"server\",\n\tsynchronous: true\n};\n\n/*\nA simple HTTP server with regexp-based routes\n*/\nfunction SimpleServer(options) {\n\tthis.routes = options.routes || [];\n\tthis.wiki = options.wiki;\n\tthis.variables = options.variables || {};\n}\n\nSimpleServer.prototype.set = function(obj) {\n\tvar self = this;\n\t$tw.utils.each(obj,function(value,name) {\n\t\tself.variables[name] = value;\n\t});\n};\n\nSimpleServer.prototype.get = function(name) {\n\treturn this.variables[name];\n};\n\nSimpleServer.prototype.addRoute = function(route) {\n\tthis.routes.push(route);\n};\n\nSimpleServer.prototype.findMatchingRoute = function(request,state) {\n\tvar pathprefix = this.get(\"pathprefix\") || \"\";\n\tfor(var t=0; t<this.routes.length; t++) {\n\t\tvar potentialRoute = this.routes[t],\n\t\t\tpathRegExp = potentialRoute.path,\n\t\t\tpathname = state.urlInfo.pathname,\n\t\t\tmatch;\n\t\tif(pathprefix) {\n\t\t\tif(pathname.substr(0,pathprefix.length) === pathprefix) {\n\t\t\t\tpathname = pathname.substr(pathprefix.length);\n\t\t\t\tmatch = potentialRoute.path.exec(pathname);\n\t\t\t} else {\n\t\t\t\tmatch = false;\n\t\t\t}\n\t\t} else {\n\t\t\tmatch = potentialRoute.path.exec(pathname);\n\t\t}\n\t\tif(match && request.method === potentialRoute.method) {\n\t\t\tstate.params = [];\n\t\t\tfor(var p=1; p<match.length; p++) {\n\t\t\t\tstate.params.push(match[p]);\n\t\t\t}\n\t\t\treturn potentialRoute;\n\t\t}\n\t}\n\treturn null;\n};\n\nSimpleServer.prototype.checkCredentials = function(request,incomingUsername,incomingPassword) {\n\tvar header = request.headers.authorization || \"\",\n\t\ttoken = header.split(/\\s+/).pop() || \"\",\n\t\tauth = $tw.utils.base64Decode(token),\n\t\tparts = auth.split(/:/),\n\t\tusername = parts[0],\n\t\tpassword = parts[1];\n\tif(incomingUsername === username && incomingPassword === password) {\n\t\treturn \"ALLOWED\";\n\t} else {\n\t\treturn \"DENIED\";\n\t}\n};\n\nSimpleServer.prototype.listen = function(port,host) {\n\tvar self = this;\n\thttp.createServer(function(request,response) {\n\t\t// Compose the state object\n\t\tvar state = {};\n\t\tstate.wiki = self.wiki;\n\t\tstate.server = self;\n\t\tstate.urlInfo = url.parse(request.url);\n\t\t// Find the route that matches this path\n\t\tvar route = self.findMatchingRoute(request,state);\n\t\t// Check for the username and password if we've got one\n\t\tvar username = self.get(\"username\"),\n\t\t\tpassword = self.get(\"password\");\n\t\tif(username && password) {\n\t\t\t// Check they match\n\t\t\tif(self.checkCredentials(request,username,password) !== \"ALLOWED\") {\n\t\t\t\tvar servername = state.wiki.getTiddlerText(\"$:/SiteTitle\") || \"TiddlyWiki5\";\n\t\t\t\tresponse.writeHead(401,\"Authentication required\",{\n\t\t\t\t\t\"WWW-Authenticate\": 'Basic realm=\"Please provide your username and password to login to ' + servername + '\"'\n\t\t\t\t});\n\t\t\t\tresponse.end();\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t// Return a 404 if we didn't find a route\n\t\tif(!route) {\n\t\t\tresponse.writeHead(404);\n\t\t\tresponse.end();\n\t\t\treturn;\n\t\t}\n\t\t// Set the encoding for the 
incoming request\n\t\t// TODO: Presumably this would need tweaking if we supported PUTting binary tiddlers\n\t\trequest.setEncoding(\"utf8\");\n\t\t// Dispatch the appropriate method\n\t\tswitch(request.method) {\n\t\t\tcase \"GET\": // Intentional fall-through\n\t\t\tcase \"DELETE\":\n\t\t\t\troute.handler(request,response,state);\n\t\t\t\tbreak;\n\t\t\tcase \"PUT\":\n\t\t\t\tvar data = \"\";\n\t\t\t\trequest.on(\"data\",function(chunk) {\n\t\t\t\t\tdata += chunk.toString();\n\t\t\t\t});\n\t\t\t\trequest.on(\"end\",function() {\n\t\t\t\t\tstate.data = data;\n\t\t\t\t\troute.handler(request,response,state);\n\t\t\t\t});\n\t\t\t\tbreak;\n\t\t}\n\t}).listen(port,host);\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n\t// Set up server\n\tthis.server = new SimpleServer({\n\t\twiki: this.commander.wiki\n\t});\n\t// Add route handlers\n\tthis.server.addRoute({\n\t\tmethod: \"PUT\",\n\t\tpath: /^\\/recipes\\/default\\/tiddlers\\/(.+)$/,\n\t\thandler: function(request,response,state) {\n\t\t\tvar title = decodeURIComponent(state.params[0]),\n\t\t\t\tfields = JSON.parse(state.data);\n\t\t\t// Pull up any subfields in the `fields` object\n\t\t\tif(fields.fields) {\n\t\t\t\t$tw.utils.each(fields.fields,function(field,name) {\n\t\t\t\t\tfields[name] = field;\n\t\t\t\t});\n\t\t\t\tdelete fields.fields;\n\t\t\t}\n\t\t\t// Remove any revision field\n\t\t\tif(fields.revision) {\n\t\t\t\tdelete fields.revision;\n\t\t\t}\n\t\t\tstate.wiki.addTiddler(new $tw.Tiddler(state.wiki.getCreationFields(),fields,{title: title},state.wiki.getModificationFields()));\n\t\t\tvar changeCount = state.wiki.getChangeCount(title).toString();\n\t\t\tresponse.writeHead(204, \"OK\",{\n\t\t\t\tEtag: \"\\\"default/\" + encodeURIComponent(title) + \"/\" + changeCount + \":\\\"\",\n\t\t\t\t\"Content-Type\": \"text/plain\"\n\t\t\t});\n\t\t\tresponse.end();\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"DELETE\",\n\t\tpath: /^\\/bags\\/default\\/tiddlers\\/(.+)$/,\n\t\thandler: function(request,response,state) {\n\t\t\tvar title = decodeURIComponent(state.params[0]);\n\t\t\tstate.wiki.deleteTiddler(title);\n\t\t\tresponse.writeHead(204, \"OK\", {\n\t\t\t\t\"Content-Type\": \"text/plain\"\n\t\t\t});\n\t\t\tresponse.end();\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": state.server.get(\"serveType\")});\n\t\t\tvar text = state.wiki.renderTiddler(state.server.get(\"renderType\"),state.server.get(\"rootTiddler\"));\n\t\t\tresponse.end(text,\"utf8\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/status$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": \"application/json\"});\n\t\t\tvar text = JSON.stringify({\n\t\t\t\tusername: state.server.get(\"username\"),\n\t\t\t\tspace: {\n\t\t\t\t\trecipe: \"default\"\n\t\t\t\t},\n\t\t\t\ttiddlywiki_version: $tw.version\n\t\t\t});\n\t\t\tresponse.end(text,\"utf8\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/favicon.ico$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": \"image/x-icon\"});\n\t\t\tvar buffer = state.wiki.getTiddlerText(\"$:/favicon.ico\",\"\");\n\t\t\tresponse.end(buffer,\"base64\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: 
/^\\/recipes\\/default\\/tiddlers.json$/,\n\t\thandler: function(request,response,state) {\n\t\t\tresponse.writeHead(200, {\"Content-Type\": \"application/json\"});\n\t\t\tvar tiddlers = [];\n\t\t\tstate.wiki.forEachTiddler({sortField: \"title\"},function(title,tiddler) {\n\t\t\t\tvar tiddlerFields = {};\n\t\t\t\t$tw.utils.each(tiddler.fields,function(field,name) {\n\t\t\t\t\tif(name !== \"text\") {\n\t\t\t\t\t\ttiddlerFields[name] = tiddler.getFieldString(name);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\ttiddlerFields.revision = state.wiki.getChangeCount(title);\n\t\t\t\ttiddlerFields.type = tiddlerFields.type || \"text/vnd.tiddlywiki\";\n\t\t\t\ttiddlers.push(tiddlerFields);\n\t\t\t});\n\t\t\tvar text = JSON.stringify(tiddlers);\n\t\t\tresponse.end(text,\"utf8\");\n\t\t}\n\t});\n\tthis.server.addRoute({\n\t\tmethod: \"GET\",\n\t\tpath: /^\\/recipes\\/default\\/tiddlers\\/(.+)$/,\n\t\thandler: function(request,response,state) {\n\t\t\tvar title = decodeURIComponent(state.params[0]),\n\t\t\t\ttiddler = state.wiki.getTiddler(title),\n\t\t\t\ttiddlerFields = {},\n\t\t\t\tknownFields = [\n\t\t\t\t\t\"bag\", \"created\", \"creator\", \"modified\", \"modifier\", \"permissions\", \"recipe\", \"revision\", \"tags\", \"text\", \"title\", \"type\", \"uri\"\n\t\t\t\t];\n\t\t\tif(tiddler) {\n\t\t\t\t$tw.utils.each(tiddler.fields,function(field,name) {\n\t\t\t\t\tvar value = tiddler.getFieldString(name);\n\t\t\t\t\tif(knownFields.indexOf(name) !== -1) {\n\t\t\t\t\t\ttiddlerFields[name] = value;\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttiddlerFields.fields = tiddlerFields.fields || {};\n\t\t\t\t\t\ttiddlerFields.fields[name] = value;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\ttiddlerFields.revision = state.wiki.getChangeCount(title);\n\t\t\t\ttiddlerFields.type = tiddlerFields.type || \"text/vnd.tiddlywiki\";\n\t\t\t\tresponse.writeHead(200, {\"Content-Type\": \"application/json\"});\n\t\t\t\tresponse.end(JSON.stringify(tiddlerFields),\"utf8\");\n\t\t\t} else {\n\t\t\t\tresponse.writeHead(404);\n\t\t\t\tresponse.end();\n\t\t\t}\n\t\t}\n\t});\n};\n\nCommand.prototype.execute = function() {\n\tif(!$tw.boot.wikiTiddlersPath) {\n\t\t$tw.utils.warning(\"Warning: Wiki folder '\" + $tw.boot.wikiPath + \"' does not exist or is missing a tiddlywiki.info file\");\n\t}\n\tvar port = this.params[0] || \"8080\",\n\t\trootTiddler = this.params[1] || \"$:/core/save/all\",\n\t\trenderType = this.params[2] || \"text/plain\",\n\t\tserveType = this.params[3] || \"text/html\",\n\t\tusername = this.params[4],\n\t\tpassword = this.params[5],\n\t\thost = this.params[6] || \"127.0.0.1\",\n\t\tpathprefix = this.params[7];\n\tthis.server.set({\n\t\trootTiddler: rootTiddler,\n\t\trenderType: renderType,\n\t\tserveType: serveType,\n\t\tusername: username,\n\t\tpassword: password,\n\t\tpathprefix: pathprefix\n\t});\n\tthis.server.listen(port,host);\n\tconsole.log(\"Serving on \" + host + \":\" + port);\n\tconsole.log(\"(press ctrl-C to exit)\");\n\t// Warn if required plugins are missing\n\tif(!$tw.wiki.getTiddler(\"$:/plugins/tiddlywiki/tiddlyweb\") || !$tw.wiki.getTiddler(\"$:/plugins/tiddlywiki/filesystem\")) {\n\t\t$tw.utils.warning(\"Warning: Plugins required for client-server operation (\\\"tiddlywiki/filesystem\\\" and \\\"tiddlywiki/tiddlyweb\\\") are missing from tiddlywiki.info file\");\n\t}\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/server.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/setfield.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/setfield.js\ntype: application/javascript\nmodule-type: command\n\nCommand to modify selected tiddlers to set a field to the text of a template tiddler that has been wikified with the selected tiddler as the current tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.info = {\n\tname: \"setfield\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 4) {\n\t\treturn \"Missing parameters\";\n\t}\n\tvar self = this,\n\t\twiki = this.commander.wiki,\n\t\tfilter = this.params[0],\n\t\tfieldname = this.params[1] || \"text\",\n\t\ttemplatetitle = this.params[2],\n\t\trendertype = this.params[3] || \"text/plain\",\n\t\ttiddlers = wiki.filterTiddlers(filter);\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar parser = wiki.parseTiddler(templatetitle),\n\t\t\tnewFields = {},\n\t\t\ttiddler = wiki.getTiddler(title);\n\t\tif(parser) {\n\t\t\tvar widgetNode = wiki.makeWidget(parser,{variables: {currentTiddler: title}});\n\t\t\tvar container = $tw.fakeDocument.createElement(\"div\");\n\t\t\twidgetNode.render(container,null);\n\t\t\tnewFields[fieldname] = rendertype === \"text/html\" ? container.innerHTML : container.textContent;\n\t\t} else {\n\t\t\tnewFields[fieldname] = undefined;\n\t\t}\n\t\twiki.addTiddler(new $tw.Tiddler(tiddler,newFields));\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/setfield.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/unpackplugin.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/unpackplugin.js\ntype: application/javascript\nmodule-type: command\n\nCommand to extract the shadow tiddlers from within a plugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"unpackplugin\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander,callback) {\n\tthis.params = params;\n\tthis.commander = commander;\n\tthis.callback = callback;\n};\n\nCommand.prototype.execute = function() {\n\tif(this.params.length < 1) {\n\t\treturn \"Missing plugin name\";\n\t}\n\tvar self = this,\n\t\ttitle = this.params[0],\n\t\tpluginData = this.commander.wiki.getTiddlerDataCached(title);\n\tif(!pluginData) {\n\t\treturn \"Plugin '\" + title + \"' not found\";\n\t}\n\t$tw.utils.each(pluginData.tiddlers,function(tiddler) {\n\t\tself.commander.wiki.addTiddler(new $tw.Tiddler(tiddler));\n\t});\n\treturn null;\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/unpackplugin.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/verbose.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/verbose.js\ntype: application/javascript\nmodule-type: command\n\nVerbose command\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"verbose\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tthis.commander.verbose = true;\n\t// Output the boot message log\n\tthis.commander.streams.output.write(\"Boot log:\\n  \" + $tw.boot.logMessages.join(\"\\n  \") + \"\\n\");\n\treturn null; // No error\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/verbose.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/commands/version.js": {
            "text": "/*\\\ntitle: $:/core/modules/commands/version.js\ntype: application/javascript\nmodule-type: command\n\nVersion command\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.info = {\n\tname: \"version\",\n\tsynchronous: true\n};\n\nvar Command = function(params,commander) {\n\tthis.params = params;\n\tthis.commander = commander;\n};\n\nCommand.prototype.execute = function() {\n\tthis.commander.streams.output.write($tw.version + \"\\n\");\n\treturn null; // No error\n};\n\nexports.Command = Command;\n\n})();\n",
            "title": "$:/core/modules/commands/version.js",
            "type": "application/javascript",
            "module-type": "command"
        },
        "$:/core/modules/config.js": {
            "text": "/*\\\ntitle: $:/core/modules/config.js\ntype: application/javascript\nmodule-type: config\n\nCore configuration constants\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.preferences = {};\n\nexports.preferences.notificationDuration = 3 * 1000;\nexports.preferences.jsonSpaces = 4;\n\nexports.textPrimitives = {\n\tupperLetter: \"[A-Z\\u00c0-\\u00d6\\u00d8-\\u00de\\u0150\\u0170]\",\n\tlowerLetter: \"[a-z\\u00df-\\u00f6\\u00f8-\\u00ff\\u0151\\u0171]\",\n\tanyLetter:   \"[A-Za-z0-9\\u00c0-\\u00d6\\u00d8-\\u00de\\u00df-\\u00f6\\u00f8-\\u00ff\\u0150\\u0170\\u0151\\u0171]\",\n\tblockPrefixLetters:\t\"[A-Za-z0-9-_\\u00c0-\\u00d6\\u00d8-\\u00de\\u00df-\\u00f6\\u00f8-\\u00ff\\u0150\\u0170\\u0151\\u0171]\"\n};\n\nexports.textPrimitives.unWikiLink = \"~\";\nexports.textPrimitives.wikiLink = exports.textPrimitives.upperLetter + \"+\" +\n\texports.textPrimitives.lowerLetter + \"+\" +\n\texports.textPrimitives.upperLetter +\n\texports.textPrimitives.anyLetter + \"*\";\n\nexports.htmlEntities = {quot:34, amp:38, apos:39, lt:60, gt:62, nbsp:160, iexcl:161, cent:162, pound:163, curren:164, yen:165, brvbar:166, sect:167, uml:168, copy:169, ordf:170, laquo:171, not:172, shy:173, reg:174, macr:175, deg:176, plusmn:177, sup2:178, sup3:179, acute:180, micro:181, para:182, middot:183, cedil:184, sup1:185, ordm:186, raquo:187, frac14:188, frac12:189, frac34:190, iquest:191, Agrave:192, Aacute:193, Acirc:194, Atilde:195, Auml:196, Aring:197, AElig:198, Ccedil:199, Egrave:200, Eacute:201, Ecirc:202, Euml:203, Igrave:204, Iacute:205, Icirc:206, Iuml:207, ETH:208, Ntilde:209, Ograve:210, Oacute:211, Ocirc:212, Otilde:213, Ouml:214, times:215, Oslash:216, Ugrave:217, Uacute:218, Ucirc:219, Uuml:220, Yacute:221, THORN:222, szlig:223, agrave:224, aacute:225, acirc:226, atilde:227, auml:228, aring:229, aelig:230, ccedil:231, egrave:232, eacute:233, ecirc:234, euml:235, igrave:236, iacute:237, icirc:238, iuml:239, eth:240, ntilde:241, ograve:242, oacute:243, ocirc:244, otilde:245, ouml:246, divide:247, oslash:248, ugrave:249, uacute:250, ucirc:251, uuml:252, yacute:253, thorn:254, yuml:255, OElig:338, oelig:339, Scaron:352, scaron:353, Yuml:376, fnof:402, circ:710, tilde:732, Alpha:913, Beta:914, Gamma:915, Delta:916, Epsilon:917, Zeta:918, Eta:919, Theta:920, Iota:921, Kappa:922, Lambda:923, Mu:924, Nu:925, Xi:926, Omicron:927, Pi:928, Rho:929, Sigma:931, Tau:932, Upsilon:933, Phi:934, Chi:935, Psi:936, Omega:937, alpha:945, beta:946, gamma:947, delta:948, epsilon:949, zeta:950, eta:951, theta:952, iota:953, kappa:954, lambda:955, mu:956, nu:957, xi:958, omicron:959, pi:960, rho:961, sigmaf:962, sigma:963, tau:964, upsilon:965, phi:966, chi:967, psi:968, omega:969, thetasym:977, upsih:978, piv:982, ensp:8194, emsp:8195, thinsp:8201, zwnj:8204, zwj:8205, lrm:8206, rlm:8207, ndash:8211, mdash:8212, lsquo:8216, rsquo:8217, sbquo:8218, ldquo:8220, rdquo:8221, bdquo:8222, dagger:8224, Dagger:8225, bull:8226, hellip:8230, permil:8240, prime:8242, Prime:8243, lsaquo:8249, rsaquo:8250, oline:8254, frasl:8260, euro:8364, image:8465, weierp:8472, real:8476, trade:8482, alefsym:8501, larr:8592, uarr:8593, rarr:8594, darr:8595, harr:8596, crarr:8629, lArr:8656, uArr:8657, rArr:8658, dArr:8659, hArr:8660, forall:8704, part:8706, exist:8707, empty:8709, nabla:8711, isin:8712, notin:8713, ni:8715, prod:8719, sum:8721, minus:8722, lowast:8727, radic:8730, prop:8733, infin:8734, ang:8736, and:8743, or:8744, cap:8745, cup:8746, int:8747, there4:8756, 
sim:8764, cong:8773, asymp:8776, ne:8800, equiv:8801, le:8804, ge:8805, sub:8834, sup:8835, nsub:8836, sube:8838, supe:8839, oplus:8853, otimes:8855, perp:8869, sdot:8901, lceil:8968, rceil:8969, lfloor:8970, rfloor:8971, lang:9001, rang:9002, loz:9674, spades:9824, clubs:9827, hearts:9829, diams:9830 };\n\nexports.htmlVoidElements = \"area,base,br,col,command,embed,hr,img,input,keygen,link,meta,param,source,track,wbr\".split(\",\");\n\nexports.htmlBlockElements = \"address,article,aside,audio,blockquote,canvas,dd,div,dl,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,hgroup,hr,li,noscript,ol,output,p,pre,section,table,tfoot,ul,video\".split(\",\");\n\nexports.htmlUnsafeElements = \"script\".split(\",\");\n\n})();\n",
            "title": "$:/core/modules/config.js",
            "type": "application/javascript",
            "module-type": "config"
        },
        "$:/core/modules/deserializers.js": {
            "text": "/*\\\ntitle: $:/core/modules/deserializers.js\ntype: application/javascript\nmodule-type: tiddlerdeserializer\n\nFunctions to deserialise tiddlers from a block of text\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nUtility function to parse an old-style tiddler DIV in a *.tid file. It looks like this:\n\n<div title=\"Title\" creator=\"JoeBloggs\" modifier=\"JoeBloggs\" created=\"201102111106\" modified=\"201102111310\" tags=\"myTag [[my long tag]]\">\n<pre>The text of the tiddler (without the expected HTML encoding).\n</pre>\n</div>\n\nNote that the field attributes are HTML encoded, but that the body of the <PRE> tag is not encoded.\n\nWhen these tiddler DIVs are encountered within a TiddlyWiki HTML file then the body is encoded in the usual way.\n*/\nvar parseTiddlerDiv = function(text /* [,fields] */) {\n\t// Slot together the default results\n\tvar result = {};\n\tif(arguments.length > 1) {\n\t\tfor(var f=1; f<arguments.length; f++) {\n\t\t\tvar fields = arguments[f];\n\t\t\tfor(var t in fields) {\n\t\t\t\tresult[t] = fields[t];\t\t\n\t\t\t}\n\t\t}\n\t}\n\t// Parse the DIV body\n\tvar startRegExp = /^\\s*<div\\s+([^>]*)>(\\s*<pre>)?/gi,\n\t\tendRegExp,\n\t\tmatch = startRegExp.exec(text);\n\tif(match) {\n\t\t// Old-style DIVs don't have the <pre> tag\n\t\tif(match[2]) {\n\t\t\tendRegExp = /<\\/pre>\\s*<\\/div>\\s*$/gi;\n\t\t} else {\n\t\t\tendRegExp = /<\\/div>\\s*$/gi;\n\t\t}\n\t\tvar endMatch = endRegExp.exec(text);\n\t\tif(endMatch) {\n\t\t\t// Extract the text\n\t\t\tresult.text = text.substring(match.index + match[0].length,endMatch.index);\n\t\t\t// Process the attributes\n\t\t\tvar attrRegExp = /\\s*([^=\\s]+)\\s*=\\s*(?:\"([^\"]*)\"|'([^']*)')/gi,\n\t\t\t\tattrMatch;\n\t\t\tdo {\n\t\t\t\tattrMatch = attrRegExp.exec(match[1]);\n\t\t\t\tif(attrMatch) {\n\t\t\t\t\tvar name = attrMatch[1];\n\t\t\t\t\tvar value = attrMatch[2] !== undefined ? attrMatch[2] : attrMatch[3];\n\t\t\t\t\tresult[name] = value;\n\t\t\t\t}\n\t\t\t} while(attrMatch);\n\t\t\treturn result;\n\t\t}\n\t}\n\treturn undefined;\n};\n\nexports[\"application/x-tiddler-html-div\"] = function(text,fields) {\n\treturn [parseTiddlerDiv(text,fields)];\n};\n\nexports[\"application/json\"] = function(text,fields) {\n\tvar incoming = JSON.parse(text),\n\t\tresults = [];\n\tif($tw.utils.isArray(incoming)) {\n\t\tfor(var t=0; t<incoming.length; t++) {\n\t\t\tvar incomingFields = incoming[t],\n\t\t\t\tfields = {};\n\t\t\tfor(var f in incomingFields) {\n\t\t\t\tif(typeof incomingFields[f] === \"string\") {\n\t\t\t\t\tfields[f] = incomingFields[f];\n\t\t\t\t}\n\t\t\t}\n\t\t\tresults.push(fields);\n\t\t}\n\t}\n\treturn results;\n};\n\n/*\nParse an HTML file into tiddlers. 
There are three possibilities:\n# A TiddlyWiki classic HTML file containing `text/x-tiddlywiki` tiddlers\n# A TiddlyWiki5 HTML file containing `text/vnd.tiddlywiki` tiddlers\n# An ordinary HTML file\n*/\nexports[\"text/html\"] = function(text,fields) {\n\t// Check if we've got a store area\n\tvar storeAreaMarkerRegExp = /<div id=[\"']?storeArea['\"]?( style=[\"']?display:none;[\"']?)?>/gi,\n\t\tmatch = storeAreaMarkerRegExp.exec(text);\n\tif(match) {\n\t\t// If so, it's either a classic TiddlyWiki file or an unencrypted TW5 file\n\t\t// First read the normal tiddlers\n\t\tvar results = deserializeTiddlyWikiFile(text,storeAreaMarkerRegExp.lastIndex,!!match[1],fields);\n\t\t// Then any system tiddlers\n\t\tvar systemAreaMarkerRegExp = /<div id=[\"']?systemArea['\"]?( style=[\"']?display:none;[\"']?)?>/gi,\n\t\t\tsysMatch = systemAreaMarkerRegExp.exec(text);\n\t\tif(sysMatch) {\n\t\t\tresults.push.apply(results,deserializeTiddlyWikiFile(text,systemAreaMarkerRegExp.lastIndex,!!sysMatch[1],fields));\n\t\t}\n\t\treturn results;\n\t} else {\n\t\t// Check whether we've got an encrypted file\n\t\tvar encryptedStoreArea = $tw.utils.extractEncryptedStoreArea(text);\n\t\tif(encryptedStoreArea) {\n\t\t\t// If so, attempt to decrypt it using the current password\n\t\t\treturn $tw.utils.decryptStoreArea(encryptedStoreArea);\n\t\t} else {\n\t\t\t// It's not a TiddlyWiki so we'll return the entire HTML file as a tiddler\n\t\t\treturn deserializeHtmlFile(text,fields);\n\t\t}\n\t}\n};\n\nfunction deserializeHtmlFile(text,fields) {\n\tvar result = {};\n\t$tw.utils.each(fields,function(value,name) {\n\t\tresult[name] = value;\n\t});\n\tresult.text = text;\n\tresult.type = \"text/html\";\n\treturn [result];\n}\n\nfunction deserializeTiddlyWikiFile(text,storeAreaEnd,isTiddlyWiki5,fields) {\n\tvar results = [],\n\t\tendOfDivRegExp = /(<\\/div>\\s*)/gi,\n\t\tstartPos = storeAreaEnd,\n\t\tdefaultType = isTiddlyWiki5 ? undefined : \"text/x-tiddlywiki\";\n\tendOfDivRegExp.lastIndex = startPos;\n\tvar match = endOfDivRegExp.exec(text);\n\twhile(match) {\n\t\tvar endPos = endOfDivRegExp.lastIndex,\n\t\t\ttiddlerFields = parseTiddlerDiv(text.substring(startPos,endPos),fields,{type: defaultType});\n\t\tif(!tiddlerFields) {\n\t\t\tbreak;\n\t\t}\n\t\t$tw.utils.each(tiddlerFields,function(value,name) {\n\t\t\tif(typeof value === \"string\") {\n\t\t\t\ttiddlerFields[name] = $tw.utils.htmlDecode(value);\n\t\t\t}\n\t\t});\n\t\tif(tiddlerFields.text !== null) {\n\t\t\tresults.push(tiddlerFields);\n\t\t}\n\t\tstartPos = endPos;\n\t\tmatch = endOfDivRegExp.exec(text);\n\t}\n\treturn results;\n}\n\n})();\n",
            "title": "$:/core/modules/deserializers.js",
            "type": "application/javascript",
            "module-type": "tiddlerdeserializer"
        },
        "$:/core/modules/editor/engines/framed.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/engines/framed.js\ntype: application/javascript\nmodule-type: library\n\nText editor engine based on a simple input or textarea within an iframe. This is done so that the selection is preserved even when clicking away from the textarea\n\n\\*/\n(function(){\n\n/*jslint node: true,browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar HEIGHT_VALUE_TITLE = \"$:/config/TextEditor/EditorHeight/Height\";\n\nfunction FramedEngine(options) {\n\t// Save our options\n\toptions = options || {};\n\tthis.widget = options.widget;\n\tthis.value = options.value;\n\tthis.parentNode = options.parentNode;\n\tthis.nextSibling = options.nextSibling;\n\t// Create our hidden dummy text area for reading styles\n\tthis.dummyTextArea = this.widget.document.createElement(\"textarea\");\n\tif(this.widget.editClass) {\n\t\tthis.dummyTextArea.className = this.widget.editClass;\n\t}\n\tthis.dummyTextArea.setAttribute(\"hidden\",\"true\");\n\tthis.parentNode.insertBefore(this.dummyTextArea,this.nextSibling);\n\tthis.widget.domNodes.push(this.dummyTextArea);\n\t// Create the iframe\n\tthis.iframeNode = this.widget.document.createElement(\"iframe\");\n\tthis.parentNode.insertBefore(this.iframeNode,this.nextSibling);\n\tthis.iframeDoc = this.iframeNode.contentWindow.document;\n\t// (Firefox requires us to put some empty content in the iframe)\n\tthis.iframeDoc.open();\n\tthis.iframeDoc.write(\"\");\n\tthis.iframeDoc.close();\n\t// Style the iframe\n\tthis.iframeNode.className = this.dummyTextArea.className;\n\tthis.iframeNode.style.border = \"none\";\n\tthis.iframeNode.style.padding = \"0\";\n\tthis.iframeNode.style.resize = \"none\";\n\tthis.iframeDoc.body.style.margin = \"0\";\n\tthis.iframeDoc.body.style.padding = \"0\";\n\tthis.widget.domNodes.push(this.iframeNode);\n\t// Construct the textarea or input node\n\tvar tag = this.widget.editTag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"input\";\n\t}\n\tthis.domNode = this.iframeDoc.createElement(tag);\n\t// Set the text\n\tif(this.widget.editTag === \"textarea\") {\n\t\tthis.domNode.appendChild(this.iframeDoc.createTextNode(this.value));\n\t} else {\n\t\tthis.domNode.value = this.value;\n\t}\n\t// Set the attributes\n\tif(this.widget.editType) {\n\t\tthis.domNode.setAttribute(\"type\",this.widget.editType);\n\t}\n\tif(this.widget.editPlaceholder) {\n\t\tthis.domNode.setAttribute(\"placeholder\",this.widget.editPlaceholder);\n\t}\n\tif(this.widget.editSize) {\n\t\tthis.domNode.setAttribute(\"size\",this.widget.editSize);\n\t}\n\tif(this.widget.editRows) {\n\t\tthis.domNode.setAttribute(\"rows\",this.widget.editRows);\n\t}\n\t// Copy the styles from the dummy textarea\n\tthis.copyStyles();\n\t// Add event listeners\n\t$tw.utils.addEventListeners(this.domNode,[\n\t\t{name: \"input\",handlerObject: this,handlerMethod: \"handleInputEvent\"},\n\t\t{name: \"keydown\",handlerObject: this.widget,handlerMethod: \"handleKeydownEvent\"}\n\t]);\n\t// Insert the element into the DOM\n\tthis.iframeDoc.body.appendChild(this.domNode);\n}\n\n/*\nCopy styles from the dummy text area to the textarea in the iframe\n*/\nFramedEngine.prototype.copyStyles = function() {\n\t// Copy all styles\n\t$tw.utils.copyStyles(this.dummyTextArea,this.domNode);\n\t// Override the ones that should not be set the same as the dummy textarea\n\tthis.domNode.style.display = \"block\";\n\tthis.domNode.style.width = \"100%\";\n\tthis.domNode.style.margin = \"0\";\n\t// In Chrome setting -webkit-text-fill-color overrides 
the placeholder text colour\n\tthis.domNode.style[\"-webkit-text-fill-color\"] = \"currentcolor\";\n};\n\n/*\nSet the text of the engine if it doesn't currently have focus\n*/\nFramedEngine.prototype.setText = function(text,type) {\n\tif(!this.domNode.isTiddlyWikiFakeDom) {\n\t\tif(this.domNode.ownerDocument.activeElement !== this.domNode) {\n\t\t\tthis.domNode.value = text;\n\t\t}\n\t\t// Fix the height if needed\n\t\tthis.fixHeight();\n\t}\n};\n\n/*\nGet the text of the engine\n*/\nFramedEngine.prototype.getText = function() {\n\treturn this.domNode.value;\n};\n\n/*\nFix the height of textarea to fit content\n*/\nFramedEngine.prototype.fixHeight = function() {\n\t// Make sure styles are updated\n\tthis.copyStyles();\n\t// Adjust height\n\tif(this.widget.editTag === \"textarea\") {\n\t\tif(this.widget.editAutoHeight) {\n\t\t\tif(this.domNode && !this.domNode.isTiddlyWikiFakeDom) {\n\t\t\t\tvar newHeight = $tw.utils.resizeTextAreaToFit(this.domNode,this.widget.editMinHeight);\n\t\t\t\tthis.iframeNode.style.height = (newHeight + 14) + \"px\"; // +14 for the border on the textarea\n\t\t\t}\n\t\t} else {\n\t\t\tvar fixedHeight = parseInt(this.widget.wiki.getTiddlerText(HEIGHT_VALUE_TITLE,\"400px\"),10);\n\t\t\tfixedHeight = Math.max(fixedHeight,20);\n\t\t\tthis.domNode.style.height = fixedHeight + \"px\";\n\t\t\tthis.iframeNode.style.height = (fixedHeight + 14) + \"px\";\n\t\t}\n\t}\n};\n\n/*\nFocus the engine node\n*/\nFramedEngine.prototype.focus  = function() {\n\tif(this.domNode.focus && this.domNode.select) {\n\t\tthis.domNode.focus();\n\t\tthis.domNode.select();\n\t}\n};\n\n/*\nHandle a dom \"input\" event which occurs when the text has changed\n*/\nFramedEngine.prototype.handleInputEvent = function(event) {\n\tthis.widget.saveChanges(this.getText());\n\tthis.fixHeight();\n\treturn true;\n};\n\n/*\nCreate a blank structure representing a text operation\n*/\nFramedEngine.prototype.createTextOperation = function() {\n\tvar operation = {\n\t\ttext: this.domNode.value,\n\t\tselStart: this.domNode.selectionStart,\n\t\tselEnd: this.domNode.selectionEnd,\n\t\tcutStart: null,\n\t\tcutEnd: null,\n\t\treplacement: null,\n\t\tnewSelStart: null,\n\t\tnewSelEnd: null\n\t};\n\toperation.selection = operation.text.substring(operation.selStart,operation.selEnd);\n\treturn operation;\n};\n\n/*\nExecute a text operation\n*/\nFramedEngine.prototype.executeTextOperation = function(operation) {\n\t// Perform the required changes to the text area and the underlying tiddler\n\tvar newText = operation.text;\n\tif(operation.replacement !== null) {\n\t\tnewText = operation.text.substring(0,operation.cutStart) + operation.replacement + operation.text.substring(operation.cutEnd);\n\t\t// Attempt to use a execCommand to modify the value of the control\n\t\tif(this.iframeDoc.queryCommandSupported(\"insertText\") && this.iframeDoc.queryCommandSupported(\"delete\") && !$tw.browser.isFirefox) {\n\t\t\tthis.domNode.focus();\n\t\t\tthis.domNode.setSelectionRange(operation.cutStart,operation.cutEnd);\n\t\t\tif(operation.replacement === \"\") {\n\t\t\t\tthis.iframeDoc.execCommand(\"delete\",false,\"\");\n\t\t\t} else {\n\t\t\t\tthis.iframeDoc.execCommand(\"insertText\",false,operation.replacement);\n\t\t\t}\n\t\t} else {\n\t\t\tthis.domNode.value = newText;\n\t\t}\n\t\tthis.domNode.focus();\n\t\tthis.domNode.setSelectionRange(operation.newSelStart,operation.newSelEnd);\n\t}\n\tthis.domNode.focus();\n\treturn newText;\n};\n\nexports.FramedEngine = FramedEngine;\n\n})();\n",
            "title": "$:/core/modules/editor/engines/framed.js",
            "type": "application/javascript",
            "module-type": "library"
        },
        "$:/core/modules/editor/engines/simple.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/engines/simple.js\ntype: application/javascript\nmodule-type: library\n\nText editor engine based on a simple input or textarea tag\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar HEIGHT_VALUE_TITLE = \"$:/config/TextEditor/EditorHeight/Height\";\n\nfunction SimpleEngine(options) {\n\t// Save our options\n\toptions = options || {};\n\tthis.widget = options.widget;\n\tthis.value = options.value;\n\tthis.parentNode = options.parentNode;\n\tthis.nextSibling = options.nextSibling;\n\t// Construct the textarea or input node\n\tvar tag = this.widget.editTag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"input\";\n\t}\n\tthis.domNode = this.widget.document.createElement(tag);\n\t// Set the text\n\tif(this.widget.editTag === \"textarea\") {\n\t\tthis.domNode.appendChild(this.widget.document.createTextNode(this.value));\n\t} else {\n\t\tthis.domNode.value = this.value;\n\t}\n\t// Set the attributes\n\tif(this.widget.editType) {\n\t\tthis.domNode.setAttribute(\"type\",this.widget.editType);\n\t}\n\tif(this.widget.editPlaceholder) {\n\t\tthis.domNode.setAttribute(\"placeholder\",this.widget.editPlaceholder);\n\t}\n\tif(this.widget.editSize) {\n\t\tthis.domNode.setAttribute(\"size\",this.widget.editSize);\n\t}\n\tif(this.widget.editRows) {\n\t\tthis.domNode.setAttribute(\"rows\",this.widget.editRows);\n\t}\n\tif(this.widget.editClass) {\n\t\tthis.domNode.className = this.widget.editClass;\n\t}\n\t// Add an input event handler\n\t$tw.utils.addEventListeners(this.domNode,[\n\t\t{name: \"focus\", handlerObject: this, handlerMethod: \"handleFocusEvent\"},\n\t\t{name: \"input\", handlerObject: this, handlerMethod: \"handleInputEvent\"}\n\t]);\n\t// Insert the element into the DOM\n\tthis.parentNode.insertBefore(this.domNode,this.nextSibling);\n\tthis.widget.domNodes.push(this.domNode);\n}\n\n/*\nSet the text of the engine if it doesn't currently have focus\n*/\nSimpleEngine.prototype.setText = function(text,type) {\n\tif(!this.domNode.isTiddlyWikiFakeDom) {\n\t\tif(this.domNode.ownerDocument.activeElement !== this.domNode) {\n\t\t\tthis.domNode.value = text;\n\t\t}\n\t\t// Fix the height if needed\n\t\tthis.fixHeight();\n\t}\n};\n\n/*\nGet the text of the engine\n*/\nSimpleEngine.prototype.getText = function() {\n\treturn this.domNode.value;\n};\n\n/*\nFix the height of textarea to fit content\n*/\nSimpleEngine.prototype.fixHeight = function() {\n\tif(this.widget.editTag === \"textarea\") {\n\t\tif(this.widget.editAutoHeight) {\n\t\t\tif(this.domNode && !this.domNode.isTiddlyWikiFakeDom) {\n\t\t\t\t$tw.utils.resizeTextAreaToFit(this.domNode,this.widget.editMinHeight);\n\t\t\t}\n\t\t} else {\n\t\t\tvar fixedHeight = parseInt(this.widget.wiki.getTiddlerText(HEIGHT_VALUE_TITLE,\"400px\"),10);\n\t\t\tfixedHeight = Math.max(fixedHeight,20);\n\t\t\tthis.domNode.style.height = fixedHeight + \"px\";\n\t\t}\n\t}\n};\n\n/*\nFocus the engine node\n*/\nSimpleEngine.prototype.focus  = function() {\n\tif(this.domNode.focus && this.domNode.select) {\n\t\tthis.domNode.focus();\n\t\tthis.domNode.select();\n\t}\n};\n\n/*\nHandle a dom \"input\" event which occurs when the text has changed\n*/\nSimpleEngine.prototype.handleInputEvent = function(event) {\n\tthis.widget.saveChanges(this.getText());\n\tthis.fixHeight();\n\treturn true;\n};\n\n/*\nHandle a dom \"focus\" event\n*/\nSimpleEngine.prototype.handleFocusEvent = function(event) {\n\tif(this.widget.editFocusPopup) 
{\n\t\t$tw.popup.triggerPopup({\n\t\t\tdomNode: this.domNode,\n\t\t\ttitle: this.widget.editFocusPopup,\n\t\t\twiki: this.widget.wiki,\n\t\t\tforce: true\n\t\t});\n\t}\n\treturn true;\n};\n\n/*\nCreate a blank structure representing a text operation\n*/\nSimpleEngine.prototype.createTextOperation = function() {\n\treturn null;\n};\n\n/*\nExecute a text operation\n*/\nSimpleEngine.prototype.executeTextOperation = function(operation) {\n};\n\nexports.SimpleEngine = SimpleEngine;\n\n})();\n",
            "title": "$:/core/modules/editor/engines/simple.js",
            "type": "application/javascript",
            "module-type": "library"
        },
        "$:/core/modules/editor/factory.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/factory.js\ntype: application/javascript\nmodule-type: library\n\nFactory for constructing text editor widgets with specified engines for the toolbar and non-toolbar cases\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar DEFAULT_MIN_TEXT_AREA_HEIGHT = \"100px\"; // Minimum height of textareas in pixels\n\n// Configuration tiddlers\nvar HEIGHT_MODE_TITLE = \"$:/config/TextEditor/EditorHeight/Mode\";\nvar ENABLE_TOOLBAR_TITLE = \"$:/config/TextEditor/EnableToolbar\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nfunction editTextWidgetFactory(toolbarEngine,nonToolbarEngine) {\n\n\tvar EditTextWidget = function(parseTreeNode,options) {\n\t\t// Initialise the editor operations if they've not been done already\n\t\tif(!this.editorOperations) {\n\t\t\tEditTextWidget.prototype.editorOperations = {};\n\t\t\t$tw.modules.applyMethods(\"texteditoroperation\",this.editorOperations);\n\t\t}\n\t\tthis.initialise(parseTreeNode,options);\n\t};\n\n\t/*\n\tInherit from the base widget class\n\t*/\n\tEditTextWidget.prototype = new Widget();\n\n\t/*\n\tRender this widget into the DOM\n\t*/\n\tEditTextWidget.prototype.render = function(parent,nextSibling) {\n\t\t// Save the parent dom node\n\t\tthis.parentDomNode = parent;\n\t\t// Compute our attributes\n\t\tthis.computeAttributes();\n\t\t// Execute our logic\n\t\tthis.execute();\n\t\t// Create the wrapper for the toolbar and render its content\n\t\tif(this.editShowToolbar) {\n\t\t\tthis.toolbarNode = this.document.createElement(\"div\");\n\t\t\tthis.toolbarNode.className = \"tc-editor-toolbar\";\n\t\t\tparent.insertBefore(this.toolbarNode,nextSibling);\n\t\t\tthis.renderChildren(this.toolbarNode,null);\n\t\t\tthis.domNodes.push(this.toolbarNode);\n\t\t}\n\t\t// Create our element\n\t\tvar editInfo = this.getEditInfo(),\n\t\t\tEngine = this.editShowToolbar ? 
toolbarEngine : nonToolbarEngine;\n\t\tthis.engine = new Engine({\n\t\t\t\twidget: this,\n\t\t\t\tvalue: editInfo.value,\n\t\t\t\ttype: editInfo.type,\n\t\t\t\tparentNode: parent,\n\t\t\t\tnextSibling: nextSibling\n\t\t\t});\n\t\t// Call the postRender hook\n\t\tif(this.postRender) {\n\t\t\tthis.postRender();\n\t\t}\n\t\t// Fix height\n\t\tthis.engine.fixHeight();\n\t\t// Focus if required\n\t\tif(this.editFocus === \"true\" || this.editFocus === \"yes\") {\n\t\t\tthis.engine.focus();\n\t\t}\n\t\t// Add widget message listeners\n\t\tthis.addEventListeners([\n\t\t\t{type: \"tm-edit-text-operation\", handler: \"handleEditTextOperationMessage\"}\n\t\t]);\n\t};\n\n\t/*\n\tGet the tiddler being edited and current value\n\t*/\n\tEditTextWidget.prototype.getEditInfo = function() {\n\t\t// Get the edit value\n\t\tvar self = this,\n\t\t\tvalue,\n\t\t\ttype = \"text/plain\",\n\t\t\tupdate;\n\t\tif(this.editIndex) {\n\t\t\tvalue = this.wiki.extractTiddlerDataItem(this.editTitle,this.editIndex,this.editDefault);\n\t\t\tupdate = function(value) {\n\t\t\t\tvar data = self.wiki.getTiddlerData(self.editTitle,{});\n\t\t\t\tif(data[self.editIndex] !== value) {\n\t\t\t\t\tdata[self.editIndex] = value;\n\t\t\t\t\tself.wiki.setTiddlerData(self.editTitle,data);\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Get the current tiddler and the field name\n\t\t\tvar tiddler = this.wiki.getTiddler(this.editTitle);\n\t\t\tif(tiddler) {\n\t\t\t\t// If we've got a tiddler, the value to display is the field string value\n\t\t\t\tvalue = tiddler.getFieldString(this.editField);\n\t\t\t\tif(this.editField === \"text\") {\n\t\t\t\t\ttype = tiddler.fields.type || \"text/vnd.tiddlywiki\";\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Otherwise, we need to construct a default value for the editor\n\t\t\t\tswitch(this.editField) {\n\t\t\t\t\tcase \"text\":\n\t\t\t\t\t\tvalue = \"Type the text for the tiddler '\" + this.editTitle + \"'\";\n\t\t\t\t\t\ttype = \"text/vnd.tiddlywiki\";\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"title\":\n\t\t\t\t\t\tvalue = this.editTitle;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvalue = \"\";\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tif(this.editDefault !== undefined) {\n\t\t\t\t\tvalue = this.editDefault;\n\t\t\t\t}\n\t\t\t}\n\t\t\tupdate = function(value) {\n\t\t\t\tvar tiddler = self.wiki.getTiddler(self.editTitle),\n\t\t\t\t\tupdateFields = {\n\t\t\t\t\t\ttitle: self.editTitle\n\t\t\t\t\t};\n\t\t\t\tupdateFields[self.editField] = value;\n\t\t\t\tself.wiki.addTiddler(new $tw.Tiddler(self.wiki.getCreationFields(),tiddler,updateFields,self.wiki.getModificationFields()));\n\t\t\t};\n\t\t}\n\t\tif(this.editType) {\n\t\t\ttype = this.editType;\n\t\t}\n\t\treturn {value: value || \"\", type: type, update: update};\n\t};\n\n\t/*\n\tHandle an edit text operation message from the toolbar\n\t*/\n\tEditTextWidget.prototype.handleEditTextOperationMessage = function(event) {\n\t\t// Prepare information about the operation\n\t\tvar operation = this.engine.createTextOperation();\n\t\t// Invoke the handler for the selected operation\n\t\tvar handler = this.editorOperations[event.param];\n\t\tif(handler) {\n\t\t\thandler.call(this,event,operation);\n\t\t}\n\t\t// Execute the operation via the engine\n\t\tvar newText = this.engine.executeTextOperation(operation);\n\t\t// Fix the tiddler height and save changes\n\t\tthis.engine.fixHeight();\n\t\tthis.saveChanges(newText);\n\t};\n\n\t/*\n\tCompute the internal state of the widget\n\t*/\n\tEditTextWidget.prototype.execute = function() {\n\t\t// Get our 
parameters\n\t\tthis.editTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t\tthis.editField = this.getAttribute(\"field\",\"text\");\n\t\tthis.editIndex = this.getAttribute(\"index\");\n\t\tthis.editDefault = this.getAttribute(\"default\");\n\t\tthis.editClass = this.getAttribute(\"class\");\n\t\tthis.editPlaceholder = this.getAttribute(\"placeholder\");\n\t\tthis.editSize = this.getAttribute(\"size\");\n\t\tthis.editRows = this.getAttribute(\"rows\");\n\t\tthis.editAutoHeight = this.wiki.getTiddlerText(HEIGHT_MODE_TITLE,\"auto\");\n\t\tthis.editAutoHeight = this.getAttribute(\"autoHeight\",this.editAutoHeight === \"auto\" ? \"yes\" : \"no\") === \"yes\";\n\t\tthis.editMinHeight = this.getAttribute(\"minHeight\",DEFAULT_MIN_TEXT_AREA_HEIGHT);\n\t\tthis.editFocusPopup = this.getAttribute(\"focusPopup\");\n\t\tthis.editFocus = this.getAttribute(\"focus\");\n\t\t// Get the default editor element tag and type\n\t\tvar tag,type;\n\t\tif(this.editField === \"text\") {\n\t\t\ttag = \"textarea\";\n\t\t} else {\n\t\t\ttag = \"input\";\n\t\t\tvar fieldModule = $tw.Tiddler.fieldModules[this.editField];\n\t\t\tif(fieldModule && fieldModule.editTag) {\n\t\t\t\ttag = fieldModule.editTag;\n\t\t\t}\n\t\t\tif(fieldModule && fieldModule.editType) {\n\t\t\t\ttype = fieldModule.editType;\n\t\t\t}\n\t\t\ttype = type || \"text\";\n\t\t}\n\t\t// Get the rest of our parameters\n\t\tthis.editTag = this.getAttribute(\"tag\",tag);\n\t\tthis.editType = this.getAttribute(\"type\",type);\n\t\t// Make the child widgets\n\t\tthis.makeChildWidgets();\n\t\t// Determine whether to show the toolbar\n\t\tthis.editShowToolbar = this.wiki.getTiddlerText(ENABLE_TOOLBAR_TITLE,\"yes\");\n\t\tthis.editShowToolbar = (this.editShowToolbar === \"yes\") && !!(this.children && this.children.length > 0);\n\t};\n\n\t/*\n\tSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n\t*/\n\tEditTextWidget.prototype.refresh = function(changedTiddlers) {\n\t\tvar changedAttributes = this.computeAttributes();\n\t\t// Completely rerender if any of our attributes have changed\n\t\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedAttributes[\"default\"] || changedAttributes[\"class\"] || changedAttributes.placeholder || changedAttributes.size || changedAttributes.autoHeight || changedAttributes.minHeight || changedAttributes.focusPopup ||  changedAttributes.rows || changedTiddlers[HEIGHT_MODE_TITLE] || changedTiddlers[ENABLE_TOOLBAR_TITLE]) {\n\t\t\tthis.refreshSelf();\n\t\t\treturn true;\n\t\t} else if(changedTiddlers[this.editTitle]) {\n\t\t\tvar editInfo = this.getEditInfo();\n\t\t\tthis.updateEditor(editInfo.value,editInfo.type);\n\t\t}\n\t\tthis.engine.fixHeight();\n\t\tif(this.editShowToolbar) {\n\t\t\treturn this.refreshChildren(changedTiddlers);\t\t\t\n\t\t} else {\n\t\t\treturn false;\n\t\t}\n\t};\n\n\t/*\n\tUpdate the editor with new text. 
This method is separate from updateEditorDomNode()\n\tso that subclasses can override updateEditor() and still use updateEditorDomNode()\n\t*/\n\tEditTextWidget.prototype.updateEditor = function(text,type) {\n\t\tthis.updateEditorDomNode(text,type);\n\t};\n\n\t/*\n\tUpdate the editor dom node with new text\n\t*/\n\tEditTextWidget.prototype.updateEditorDomNode = function(text,type) {\n\t\tthis.engine.setText(text,type);\n\t};\n\n\t/*\n\tSave changes back to the tiddler store\n\t*/\n\tEditTextWidget.prototype.saveChanges = function(text) {\n\t\tvar editInfo = this.getEditInfo();\n\t\tif(text !== editInfo.value) {\n\t\t\teditInfo.update(text);\n\t\t}\n\t};\n\n\t/*\n\tHandle a dom \"keydown\" event, which we'll bubble up to our container for the keyboard widgets benefit\n\t*/\n\tEditTextWidget.prototype.handleKeydownEvent = function(event) {\n\t\t// Check for a keyboard shortcut\n\t\tif(this.toolbarNode) {\n\t\t\tvar shortcutElements = this.toolbarNode.querySelectorAll(\"[data-tw-keyboard-shortcut]\");\n\t\t\tfor(var index=0; index<shortcutElements.length; index++) {\n\t\t\t\tvar el = shortcutElements[index],\n\t\t\t\t\tshortcutData = el.getAttribute(\"data-tw-keyboard-shortcut\"),\n\t\t\t\t\tkeyInfoArray = $tw.keyboardManager.parseKeyDescriptors(shortcutData,{\n\t\t\t\t\t\twiki: this.wiki\n\t\t\t\t\t});\n\t\t\t\tif($tw.keyboardManager.checkKeyDescriptors(event,keyInfoArray)) {\n\t\t\t\t\tvar clickEvent = this.document.createEvent(\"Events\");\n\t\t\t\t    clickEvent.initEvent(\"click\",true,false);\n\t\t\t\t    el.dispatchEvent(clickEvent);\n\t\t\t\t\tevent.preventDefault();\n\t\t\t\t\tevent.stopPropagation();\n\t\t\t\t\treturn true;\t\t\t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Propogate the event to the container\n\t\tif(this.propogateKeydownEvent(event)) {\n\t\t\t// Ignore the keydown if it was already handled\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n\t\t\treturn true;\n\t\t}\n\t\t// Otherwise, process the keydown normally\n\t\treturn false;\n\t};\n\n\t/*\n\tPropogate keydown events to our container for the keyboard widgets benefit\n\t*/\n\tEditTextWidget.prototype.propogateKeydownEvent = function(event) {\n\t\tvar newEvent = this.document.createEventObject ? this.document.createEventObject() : this.document.createEvent(\"Events\");\n\t\tif(newEvent.initEvent) {\n\t\t\tnewEvent.initEvent(\"keydown\", true, true);\n\t\t}\n\t\tnewEvent.keyCode = event.keyCode;\n\t\tnewEvent.which = event.which;\n\t\tnewEvent.metaKey = event.metaKey;\n\t\tnewEvent.ctrlKey = event.ctrlKey;\n\t\tnewEvent.altKey = event.altKey;\n\t\tnewEvent.shiftKey = event.shiftKey;\n\t\treturn !this.parentDomNode.dispatchEvent(newEvent);\n\t};\n\n\treturn EditTextWidget;\n\n}\n\nexports.editTextWidgetFactory = editTextWidgetFactory;\n\n})();\n",
            "title": "$:/core/modules/editor/factory.js",
            "type": "application/javascript",
            "module-type": "library"
        },
        "$:/core/modules/editor/operations/bitmap/clear.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/bitmap/clear.js\ntype: application/javascript\nmodule-type: bitmapeditoroperation\n\nBitmap editor operation to clear the image\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"clear\"] = function(event) {\n\tvar ctx = this.canvasDomNode.getContext(\"2d\");\n\tctx.globalAlpha = 1;\n\tctx.fillStyle = event.paramObject.colour || \"white\";\n\tctx.fillRect(0,0,this.canvasDomNode.width,this.canvasDomNode.height);\n\t// Save changes\n\tthis.strokeEnd();\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/bitmap/clear.js",
            "type": "application/javascript",
            "module-type": "bitmapeditoroperation"
        },
        "$:/core/modules/editor/operations/bitmap/resize.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/bitmap/resize.js\ntype: application/javascript\nmodule-type: bitmapeditoroperation\n\nBitmap editor operation to resize the image\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"resize\"] = function(event) {\n\t// Get the new width\n\tvar newWidth = parseInt(event.paramObject.width || this.canvasDomNode.width,10),\n\t\tnewHeight = parseInt(event.paramObject.height || this.canvasDomNode.height,10);\n\t// Update if necessary\n\tif(newWidth > 0 && newHeight > 0 && !(newWidth === this.currCanvas.width && newHeight === this.currCanvas.height)) {\n\t\tthis.changeCanvasSize(newWidth,newHeight);\n\t}\n\t// Update the input controls\n\tthis.refreshToolbar();\n\t// Save the image into the tiddler\n\tthis.saveChanges();\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/bitmap/resize.js",
            "type": "application/javascript",
            "module-type": "bitmapeditoroperation"
        },
        "$:/core/modules/editor/operations/text/excise.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/excise.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to excise the selection to a new tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"excise\"] = function(event,operation) {\n\tvar editTiddler = this.wiki.getTiddler(this.editTitle),\n\t\teditTiddlerTitle = this.editTitle;\n\tif(editTiddler && editTiddler.fields[\"draft.of\"]) {\n\t\teditTiddlerTitle = editTiddler.fields[\"draft.of\"];\n\t}\n\tvar excisionTitle = event.paramObject.title || this.wiki.generateNewTitle(\"New Excision\");\n\tthis.wiki.addTiddler(new $tw.Tiddler(\n\t\tthis.wiki.getCreationFields(),\n\t\tthis.wiki.getModificationFields(),\n\t\t{\n\t\t\ttitle: excisionTitle,\n\t\t\ttext: operation.selection,\n\t\t\ttags: event.paramObject.tagnew === \"yes\" ?  [editTiddlerTitle] : []\n\t\t}\n\t));\n\toperation.replacement = excisionTitle;\n\tswitch(event.paramObject.type || \"transclude\") {\n\t\tcase \"transclude\":\n\t\t\toperation.replacement = \"{{\" + operation.replacement+ \"}}\";\n\t\t\tbreak;\n\t\tcase \"link\":\n\t\t\toperation.replacement = \"[[\" + operation.replacement+ \"]]\";\n\t\t\tbreak;\n\t\tcase \"macro\":\n\t\t\toperation.replacement = \"<<\" + (event.paramObject.macro || \"translink\") + \" \\\"\\\"\\\"\" + operation.replacement + \"\\\"\\\"\\\">>\";\n\t\t\tbreak;\n\t}\n\toperation.cutStart = operation.selStart;\n\toperation.cutEnd = operation.selEnd;\n\toperation.newSelStart = operation.selStart;\n\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/excise.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/make-link.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/make-link.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to make a link\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"make-link\"] = function(event,operation) {\n\tif(operation.selection) {\n\t\toperation.replacement = \"[[\" + operation.selection + \"|\" + event.paramObject.text + \"]]\";\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t} else {\n\t\toperation.replacement = \"[[\" + event.paramObject.text + \"]]\";\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t}\n\toperation.newSelStart = operation.selStart + operation.replacement.length;\n\toperation.newSelEnd = operation.newSelStart;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/make-link.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/prefix-lines.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/prefix-lines.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to add a prefix to the selected lines\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"prefix-lines\"] = function(event,operation) {\n\t// Cut just past the preceding line break, or the start of the text\n\toperation.cutStart = $tw.utils.findPrecedingLineBreak(operation.text,operation.selStart);\n\t// Cut to just past the following line break, or to the end of the text\n\toperation.cutEnd = $tw.utils.findFollowingLineBreak(operation.text,operation.selEnd);\n\t// Compose the required prefix\n\tvar prefix = $tw.utils.repeat(event.paramObject.character,event.paramObject.count);\n\t// Process each line\n\tvar lines = operation.text.substring(operation.cutStart,operation.cutEnd).split(/\\r?\\n/mg);\n\t$tw.utils.each(lines,function(line,index) {\n\t\t// Remove and count any existing prefix characters\n\t\tvar count = 0;\n\t\twhile(line.charAt(0) === event.paramObject.character) {\n\t\t\tline = line.substring(1);\n\t\t\tcount++;\n\t\t}\n\t\t// Remove any whitespace\n\t\twhile(line.charAt(0) === \" \") {\n\t\t\tline = line.substring(1);\n\t\t}\n\t\t// We're done if we removed the exact required prefix, otherwise add it\n\t\tif(count !== event.paramObject.count) {\n\t\t\t// Apply the prefix\n\t\t\tline =  prefix + \" \" + line;\n\t\t}\n\t\t// Save the modified line\n\t\tlines[index] = line;\n\t});\n\t// Stitch the replacement text together and set the selection\n\toperation.replacement = lines.join(\"\\n\");\n\tif(lines.length === 1) {\n\t\toperation.newSelStart = operation.cutStart + operation.replacement.length;\n\t\toperation.newSelEnd = operation.newSelStart;\n\t} else {\n\t\toperation.newSelStart = operation.cutStart;\n\t\toperation.newSelEnd = operation.newSelStart + operation.replacement.length;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/prefix-lines.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/replace-all.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/replace-all.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to replace the entire text\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"replace-all\"] = function(event,operation) {\n\toperation.cutStart = 0;\n\toperation.cutEnd = operation.text.length;\n\toperation.replacement = event.paramObject.text;\n\toperation.newSelStart = 0;\n\toperation.newSelEnd = operation.replacement.length;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/replace-all.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/replace-selection.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/replace-selection.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to replace the selection\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"replace-selection\"] = function(event,operation) {\n\toperation.replacement = event.paramObject.text;\n\toperation.cutStart = operation.selStart;\n\toperation.cutEnd = operation.selEnd;\n\toperation.newSelStart = operation.selStart;\n\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/replace-selection.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/wrap-lines.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/wrap-lines.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to wrap the selected lines with a prefix and suffix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"wrap-lines\"] = function(event,operation) {\n\t// Cut just past the preceding line break, or the start of the text\n\toperation.cutStart = $tw.utils.findPrecedingLineBreak(operation.text,operation.selStart);\n\t// Cut to just past the following line break, or to the end of the text\n\toperation.cutEnd = $tw.utils.findFollowingLineBreak(operation.text,operation.selEnd);\n\t// Add the prefix and suffix\n\toperation.replacement = event.paramObject.prefix + \"\\n\" +\n\t\t\t\toperation.text.substring(operation.cutStart,operation.cutEnd) + \"\\n\" +\n\t\t\t\tevent.paramObject.suffix + \"\\n\";\n\toperation.newSelStart = operation.cutStart + event.paramObject.prefix.length + 1;\n\toperation.newSelEnd = operation.newSelStart + (operation.cutEnd - operation.cutStart);\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/wrap-lines.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/editor/operations/text/wrap-selection.js": {
            "text": "/*\\\ntitle: $:/core/modules/editor/operations/text/wrap-selection.js\ntype: application/javascript\nmodule-type: texteditoroperation\n\nText editor operation to wrap the selection with the specified prefix and suffix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports[\"wrap-selection\"] = function(event,operation) {\n\tif(operation.selStart === operation.selEnd) {\n\t\t// No selection; check if we're within the prefix/suffix\n\t\tif(operation.text.substring(operation.selStart - event.paramObject.prefix.length,operation.selStart + event.paramObject.suffix.length) === event.paramObject.prefix + event.paramObject.suffix) {\n\t\t\t// Remove the prefix and suffix unless they comprise the entire text\n\t\t\tif(operation.selStart > event.paramObject.prefix.length || (operation.selEnd + event.paramObject.suffix.length) < operation.text.length ) {\n\t\t\t\toperation.cutStart = operation.selStart - event.paramObject.prefix.length;\n\t\t\t\toperation.cutEnd = operation.selEnd + event.paramObject.suffix.length;\n\t\t\t\toperation.replacement = \"\";\n\t\t\t\toperation.newSelStart = operation.cutStart;\n\t\t\t\toperation.newSelEnd = operation.newSelStart;\n\t\t\t}\n\t\t} else {\n\t\t\t// Wrap the cursor instead\n\t\t\toperation.cutStart = operation.selStart;\n\t\t\toperation.cutEnd = operation.selEnd;\n\t\t\toperation.replacement = event.paramObject.prefix + event.paramObject.suffix;\n\t\t\toperation.newSelStart = operation.selStart + event.paramObject.prefix.length;\n\t\t\toperation.newSelEnd = operation.newSelStart;\n\t\t}\n\t} else if(operation.text.substring(operation.selStart,operation.selStart + event.paramObject.prefix.length) === event.paramObject.prefix && operation.text.substring(operation.selEnd - event.paramObject.suffix.length,operation.selEnd) === event.paramObject.suffix) {\n\t\t// Prefix and suffix are already present, so remove them\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t\toperation.replacement = operation.selection.substring(event.paramObject.prefix.length,operation.selection.length - event.paramObject.suffix.length);\n\t\toperation.newSelStart = operation.selStart;\n\t\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n\t} else {\n\t\t// Add the prefix and suffix\n\t\toperation.cutStart = operation.selStart;\n\t\toperation.cutEnd = operation.selEnd;\n\t\toperation.replacement = event.paramObject.prefix + operation.selection + event.paramObject.suffix;\n\t\toperation.newSelStart = operation.selStart;\n\t\toperation.newSelEnd = operation.selStart + operation.replacement.length;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/editor/operations/text/wrap-selection.js",
            "type": "application/javascript",
            "module-type": "texteditoroperation"
        },
        "$:/core/modules/filters/addprefix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/addprefix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for adding a prefix to each title in the list. This is\nespecially useful in contexts where only a filter expression is allowed\nand macro substitution isn't available.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.addprefix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(operator.operand + title);\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/addprefix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/addsuffix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/addsuffix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for adding a suffix to each title in the list. This is\nespecially useful in contexts where only a filter expression is allowed\nand macro substitution isn't available.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.addsuffix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title + operator.operand);\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/addsuffix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/after.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/after.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler from the current list that is after the tiddler named in the operand.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.after = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\tvar index = results.indexOf(operator.operand);\n\tif(index === -1 || index > (results.length - 2)) {\n\t\treturn [];\n\t} else {\n\t\treturn [results[index + 1]];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/after.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/all/current.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/current.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[current]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.current = function(source,prefix,options) {\n\tvar currTiddlerTitle = options.widget && options.widget.getVariable(\"currentTiddler\");\n\tif(currTiddlerTitle) {\n\t\treturn [currTiddlerTitle];\n\t} else {\n\t\treturn [];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/current.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/missing.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/missing.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[missing]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.missing = function(source,prefix,options) {\n\treturn options.wiki.getMissingTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/missing.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/orphans.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/orphans.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[orphans]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.orphans = function(source,prefix,options) {\n\treturn options.wiki.getOrphanTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/orphans.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/shadows.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/shadows.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[shadows]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.shadows = function(source,prefix,options) {\n\treturn options.wiki.allShadowTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/shadows.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all/tiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all/tiddlers.js\ntype: application/javascript\nmodule-type: allfilteroperator\n\nFilter function for [all[tiddlers]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tiddlers = function(source,prefix,options) {\n\treturn options.wiki.allTitles();\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all/tiddlers.js",
            "type": "application/javascript",
            "module-type": "allfilteroperator"
        },
        "$:/core/modules/filters/all.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/all.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for selecting tiddlers\n\n[all[shadows+tiddlers]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar allFilterOperators;\n\nfunction getAllFilterOperators() {\n\tif(!allFilterOperators) {\n\t\tallFilterOperators = {};\n\t\t$tw.modules.applyMethods(\"allfilteroperator\",allFilterOperators);\n\t}\n\treturn allFilterOperators;\n}\n\n/*\nExport our filter function\n*/\nexports.all = function(source,operator,options) {\n\t// Get our suboperators\n\tvar allFilterOperators = getAllFilterOperators();\n\t// Cycle through the suboperators accumulating their results\n\tvar results = [],\n\t\tsubops = operator.operand.split(\"+\");\n\t// Check for common optimisations\n\tif(subops.length === 1 && subops[0] === \"\") {\n\t\treturn source;\n\t} else if(subops.length === 1 && subops[0] === \"tiddlers\") {\n\t\treturn options.wiki.each;\n\t} else if(subops.length === 1 && subops[0] === \"shadows\") {\n\t\treturn options.wiki.eachShadow;\n\t} else if(subops.length === 2 && subops[0] === \"tiddlers\" && subops[1] === \"shadows\") {\n\t\treturn options.wiki.eachTiddlerPlusShadows;\n\t} else if(subops.length === 2 && subops[0] === \"shadows\" && subops[1] === \"tiddlers\") {\n\t\treturn options.wiki.eachShadowPlusTiddlers;\n\t}\n\t// Do it the hard way\n\tfor(var t=0; t<subops.length; t++) {\n\t\tvar subop = allFilterOperators[subops[t]];\n\t\tif(subop) {\n\t\t\t$tw.utils.pushTop(results,subop(source,operator.prefix,options));\n\t\t}\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/all.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/backlinks.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/backlinks.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning all the backlinks from a tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.backlinks = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.getTiddlerBacklinks(title));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/backlinks.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/before.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/before.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler from the current list that is before the tiddler named in the operand.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.before = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\tvar index = results.indexOf(operator.operand);\n\tif(index <= 0) {\n\t\treturn [];\n\t} else {\n\t\treturn [results[index - 1]];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/before.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/commands.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/commands.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the commands available in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.commands = function(source,operator,options) {\n\tvar results = [];\n\t$tw.utils.each($tw.commands,function(commandInfo,name) {\n\t\tresults.push(name);\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/commands.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/days.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/days.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects tiddlers with a specified date field within a specified date interval.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.days = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldName = operator.suffix || \"modified\",\n\t\tdayInterval = (parseInt(operator.operand,10)||0),\n\t\tdayIntervalSign = $tw.utils.sign(dayInterval),\n\t\ttargetTimeStamp = (new Date()).setHours(0,0,0,0) + 1000*60*60*24*dayInterval,\n\t\tisWithinDays = function(dateField) {\n\t\t\tvar sign = $tw.utils.sign(targetTimeStamp - (new Date(dateField)).setHours(0,0,0,0));\n\t\t\treturn sign === 0 || sign === dayIntervalSign;\n\t\t};\n\n\tif(operator.prefix === \"!\") {\n\t\ttargetTimeStamp = targetTimeStamp - 1000*60*60*24*dayIntervalSign;\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\t\tif(!isWithinDays($tw.utils.parseDate(tiddler.fields[fieldName]))) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\t\tif(isWithinDays($tw.utils.parseDate(tiddler.fields[fieldName]))) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/days.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/each.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/each.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects one tiddler for each unique value of the specified field.\nWith suffix \"list\", selects all tiddlers that are values in a specified list field.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.each = function(source,operator,options) {\n\tvar results =[] ,\n\t\tvalue,values = {},\n\t\tfield = operator.operand || \"title\";\n\tif(operator.suffix !== \"list-item\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler) {\n\t\t\t\tvalue = (field === \"title\") ? title : tiddler.getFieldString(field);\n\t\t\t\tif(!$tw.utils.hop(values,value)) {\n\t\t\t\t\tvalues[value] = true;\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler) {\n\t\t\t\t$tw.utils.each(\n\t\t\t\t\toptions.wiki.getTiddlerList(title,field),\n\t\t\t\t\tfunction(value) {\n\t\t\t\t\t\tif(!$tw.utils.hop(values,value)) {\n\t\t\t\t\t\t\tvalues[value] = true;\n\t\t\t\t\t\t\tresults.push(value);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/each.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/eachday.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/eachday.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects one tiddler for each unique day covered by the specified date field\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.eachday = function(source,operator,options) {\n\tvar results = [],\n\t\tvalues = [],\n\t\tfieldName = operator.operand || \"modified\";\n\t// Function to convert a date/time to a date integer\n\tvar toDate = function(value) {\n\t\tvalue = (new Date(value)).setHours(0,0,0,0);\n\t\treturn value+0;\n\t};\n\tsource(function(tiddler,title) {\n\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\tvar value = toDate($tw.utils.parseDate(tiddler.fields[fieldName]));\n\t\t\tif(values.indexOf(value) === -1) {\n\t\t\t\tvalues.push(value);\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/eachday.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/editiondescription.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/editiondescription.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the descriptions of the specified edition names\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.editiondescription = function(source,operator,options) {\n\tvar results = [],\n\t\teditionInfo = $tw.utils.getEditionInfo();\n\tif(editionInfo) {\n\t\tsource(function(tiddler,title) {\n\t\t\tif($tw.utils.hop(editionInfo,title)) {\n\t\t\t\tresults.push(editionInfo[title].description || \"\");\t\t\t\t\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/editiondescription.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/editions.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/editions.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the available editions in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.editions = function(source,operator,options) {\n\tvar results = [],\n\t\teditionInfo = $tw.utils.getEditionInfo();\n\tif(editionInfo) {\n\t\t$tw.utils.each(editionInfo,function(info,name) {\n\t\t\tresults.push(name);\n\t\t});\n\t}\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/editions.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/field.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/field.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for comparing fields for equality\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.field = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldname = (operator.suffix || operator.operator || \"title\").toLowerCase();\n\tif(operator.prefix === \"!\") {\n\t\tif(operator.regexp) {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && !operator.regexp.exec(text)) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t});\n\t\t} else {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && text !== operator.operand) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t} else {\n\t\tif(operator.regexp) {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && !!operator.regexp.exec(text)) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t} else {\n\t\t\tsource(function(tiddler,title) {\n\t\t\t\tif(tiddler) {\n\t\t\t\t\tvar text = tiddler.getFieldString(fieldname);\n\t\t\t\t\tif(text !== null && text === operator.operand) {\n\t\t\t\t\t\tresults.push(title);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/field.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/fields.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/fields.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the fields on the selected tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.fields = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(tiddler) {\n\t\t\tfor(var fieldName in tiddler.fields) {\n\t\t\t\t$tw.utils.pushTop(results,fieldName);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/fields.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/get.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/get.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for replacing tiddler titles by the value of the field specified in the operand.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.get = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(tiddler) {\n\t\t\tvar value = tiddler.getFieldString(operator.operand);\n\t\t\tif(value) {\n\t\t\t\tresults.push(value);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/get.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/getindex.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/getindex.js\ntype: application/javascript\nmodule-type: filteroperator\n\nreturns the value at a given index of datatiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.getindex = function(source,operator,options) {\n\tvar data,title,results = [];\n\tif(operator.operand){\n\t\tsource(function(tiddler,title) {\n\t\t\ttitle = tiddler ? tiddler.fields.title : title;\n\t\t\tdata = options.wiki.extractTiddlerDataItem(tiddler,operator.operand);\n\t\t\tif(data) {\n\t\t\t\tresults.push(data);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/getindex.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/has.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/has.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking if a tiddler has the specified field\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.has = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!tiddler || (tiddler && (!$tw.utils.hop(tiddler.fields,operator.operand) || tiddler.fields[operator.operand] === \"\"))) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && $tw.utils.hop(tiddler.fields,operator.operand) && !(tiddler.fields[operator.operand] === \"\" || tiddler.fields[operator.operand].length === 0)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/has.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/haschanged.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/haschanged.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returns tiddlers from the list that have a non-zero changecount.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.haschanged = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.getChangeCount(title) === 0) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.getChangeCount(title) > 0) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/haschanged.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/indexes.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/indexes.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the indexes of a data tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.indexes = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar data = options.wiki.getTiddlerDataCached(title);\n\t\tif(data) {\n\t\t\t$tw.utils.pushTop(results,Object.keys(data));\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/indexes.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/is/current.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/current.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[current]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.current = function(source,prefix,options) {\n\tvar results = [],\n\t\tcurrTiddlerTitle = options.widget && options.widget.getVariable(\"currentTiddler\");\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title !== currTiddlerTitle) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title === currTiddlerTitle) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/current.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/image.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/image.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[image]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.image = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.isImageTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.isImageTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/image.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/missing.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/missing.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[missing]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.missing = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/missing.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/orphan.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/orphan.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[orphan]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.orphan = function(source,prefix,options) {\n\tvar results = [],\n\t\torphanTitles = options.wiki.getOrphanTitles();\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(orphanTitles.indexOf(title) === -1) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(orphanTitles.indexOf(title) !== -1) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/orphan.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/shadow.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/shadow.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[shadow]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.shadow = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.isShadowTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.isShadowTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/shadow.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/system.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/system.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[system]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.system = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.isSystemTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.isSystemTiddler(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/system.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/tag.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/tag.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[tag]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tag = function(source,prefix,options) {\n\tvar results = [],\n\t\ttagMap = options.wiki.getTagMap();\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!$tw.utils.hop(tagMap,title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif($tw.utils.hop(tagMap,title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/tag.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is/tiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is/tiddler.js\ntype: application/javascript\nmodule-type: isfilteroperator\n\nFilter function for [is[tiddler]]\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tiddler = function(source,prefix,options) {\n\tvar results = [];\n\tif(prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(options.wiki.tiddlerExists(title)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is/tiddler.js",
            "type": "application/javascript",
            "module-type": "isfilteroperator"
        },
        "$:/core/modules/filters/is.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/is.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking tiddler properties\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar isFilterOperators;\n\nfunction getIsFilterOperators() {\n\tif(!isFilterOperators) {\n\t\tisFilterOperators = {};\n\t\t$tw.modules.applyMethods(\"isfilteroperator\",isFilterOperators);\n\t}\n\treturn isFilterOperators;\n}\n\n/*\nExport our filter function\n*/\nexports.is = function(source,operator,options) {\n\t// Dispatch to the correct isfilteroperator\n\tvar isFilterOperators = getIsFilterOperators();\n\tvar isFilterOperator = isFilterOperators[operator.operand];\n\tif(isFilterOperator) {\n\t\treturn isFilterOperator(source,operator.prefix,options);\n\t} else {\n\t\treturn [$tw.language.getString(\"Error/IsFilterOperator\")];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/is.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/limit.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/limit.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for chopping the results to a specified maximum number of entries\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.limit = function(source,operator,options) {\n\tvar results = [];\n\t// Convert to an array\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\t// Slice the array if necessary\n\tvar limit = Math.min(results.length,parseInt(operator.operand,10));\n\tif(operator.prefix === \"!\") {\n\t\tresults = results.slice(-limit);\n\t} else {\n\t\tresults = results.slice(0,limit);\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/limit.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/links.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/links.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning all the links from a tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.links = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.getTiddlerLinks(title));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/links.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/list.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/list.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddlers whose title is listed in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.list = function(source,operator,options) {\n\tvar results = [],\n\t\ttr = $tw.utils.parseTextReference(operator.operand),\n\t\tcurrTiddlerTitle = options.widget && options.widget.getVariable(\"currentTiddler\"),\n\t\tlist = options.wiki.getTiddlerList(tr.title || currTiddlerTitle,tr.field,tr.index);\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(list.indexOf(title) === -1) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tresults = list;\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/list.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/listed.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/listed.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all tiddlers that have the selected tiddlers in a list\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.listed = function(source,operator,options) {\n\tvar field = operator.operand || \"list\",\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.findListingsOfTiddler(title,field));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/listed.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/listops.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/listops.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operators for manipulating the current selection list\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nReverse list\n*/\nexports.reverse = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.unshift(title);\n\t});\n\treturn results;\n};\n\n/*\nFirst entry/entries in list\n*/\nexports.first = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(0,count);\n};\n\n/*\nLast entry/entries in list\n*/\nexports.last = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(-count);\n};\n\n/*\nAll but the first entry/entries of the list\n*/\nexports.rest = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(count);\n};\nexports.butfirst = exports.rest;\nexports.bf = exports.rest;\n\n/*\nAll but the last entry/entries of the list\n*/\nexports.butlast = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(0,-count);\n};\nexports.bl = exports.butlast;\n\n/*\nThe nth member of the list\n*/\nexports.nth = function(source,operator,options) {\n\tvar count = parseInt(operator.operand) || 1,\n\t\tresults = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results.slice(count - 1,count);\n};\n\n})();\n",
            "title": "$:/core/modules/filters/listops.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/modules.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/modules.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the titles of the modules of a given type in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.modules = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.each($tw.modules.types[title],function(moduleInfo,moduleName) {\n\t\t\tresults.push(moduleName);\n\t\t});\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/modules.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/moduletypes.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/moduletypes.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the module types in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.moduletypes = function(source,operator,options) {\n\tvar results = [];\n\t$tw.utils.each($tw.modules.types,function(moduleInfo,type) {\n\t\tresults.push(type);\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/moduletypes.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/next.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/next.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler whose title occurs next in the list supplied in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.next = function(source,operator,options) {\n\tvar results = [],\n\t\tlist = options.wiki.getTiddlerList(operator.operand);\n\tsource(function(tiddler,title) {\n\t\tvar match = list.indexOf(title);\n\t\t// increment match and then test if result is in range\n\t\tmatch++;\n\t\tif(match > 0 && match < list.length) {\n\t\t\tresults.push(list[match]);\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/next.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/plugintiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/plugintiddlers.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the titles of the shadow tiddlers within a plugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.plugintiddlers = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar pluginInfo = options.wiki.getPluginInfo(title) || options.wiki.getTiddlerDataCached(title,{tiddlers:[]});\n\t\tif(pluginInfo && pluginInfo.tiddlers) {\n\t\t\t$tw.utils.each(pluginInfo.tiddlers,function(fields,title) {\n\t\t\t\tresults.push(title);\n\t\t\t});\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/plugintiddlers.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/prefix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/prefix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking if a title starts with a prefix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.prefix = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(0,operator.operand.length) !== operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(0,operator.operand.length) === operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/prefix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/previous.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/previous.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning the tiddler whose title occurs immediately prior in the list supplied in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.previous = function(source,operator,options) {\n\tvar results = [],\n\t\tlist = options.wiki.getTiddlerList(operator.operand);\n\tsource(function(tiddler,title) {\n\t\tvar match = list.indexOf(title);\n\t\t// increment match and then test if result is in range\n\t\tmatch--;\n\t\tif(match >= 0) {\n\t\t\tresults.push(list[match]);\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/previous.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/regexp.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/regexp.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for regexp matching\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.regexp = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldname = (operator.suffix || \"title\").toLowerCase(),\n\t\tregexpString, regexp, flags = \"\", match,\n\t\tgetFieldString = function(tiddler,title) {\n\t\t\tif(tiddler) {\n\t\t\t\treturn tiddler.getFieldString(fieldname);\n\t\t\t} else if(fieldname === \"title\") {\n\t\t\t\treturn title;\n\t\t\t} else {\n\t\t\t\treturn null;\n\t\t\t}\n\t\t};\n\t// Process flags and construct regexp\n\tregexpString = operator.operand;\n\tmatch = /^\\(\\?([gim]+)\\)/.exec(regexpString);\n\tif(match) {\n\t\tflags = match[1];\n\t\tregexpString = regexpString.substr(match[0].length);\n\t} else {\n\t\tmatch = /\\(\\?([gim]+)\\)$/.exec(regexpString);\n\t\tif(match) {\n\t\t\tflags = match[1];\n\t\t\tregexpString = regexpString.substr(0,regexpString.length - match[0].length);\n\t\t}\n\t}\n\ttry {\n\t\tregexp = new RegExp(regexpString,flags);\n\t} catch(e) {\n\t\treturn [\"\" + e];\n\t}\n\t// Process the incoming tiddlers\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tvar text = getFieldString(tiddler,title);\n\t\t\tif(text !== null) {\n\t\t\t\tif(!regexp.exec(text)) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tvar text = getFieldString(tiddler,title);\n\t\t\tif(text !== null) {\n\t\t\t\tif(!!regexp.exec(text)) {\n\t\t\t\t\tresults.push(title);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/regexp.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/removeprefix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/removeprefix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for removing a prefix from each title in the list. Titles that do not start with the prefix are removed.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.removeprefix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(title.substr(0,operator.operand.length) === operator.operand) {\n\t\t\tresults.push(title.substr(operator.operand.length));\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/removeprefix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/removesuffix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/removesuffix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for removing a suffix from each title in the list. Titles that do not end with the suffix are removed.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.removesuffix = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tif(title.substr(-operator.operand.length) === operator.operand) {\n\t\t\tresults.push(title.substr(0,title.length - operator.operand.length));\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/removesuffix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/sameday.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/sameday.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that selects tiddlers with a modified date field on the same day as the provided value.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.sameday = function(source,operator,options) {\n\tvar results = [],\n\t\tfieldName = operator.suffix || \"modified\",\n\t\ttargetDate = (new Date($tw.utils.parseDate(operator.operand))).setHours(0,0,0,0);\n\t// Function to convert a date/time to a date integer\n\tvar isSameDay = function(dateField) {\n\t\t\treturn (new Date(dateField)).setHours(0,0,0,0) === targetDate;\n\t\t};\n\tsource(function(tiddler,title) {\n\t\tif(tiddler && tiddler.fields[fieldName]) {\n\t\t\tif(isSameDay($tw.utils.parseDate(tiddler.fields[fieldName]))) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/sameday.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/search.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/search.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for searching for the text in the operand tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.search = function(source,operator,options) {\n\tvar invert = operator.prefix === \"!\";\n\tif(operator.suffix) {\n\t\treturn options.wiki.search(operator.operand,{\n\t\t\tsource: source,\n\t\t\tinvert: invert,\n\t\t\tfield: operator.suffix\n\t\t});\n\t} else {\n\t\treturn options.wiki.search(operator.operand,{\n\t\t\tsource: source,\n\t\t\tinvert: invert\n\t\t});\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/filters/search.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/shadowsource.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/shadowsource.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the source plugins for shadow tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.shadowsource = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar source = options.wiki.getShadowSource(title);\n\t\tif(source) {\n\t\t\t$tw.utils.pushTop(results,source);\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/shadowsource.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/sort.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/sort.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for sorting\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.sort = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",false,false);\n\treturn results;\n};\n\nexports.nsort = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",false,true);\n\treturn results;\n};\n\nexports.sortcs = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",true,false);\n\treturn results;\n};\n\nexports.nsortcs = function(source,operator,options) {\n\tvar results = prepare_results(source);\n\toptions.wiki.sortTiddlers(results,operator.operand || \"title\",operator.prefix === \"!\",true,true);\n\treturn results;\n};\n\nvar prepare_results = function (source) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tresults.push(title);\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/sort.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/splitbefore.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/splitbefore.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator that splits each result on the first occurance of the specified separator and returns the unique values.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.splitbefore = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\tvar parts = title.split(operator.operand);\n\t\tif(parts.length === 1) {\n\t\t\t$tw.utils.pushTop(results,parts[0]);\n\t\t} else {\n\t\t\t$tw.utils.pushTop(results,parts[0] + operator.operand);\n\t\t}\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/splitbefore.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/storyviews.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/storyviews.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the story views in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.storyviews = function(source,operator,options) {\n\tvar results = [],\n\t\tstoryviews = {};\n\t$tw.modules.applyMethods(\"storyview\",storyviews);\n\t$tw.utils.each(storyviews,function(info,name) {\n\t\tresults.push(name);\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/storyviews.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/suffix.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/suffix.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking if a title ends with a suffix\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.suffix = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(-operator.operand.length) !== operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(title.substr(-operator.operand.length) === operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/suffix.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/tag.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/tag.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for checking for the presence of a tag\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tag = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && !tiddler.hasTag(operator.operand)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.hasTag(operator.operand)) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t\tresults = options.wiki.sortByList(results,operator.operand);\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/tag.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/tagging.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/tagging.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all tiddlers that are tagged with the selected tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tagging = function(source,operator,options) {\n\tvar results = [];\n\tsource(function(tiddler,title) {\n\t\t$tw.utils.pushTop(results,options.wiki.getTiddlersWithTag(title));\n\t});\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/tagging.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/tags.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/tags.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all the tags of the selected tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.tags = function(source,operator,options) {\n\tvar tags = {};\n\tsource(function(tiddler,title) {\n\t\tvar t, length;\n\t\tif(tiddler && tiddler.fields.tags) {\n\t\t\tfor(t=0, length=tiddler.fields.tags.length; t<length; t++) {\n\t\t\t\ttags[tiddler.fields.tags[t]] = true;\n\t\t\t}\n\t\t}\n\t});\n\treturn Object.keys(tags);\n};\n\n})();\n",
            "title": "$:/core/modules/filters/tags.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/title.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/title.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for comparing title fields for equality\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.title = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && tiddler.fields.title !== operator.operand) {\n\t\t\t\tresults.push(title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tresults.push(operator.operand);\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/title.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/untagged.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/untagged.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator returning all the selected tiddlers that are untagged\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.untagged = function(source,operator,options) {\n\tvar results = [];\n\tif(operator.prefix === \"!\") {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(tiddler && $tw.utils.isArray(tiddler.fields.tags) && tiddler.fields.tags.length > 0) {\n\t\t\t\t$tw.utils.pushTop(results,title);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tsource(function(tiddler,title) {\n\t\t\tif(!tiddler || !tiddler.hasField(\"tags\") || ($tw.utils.isArray(tiddler.fields.tags) && tiddler.fields.tags.length === 0)) {\n\t\t\t\t$tw.utils.pushTop(results,title);\n\t\t\t}\n\t\t});\n\t}\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/untagged.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/wikiparserrules.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/wikiparserrules.js\ntype: application/javascript\nmodule-type: filteroperator\n\nFilter operator for returning the names of the wiki parser rules in this wiki\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nExport our filter function\n*/\nexports.wikiparserrules = function(source,operator,options) {\n\tvar results = [];\n\t$tw.utils.each($tw.modules.types.wikirule,function(mod) {\n\t\tvar exp = mod.exports;\n\t\tif(exp.types[operator.operand]) {\n\t\t\tresults.push(exp.name);\n\t\t}\n\t});\n\tresults.sort();\n\treturn results;\n};\n\n})();\n",
            "title": "$:/core/modules/filters/wikiparserrules.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters/x-listops.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters/x-listops.js\ntype: application/javascript\nmodule-type: filteroperator\n\nExtended filter operators to manipulate the current list.\n\n\\*/\n(function () {\n\n    /*jslint node: true, browser: true */\n    /*global $tw: false */\n    \"use strict\";\n\n    /*\n    Fetch titles from the current list\n    */\n    var prepare_results = function (source) {\n    var results = [];\n        source(function (tiddler, title) {\n            results.push(title);\n        });\n        return results;\n    };\n\n    /*\n    Moves a number of items from the tail of the current list before the item named in the operand\n    */\n    exports.putbefore = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1;\n        return (index === -1) ?\n            results.slice(0, -1) :\n            results.slice(0, index).concat(results.slice(-count)).concat(results.slice(index, -count));\n    };\n\n    /*\n    Moves a number of items from the tail of the current list after the item named in the operand\n    */\n    exports.putafter = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1;\n        return (index === -1) ?\n            results.slice(0, -1) :\n            results.slice(0, index + 1).concat(results.slice(-count)).concat(results.slice(index + 1, -count));\n    };\n\n    /*\n    Replaces the item named in the operand with a number of items from the tail of the current list\n    */\n    exports.replace = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1;\n        return (index === -1) ?\n            results.slice(0, -count) :\n            results.slice(0, index).concat(results.slice(-count)).concat(results.slice(index + 1, -count));\n    };\n\n    /*\n    Moves a number of items from the tail of the current list to the head of the list\n    */\n    exports.putfirst = function (source, operator) {\n        var results = prepare_results(source),\n            count = parseInt(operator.suffix) || 1;\n        return results.slice(-count).concat(results.slice(0, -count));\n    };\n\n    /*\n    Moves a number of items from the head of the current list to the tail of the list\n    */\n    exports.putlast = function (source, operator) {\n        var results = prepare_results(source),\n            count = parseInt(operator.suffix) || 1;\n        return results.slice(count).concat(results.slice(0, count));\n    };\n\n    /*\n    Moves the item named in the operand a number of places forward or backward in the list\n    */\n    exports.move = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand),\n            count = parseInt(operator.suffix) || 1,\n            marker = results.splice(index, 1);\n        return results.slice(0, index + count).concat(marker).concat(results.slice(index + count));\n    };\n\n    /*\n    Returns the items from the current list that are after the item named in the operand\n    */\n    exports.allafter = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand);\n        return (index === 
-1 || index > (results.length - 2)) ? [] :\n            (operator.suffix) ? results.slice(index) :\n            results.slice(index + 1);\n    };\n\n    /*\n    Returns the items from the current list that are before the item named in the operand\n    */\n    exports.allbefore = function (source, operator) {\n        var results = prepare_results(source),\n            index = results.indexOf(operator.operand);\n        return (index <= 0) ? [] :\n            (operator.suffix) ? results.slice(0, index + 1) :\n            results.slice(0, index);\n    };\n\n    /*\n    Appends the items listed in the operand array to the tail of the current list\n    */\n    exports.append = function (source, operator) {\n        var append = $tw.utils.parseStringArray(operator.operand, \"true\"),\n            results = prepare_results(source),\n            count = parseInt(operator.suffix) || append.length;\n        return (append.length === 0) ? results :\n            (operator.prefix) ? results.concat(append.slice(-count)) :\n            results.concat(append.slice(0, count));\n    };\n\n    /*\n    Prepends the items listed in the operand array to the head of the current list\n    */\n    exports.prepend = function (source, operator) {\n        var prepend = $tw.utils.parseStringArray(operator.operand, \"true\"),\n            results = prepare_results(source),\n            count = parseInt(operator.suffix) || prepend.length;\n        return (prepend.length === 0) ? results :\n            (operator.prefix) ? prepend.slice(-count).concat(results) :\n            prepend.slice(0, count).concat(results);\n    };\n\n    /*\n    Returns all items from the current list except the items listed in the operand array\n    */\n    exports.remove = function (source, operator) {\n        var array = $tw.utils.parseStringArray(operator.operand, \"true\"),\n            results = prepare_results(source),\n            count = parseInt(operator.suffix) || array.length,\n            p,\n            len,\n            index;\n        len = array.length - 1;\n        for (p = 0; p < count; ++p) {\n            if (operator.prefix) {\n                index = results.indexOf(array[len - p]);\n            } else {\n                index = results.indexOf(array[p]);\n            }\n            if (index !== -1) {\n                results.splice(index, 1);\n            }\n        }\n        return results;\n    };\n\n    /*\n    Returns all items from the current list sorted in the order of the items in the operand array\n    */\n    exports.sortby = function (source, operator) {\n        var results = prepare_results(source);\n        if (!results || results.length < 2) {\n            return results;\n        }\n        var lookup = $tw.utils.parseStringArray(operator.operand, \"true\");\n        results.sort(function (a, b) {\n            return lookup.indexOf(a) - lookup.indexOf(b);\n        });\n        return results;\n    };\n\n    /*\n    Removes all duplicate items from the current list\n    */\n    exports.unique = function (source, operator) {\n        var results = prepare_results(source);\n        var set = results.reduce(function (a, b) {\n            if (a.indexOf(b) < 0) {\n                a.push(b);\n            }\n            return a;\n        }, []);\n        return set;\n    };\n})();\n",
            "title": "$:/core/modules/filters/x-listops.js",
            "type": "application/javascript",
            "module-type": "filteroperator"
        },
        "$:/core/modules/filters.js": {
            "text": "/*\\\ntitle: $:/core/modules/filters.js\ntype: application/javascript\nmodule-type: wikimethod\n\nAdds tiddler filtering methods to the $tw.Wiki object.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nParses an operation (i.e. a run) within a filter string\n\toperators: Array of array of operator nodes into which results should be inserted\n\tfilterString: filter string\n\tp: start position within the string\nReturns the new start position, after the parsed operation\n*/\nfunction parseFilterOperation(operators,filterString,p) {\n\tvar operator, operand, bracketPos, curlyBracketPos;\n\t// Skip the starting square bracket\n\tif(filterString.charAt(p++) !== \"[\") {\n\t\tthrow \"Missing [ in filter expression\";\n\t}\n\t// Process each operator in turn\n\tdo {\n\t\toperator = {};\n\t\t// Check for an operator prefix\n\t\tif(filterString.charAt(p) === \"!\") {\n\t\t\toperator.prefix = filterString.charAt(p++);\n\t\t}\n\t\t// Get the operator name\n\t\tvar nextBracketPos = filterString.substring(p).search(/[\\[\\{<\\/]/);\n\t\tif(nextBracketPos === -1) {\n\t\t\tthrow \"Missing [ in filter expression\";\n\t\t}\n\t\tnextBracketPos += p;\n\t\tvar bracket = filterString.charAt(nextBracketPos);\n\t\toperator.operator = filterString.substring(p,nextBracketPos);\n\t\t\n\t\t// Any suffix?\n\t\tvar colon = operator.operator.indexOf(':');\n\t\tif(colon > -1) {\n\t\t\toperator.suffix = operator.operator.substring(colon + 1);\n\t\t\toperator.operator = operator.operator.substring(0,colon) || \"field\";\n\t\t}\n\t\t// Empty operator means: title\n\t\telse if(operator.operator === \"\") {\n\t\t\toperator.operator = \"title\";\n\t\t}\n\n\t\tp = nextBracketPos + 1;\n\t\tswitch (bracket) {\n\t\t\tcase \"{\": // Curly brackets\n\t\t\t\toperator.indirect = true;\n\t\t\t\tnextBracketPos = filterString.indexOf(\"}\",p);\n\t\t\t\tbreak;\n\t\t\tcase \"[\": // Square brackets\n\t\t\t\tnextBracketPos = filterString.indexOf(\"]\",p);\n\t\t\t\tbreak;\n\t\t\tcase \"<\": // Angle brackets\n\t\t\t\toperator.variable = true;\n\t\t\t\tnextBracketPos = filterString.indexOf(\">\",p);\n\t\t\t\tbreak;\n\t\t\tcase \"/\": // regexp brackets\n\t\t\t\tvar rex = /^((?:[^\\\\\\/]*|\\\\.)*)\\/(?:\\(([mygi]+)\\))?/g,\n\t\t\t\t\trexMatch = rex.exec(filterString.substring(p));\n\t\t\t\tif(rexMatch) {\n\t\t\t\t\toperator.regexp = new RegExp(rexMatch[1], rexMatch[2]);\n// DEPRECATION WARNING\nconsole.log(\"WARNING: Filter\",operator.operator,\"has a deprecated regexp operand\",operator.regexp);\n\t\t\t\t\tnextBracketPos = p + rex.lastIndex - 1;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthrow \"Unterminated regular expression in filter expression\";\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t}\n\t\t\n\t\tif(nextBracketPos === -1) {\n\t\t\tthrow \"Missing closing bracket in filter expression\";\n\t\t}\n\t\tif(!operator.regexp) {\n\t\t\toperator.operand = filterString.substring(p,nextBracketPos);\n\t\t}\n\t\tp = nextBracketPos + 1;\n\t\t\t\n\t\t// Push this operator\n\t\toperators.push(operator);\n\t} while(filterString.charAt(p) !== \"]\");\n\t// Skip the ending square bracket\n\tif(filterString.charAt(p++) !== \"]\") {\n\t\tthrow \"Missing ] in filter expression\";\n\t}\n\t// Return the parsing position\n\treturn p;\n}\n\n/*\nParse a filter string\n*/\nexports.parseFilter = function(filterString) {\n\tfilterString = filterString || \"\";\n\tvar results = [], // Array of arrays of operator nodes {operator:,operand:}\n\t\tp = 0, // Current position in the filter 
string\n\t\tmatch;\n\tvar whitespaceRegExp = /(\\s+)/mg,\n\t\toperandRegExp = /((?:\\+|\\-)?)(?:(\\[)|(?:\"([^\"]*)\")|(?:'([^']*)')|([^\\s\\[\\]]+))/mg;\n\twhile(p < filterString.length) {\n\t\t// Skip any whitespace\n\t\twhitespaceRegExp.lastIndex = p;\n\t\tmatch = whitespaceRegExp.exec(filterString);\n\t\tif(match && match.index === p) {\n\t\t\tp = p + match[0].length;\n\t\t}\n\t\t// Match the start of the operation\n\t\tif(p < filterString.length) {\n\t\t\toperandRegExp.lastIndex = p;\n\t\t\tmatch = operandRegExp.exec(filterString);\n\t\t\tif(!match || match.index !== p) {\n\t\t\t\tthrow $tw.language.getString(\"Error/FilterSyntax\");\n\t\t\t}\n\t\t\tvar operation = {\n\t\t\t\tprefix: \"\",\n\t\t\t\toperators: []\n\t\t\t};\n\t\t\tif(match[1]) {\n\t\t\t\toperation.prefix = match[1];\n\t\t\t\tp++;\n\t\t\t}\n\t\t\tif(match[2]) { // Opening square bracket\n\t\t\t\tp = parseFilterOperation(operation.operators,filterString,p);\n\t\t\t} else {\n\t\t\t\tp = match.index + match[0].length;\n\t\t\t}\n\t\t\tif(match[3] || match[4] || match[5]) { // Double quoted string, single quoted string or unquoted title\n\t\t\t\toperation.operators.push(\n\t\t\t\t\t{operator: \"title\", operand: match[3] || match[4] || match[5]}\n\t\t\t\t);\n\t\t\t}\n\t\t\tresults.push(operation);\n\t\t}\n\t}\n\treturn results;\n};\n\nexports.getFilterOperators = function() {\n\tif(!this.filterOperators) {\n\t\t$tw.Wiki.prototype.filterOperators = {};\n\t\t$tw.modules.applyMethods(\"filteroperator\",this.filterOperators);\n\t}\n\treturn this.filterOperators;\n};\n\nexports.filterTiddlers = function(filterString,widget,source) {\n\tvar fn = this.compileFilter(filterString);\n\treturn fn.call(this,source,widget);\n};\n\n/*\nCompile a filter into a function with the signature fn(source,widget) where:\nsource: an iterator function for the source tiddlers, called source(iterator), where iterator is called as iterator(tiddler,title)\nwidget: an optional widget node for retrieving the current tiddler etc.\n*/\nexports.compileFilter = function(filterString) {\n\tvar filterParseTree;\n\ttry {\n\t\tfilterParseTree = this.parseFilter(filterString);\n\t} catch(e) {\n\t\treturn function(source,widget) {\n\t\t\treturn [$tw.language.getString(\"Error/Filter\") + \": \" + e];\n\t\t};\n\t}\n\t// Get the hashmap of filter operator functions\n\tvar filterOperators = this.getFilterOperators();\n\t// Assemble array of functions, one for each operation\n\tvar operationFunctions = [];\n\t// Step through the operations\n\tvar self = this;\n\t$tw.utils.each(filterParseTree,function(operation) {\n\t\t// Create a function for the chain of operators in the operation\n\t\tvar operationSubFunction = function(source,widget) {\n\t\t\tvar accumulator = source,\n\t\t\t\tresults = [],\n\t\t\t\tcurrTiddlerTitle = widget && widget.getVariable(\"currentTiddler\");\n\t\t\t$tw.utils.each(operation.operators,function(operator) {\n\t\t\t\tvar operand = operator.operand,\n\t\t\t\t\toperatorFunction;\n\t\t\t\tif(!operator.operator) {\n\t\t\t\t\toperatorFunction = filterOperators.title;\n\t\t\t\t} else if(!filterOperators[operator.operator]) {\n\t\t\t\t\toperatorFunction = filterOperators.field;\n\t\t\t\t} else {\n\t\t\t\t\toperatorFunction = filterOperators[operator.operator];\n\t\t\t\t}\n\t\t\t\tif(operator.indirect) {\n\t\t\t\t\toperand = self.getTextReference(operator.operand,\"\",currTiddlerTitle);\n\t\t\t\t}\n\t\t\t\tif(operator.variable) {\n\t\t\t\t\toperand = widget.getVariable(operator.operand,{defaultValue: \"\"});\n\t\t\t\t}\n\t\t\t\t// Invoke the appropriate 
filteroperator module\n\t\t\t\tresults = operatorFunction(accumulator,{\n\t\t\t\t\t\t\toperator: operator.operator,\n\t\t\t\t\t\t\toperand: operand,\n\t\t\t\t\t\t\tprefix: operator.prefix,\n\t\t\t\t\t\t\tsuffix: operator.suffix,\n\t\t\t\t\t\t\tregexp: operator.regexp\n\t\t\t\t\t\t},{\n\t\t\t\t\t\t\twiki: self,\n\t\t\t\t\t\t\twidget: widget\n\t\t\t\t\t\t});\n\t\t\t\tif($tw.utils.isArray(results)) {\n\t\t\t\t\taccumulator = self.makeTiddlerIterator(results);\n\t\t\t\t} else {\n\t\t\t\t\taccumulator = results;\n\t\t\t\t}\n\t\t\t});\n\t\t\tif($tw.utils.isArray(results)) {\n\t\t\t\treturn results;\n\t\t\t} else {\n\t\t\t\tvar resultArray = [];\n\t\t\t\tresults(function(tiddler,title) {\n\t\t\t\t\tresultArray.push(title);\n\t\t\t\t});\n\t\t\t\treturn resultArray;\n\t\t\t}\n\t\t};\n\t\t// Wrap the operator functions in a wrapper function that depends on the prefix\n\t\toperationFunctions.push((function() {\n\t\t\tswitch(operation.prefix || \"\") {\n\t\t\t\tcase \"\": // No prefix means that the operation is unioned into the result\n\t\t\t\t\treturn function(results,source,widget) {\n\t\t\t\t\t\t$tw.utils.pushTop(results,operationSubFunction(source,widget));\n\t\t\t\t\t};\n\t\t\t\tcase \"-\": // The results of this operation are removed from the main result\n\t\t\t\t\treturn function(results,source,widget) {\n\t\t\t\t\t\t$tw.utils.removeArrayEntries(results,operationSubFunction(source,widget));\n\t\t\t\t\t};\n\t\t\t\tcase \"+\": // This operation is applied to the main results so far\n\t\t\t\t\treturn function(results,source,widget) {\n\t\t\t\t\t\t// This replaces all the elements of the array, but keeps the actual array so that references to it are preserved\n\t\t\t\t\t\tsource = self.makeTiddlerIterator(results);\n\t\t\t\t\t\tresults.splice(0,results.length);\n\t\t\t\t\t\t$tw.utils.pushTop(results,operationSubFunction(source,widget));\n\t\t\t\t\t};\n\t\t\t}\n\t\t})());\n\t});\n\t// Return a function that applies the operations to a source iterator of tiddler titles\n\treturn $tw.perf.measure(\"filter\",function filterFunction(source,widget) {\n\t\tif(!source) {\n\t\t\tsource = self.each;\n\t\t} else if(typeof source === \"object\") { // Array or hashmap\n\t\t\tsource = self.makeTiddlerIterator(source);\n\t\t}\n\t\tvar results = [];\n\t\t$tw.utils.each(operationFunctions,function(operationFunction) {\n\t\t\toperationFunction(results,source,widget);\n\t\t});\n\t\treturn results;\n\t});\n};\n\n})();\n",
            "title": "$:/core/modules/filters.js",
            "type": "application/javascript",
            "module-type": "wikimethod"
        },
        "$:/core/modules/info/platform.js": {
            "text": "/*\\\ntitle: $:/core/modules/info/platform.js\ntype: application/javascript\nmodule-type: info\n\nInitialise basic platform $:/info/ tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.getInfoTiddlerFields = function() {\n\tvar mapBoolean = function(value) {return value ? \"yes\" : \"no\";},\n\t\tinfoTiddlerFields = [];\n\t// Basics\n\tinfoTiddlerFields.push({title: \"$:/info/browser\", text: mapBoolean(!!$tw.browser)});\n\tinfoTiddlerFields.push({title: \"$:/info/node\", text: mapBoolean(!!$tw.node)});\n\treturn infoTiddlerFields;\n};\n\n})();\n",
            "title": "$:/core/modules/info/platform.js",
            "type": "application/javascript",
            "module-type": "info"
        },
        "$:/core/modules/keyboard.js": {
            "text": "/*\\\ntitle: $:/core/modules/keyboard.js\ntype: application/javascript\nmodule-type: global\n\nKeyboard handling utilities\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar namedKeys = {\n\t\"cancel\": 3,\n\t\"help\": 6,\n\t\"backspace\": 8,\n\t\"tab\": 9,\n\t\"clear\": 12,\n\t\"return\": 13,\n\t\"enter\": 13,\n\t\"pause\": 19,\n\t\"escape\": 27,\n\t\"space\": 32,\n\t\"page_up\": 33,\n\t\"page_down\": 34,\n\t\"end\": 35,\n\t\"home\": 36,\n\t\"left\": 37,\n\t\"up\": 38,\n\t\"right\": 39,\n\t\"down\": 40,\n\t\"printscreen\": 44,\n\t\"insert\": 45,\n\t\"delete\": 46,\n\t\"0\": 48,\n\t\"1\": 49,\n\t\"2\": 50,\n\t\"3\": 51,\n\t\"4\": 52,\n\t\"5\": 53,\n\t\"6\": 54,\n\t\"7\": 55,\n\t\"8\": 56,\n\t\"9\": 57,\n\t\"firefoxsemicolon\": 59,\n\t\"firefoxequals\": 61,\n\t\"a\": 65,\n\t\"b\": 66,\n\t\"c\": 67,\n\t\"d\": 68,\n\t\"e\": 69,\n\t\"f\": 70,\n\t\"g\": 71,\n\t\"h\": 72,\n\t\"i\": 73,\n\t\"j\": 74,\n\t\"k\": 75,\n\t\"l\": 76,\n\t\"m\": 77,\n\t\"n\": 78,\n\t\"o\": 79,\n\t\"p\": 80,\n\t\"q\": 81,\n\t\"r\": 82,\n\t\"s\": 83,\n\t\"t\": 84,\n\t\"u\": 85,\n\t\"v\": 86,\n\t\"w\": 87,\n\t\"x\": 88,\n\t\"y\": 89,\n\t\"z\": 90,\n\t\"numpad0\": 96,\n\t\"numpad1\": 97,\n\t\"numpad2\": 98,\n\t\"numpad3\": 99,\n\t\"numpad4\": 100,\n\t\"numpad5\": 101,\n\t\"numpad6\": 102,\n\t\"numpad7\": 103,\n\t\"numpad8\": 104,\n\t\"numpad9\": 105,\n\t\"multiply\": 106,\n\t\"add\": 107,\n\t\"separator\": 108,\n\t\"subtract\": 109,\n\t\"decimal\": 110,\n\t\"divide\": 111,\n\t\"f1\": 112,\n\t\"f2\": 113,\n\t\"f3\": 114,\n\t\"f4\": 115,\n\t\"f5\": 116,\n\t\"f6\": 117,\n\t\"f7\": 118,\n\t\"f8\": 119,\n\t\"f9\": 120,\n\t\"f10\": 121,\n\t\"f11\": 122,\n\t\"f12\": 123,\n\t\"f13\": 124,\n\t\"f14\": 125,\n\t\"f15\": 126,\n\t\"f16\": 127,\n\t\"f17\": 128,\n\t\"f18\": 129,\n\t\"f19\": 130,\n\t\"f20\": 131,\n\t\"f21\": 132,\n\t\"f22\": 133,\n\t\"f23\": 134,\n\t\"f24\": 135,\n\t\"firefoxminus\": 173,\n\t\"semicolon\": 186,\n\t\"equals\": 187,\n\t\"comma\": 188,\n\t\"dash\": 189,\n\t\"period\": 190,\n\t\"slash\": 191,\n\t\"backquote\": 192,\n\t\"openbracket\": 219,\n\t\"backslash\": 220,\n\t\"closebracket\": 221,\n\t\"quote\": 222\n};\n\nfunction KeyboardManager(options) {\n\tvar self = this;\n\toptions = options || \"\";\n\t// Save the named key hashmap\n\tthis.namedKeys = namedKeys;\n\t// Create a reverse mapping of code to keyname\n\tthis.keyNames = [];\n\t$tw.utils.each(namedKeys,function(keyCode,name) {\n\t\tself.keyNames[keyCode] = name.substr(0,1).toUpperCase() + name.substr(1);\n\t});\n\t// Save the platform-specific name of the \"meta\" key\n\tthis.metaKeyName = $tw.platform.isMac ? 
\"cmd-\" : \"win-\";\n}\n\n/*\nReturn an array of keycodes for the modifier keys ctrl, shift, alt, meta\n*/\nKeyboardManager.prototype.getModifierKeys = function() {\n\treturn [\n\t\t16, // Shift\n\t\t17, // Ctrl\n\t\t18, // Alt\n\t\t20, // CAPS LOCK\n\t\t91, // Meta (left)\n\t\t93, // Meta (right)\n\t\t224 // Meta (Firefox)\n\t]\n};\n\n/*\nParses a key descriptor into the structure:\n{\n\tkeyCode: numeric keycode\n\tshiftKey: boolean\n\taltKey: boolean\n\tctrlKey: boolean\n\tmetaKey: boolean\n}\nKey descriptors have the following format:\n\tctrl+enter\n\tctrl+shift+alt+A\n*/\nKeyboardManager.prototype.parseKeyDescriptor = function(keyDescriptor) {\n\tvar components = keyDescriptor.split(/\\+|\\-/),\n\t\tinfo = {\n\t\t\tkeyCode: 0,\n\t\t\tshiftKey: false,\n\t\t\taltKey: false,\n\t\t\tctrlKey: false,\n\t\t\tmetaKey: false\n\t\t};\n\tfor(var t=0; t<components.length; t++) {\n\t\tvar s = components[t].toLowerCase(),\n\t\t\tc = s.charCodeAt(0);\n\t\t// Look for modifier keys\n\t\tif(s === \"ctrl\") {\n\t\t\tinfo.ctrlKey = true;\n\t\t} else if(s === \"shift\") {\n\t\t\tinfo.shiftKey = true;\n\t\t} else if(s === \"alt\") {\n\t\t\tinfo.altKey = true;\n\t\t} else if(s === \"meta\" || s === \"cmd\" || s === \"win\") {\n\t\t\tinfo.metaKey = true;\n\t\t}\n\t\t// Replace named keys with their code\n\t\tif(this.namedKeys[s]) {\n\t\t\tinfo.keyCode = this.namedKeys[s];\n\t\t}\n\t}\n\tif(info.keyCode) {\n\t\treturn info;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nParse a list of key descriptors into an array of keyInfo objects. The key descriptors can be passed as an array of strings or a space separated string\n*/\nKeyboardManager.prototype.parseKeyDescriptors = function(keyDescriptors,options) {\n\tvar self = this;\n\toptions = options || {};\n\toptions.stack = options.stack || [];\n\tvar wiki = options.wiki || $tw.wiki;\n\tif(typeof keyDescriptors === \"string\" && keyDescriptors === \"\") {\n\t\treturn [];\n\t}\n\tif(!$tw.utils.isArray(keyDescriptors)) {\n\t\tkeyDescriptors = keyDescriptors.split(\" \");\n\t}\n\tvar result = [];\n\t$tw.utils.each(keyDescriptors,function(keyDescriptor) {\n\t\t// Look for a named shortcut\n\t\tif(keyDescriptor.substr(0,2) === \"((\" && keyDescriptor.substr(-2,2) === \"))\") {\n\t\t\tif(options.stack.indexOf(keyDescriptor) === -1) {\n\t\t\t\toptions.stack.push(keyDescriptor);\n\t\t\t\tvar name = keyDescriptor.substring(2,keyDescriptor.length - 2),\n\t\t\t\t\tlookupName = function(configName) {\n\t\t\t\t\t\tvar keyDescriptors = wiki.getTiddlerText(\"$:/config/\" + configName + \"/\" + name);\n\t\t\t\t\t\tif(keyDescriptors) {\n\t\t\t\t\t\t\tresult.push.apply(result,self.parseKeyDescriptors(keyDescriptors,options));\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\tlookupName(\"shortcuts\");\n\t\t\t\tlookupName($tw.platform.isMac ? \"shortcuts-mac\" : \"shortcuts-not-mac\");\n\t\t\t\tlookupName($tw.platform.isWindows ? \"shortcuts-windows\" : \"shortcuts-not-windows\");\n\t\t\t\tlookupName($tw.platform.isLinux ? \"shortcuts-linux\" : \"shortcuts-not-linux\");\n\t\t\t}\n\t\t} else {\n\t\t\tresult.push(self.parseKeyDescriptor(keyDescriptor));\n\t\t}\n\t});\n\treturn result;\n};\n\nKeyboardManager.prototype.getPrintableShortcuts = function(keyInfoArray) {\n\tvar self = this,\n\t\tresult = [];\n\t$tw.utils.each(keyInfoArray,function(keyInfo) {\n\t\tif(keyInfo) {\n\t\t\tresult.push((keyInfo.ctrlKey ? \"ctrl-\" : \"\") + \n\t\t\t\t   (keyInfo.shiftKey ? \"shift-\" : \"\") + \n\t\t\t\t   (keyInfo.altKey ? \"alt-\" : \"\") + \n\t\t\t\t   (keyInfo.metaKey ? 
self.metaKeyName : \"\") + \n\t\t\t\t   (self.keyNames[keyInfo.keyCode]));\n\t\t}\n\t});\n\treturn result;\n}\n\nKeyboardManager.prototype.checkKeyDescriptor = function(event,keyInfo) {\n\treturn keyInfo &&\n\t\t\tevent.keyCode === keyInfo.keyCode && \n\t\t\tevent.shiftKey === keyInfo.shiftKey && \n\t\t\tevent.altKey === keyInfo.altKey && \n\t\t\tevent.ctrlKey === keyInfo.ctrlKey && \n\t\t\tevent.metaKey === keyInfo.metaKey;\n};\n\nKeyboardManager.prototype.checkKeyDescriptors = function(event,keyInfoArray) {\n\tfor(var t=0; t<keyInfoArray.length; t++) {\n\t\tif(this.checkKeyDescriptor(event,keyInfoArray[t])) {\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n};\n\nexports.KeyboardManager = KeyboardManager;\n\n})();\n",
            "title": "$:/core/modules/keyboard.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/language.js": {
            "text": "/*\\\ntitle: $:/core/modules/language.js\ntype: application/javascript\nmodule-type: global\n\nThe $tw.Language() manages translateable strings\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nCreate an instance of the language manager. Options include:\nwiki: wiki from which to retrieve translation tiddlers\n*/\nfunction Language(options) {\n\toptions = options || \"\";\n\tthis.wiki = options.wiki || $tw.wiki;\n}\n\n/*\nReturn a wikified translateable string. The title is automatically prefixed with \"$:/language/\"\nOptions include:\nvariables: optional hashmap of variables to supply to the language wikification\n*/\nLanguage.prototype.getString = function(title,options) {\n\toptions = options || {};\n\ttitle = \"$:/language/\" + title;\n\treturn this.wiki.renderTiddler(\"text/plain\",title,{variables: options.variables});\n};\n\n/*\nReturn a raw, unwikified translateable string. The title is automatically prefixed with \"$:/language/\"\n*/\nLanguage.prototype.getRawString = function(title) {\n\ttitle = \"$:/language/\" + title;\n\treturn this.wiki.getTiddlerText(title);\n};\n\nexports.Language = Language;\n\n})();\n",
            "title": "$:/core/modules/language.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/macros/changecount.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/changecount.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to return the changecount for the current tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"changecount\";\n\nexports.params = [];\n\n/*\nRun the macro\n*/\nexports.run = function() {\n\treturn this.wiki.getChangeCount(this.getVariable(\"currentTiddler\")) + \"\";\n};\n\n})();\n",
            "title": "$:/core/modules/macros/changecount.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/contrastcolour.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/contrastcolour.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to choose which of two colours has the highest contrast with a base colour\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"contrastcolour\";\n\nexports.params = [\n\t{name: \"target\"},\n\t{name: \"fallbackTarget\"},\n\t{name: \"colourA\"},\n\t{name: \"colourB\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(target,fallbackTarget,colourA,colourB) {\n\tvar rgbTarget = $tw.utils.parseCSSColor(target) || $tw.utils.parseCSSColor(fallbackTarget);\n\tif(!rgbTarget) {\n\t\treturn colourA;\n\t}\n\tvar rgbColourA = $tw.utils.parseCSSColor(colourA),\n\t\trgbColourB = $tw.utils.parseCSSColor(colourB);\n\tif(rgbColourA && !rgbColourB) {\n\t\treturn rgbColourA;\n\t}\n\tif(rgbColourB && !rgbColourA) {\n\t\treturn rgbColourB;\n\t}\n\tif(!rgbColourA && !rgbColourB) {\n\t\t// If neither colour is readable, return a crude inverse of the target\n\t\treturn [255 - rgbTarget[0],255 - rgbTarget[1],255 - rgbTarget[2],rgbTarget[3]];\n\t}\n\t// Colour brightness formula derived from http://www.w3.org/WAI/ER/WD-AERT/#color-contrast\n\tvar brightnessTarget = rgbTarget[0] * 0.299 + rgbTarget[1] * 0.587 + rgbTarget[2] * 0.114,\n\t\tbrightnessA = rgbColourA[0] * 0.299 + rgbColourA[1] * 0.587 + rgbColourA[2] * 0.114,\n\t\tbrightnessB = rgbColourB[0] * 0.299 + rgbColourB[1] * 0.587 + rgbColourB[2] * 0.114;\n\treturn Math.abs(brightnessTarget - brightnessA) > Math.abs(brightnessTarget - brightnessB) ? colourA : colourB;\n};\n\n})();\n",
            "title": "$:/core/modules/macros/contrastcolour.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/csvtiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/csvtiddlers.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to output tiddlers matching a filter to CSV\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"csvtiddlers\";\n\nexports.params = [\n\t{name: \"filter\"},\n\t{name: \"format\"},\n];\n\n/*\nRun the macro\n*/\nexports.run = function(filter,format) {\n\tvar self = this,\n\t\ttiddlers = this.wiki.filterTiddlers(filter),\n\t\ttiddler,\n\t\tfields = [],\n\t\tt,f;\n\t// Collect all the fields\n\tfor(t=0;t<tiddlers.length; t++) {\n\t\ttiddler = this.wiki.getTiddler(tiddlers[t]);\n\t\tfor(f in tiddler.fields) {\n\t\t\tif(fields.indexOf(f) === -1) {\n\t\t\t\tfields.push(f);\n\t\t\t}\n\t\t}\n\t}\n\t// Sort the fields and bring the standard ones to the front\n\tfields.sort();\n\t\"title text modified modifier created creator\".split(\" \").reverse().forEach(function(value,index) {\n\t\tvar p = fields.indexOf(value);\n\t\tif(p !== -1) {\n\t\t\tfields.splice(p,1);\n\t\t\tfields.unshift(value)\n\t\t}\n\t});\n\t// Output the column headings\n\tvar output = [], row = [];\n\tfields.forEach(function(value) {\n\t\trow.push(quoteAndEscape(value))\n\t});\n\toutput.push(row.join(\",\"));\n\t// Output each tiddler\n\tfor(var t=0;t<tiddlers.length; t++) {\n\t\trow = [];\n\t\ttiddler = this.wiki.getTiddler(tiddlers[t]);\n\t\t\tfor(f=0; f<fields.length; f++) {\n\t\t\t\trow.push(quoteAndEscape(tiddler ? tiddler.getFieldString(fields[f]) || \"\" : \"\"));\n\t\t\t}\n\t\toutput.push(row.join(\",\"));\n\t}\n\treturn output.join(\"\\n\");\n};\n\nfunction quoteAndEscape(value) {\n\treturn \"\\\"\" + value.replace(/\"/mg,\"\\\"\\\"\") + \"\\\"\";\n}\n\n})();\n",
            "title": "$:/core/modules/macros/csvtiddlers.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/displayshortcuts.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/displayshortcuts.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to display a list of keyboard shortcuts in human readable form. Notably, it resolves named shortcuts like `((bold))` to the underlying keystrokes.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"displayshortcuts\";\n\nexports.params = [\n\t{name: \"shortcuts\"},\n\t{name: \"prefix\"},\n\t{name: \"separator\"},\n\t{name: \"suffix\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(shortcuts,prefix,separator,suffix) {\n\tvar shortcutArray = $tw.keyboardManager.getPrintableShortcuts($tw.keyboardManager.parseKeyDescriptors(shortcuts,{\n\t\twiki: this.wiki\n\t}));\n\tif(shortcutArray.length > 0) {\n\t\tshortcutArray.sort(function(a,b) {\n\t\t    return a.toLowerCase().localeCompare(b.toLowerCase());\n\t\t})\n\t\treturn prefix + shortcutArray.join(separator) + suffix;\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/macros/displayshortcuts.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/dumpvariables.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/dumpvariables.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to dump all active variable values\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"dumpvariables\";\n\nexports.params = [\n];\n\n/*\nRun the macro\n*/\nexports.run = function() {\n\tvar output = [\"|!Variable |!Value |\"],\n\t\tvariables = [], variable;\n\tfor(variable in this.variables) {\n\t\tvariables.push(variable);\n\t}\n\tvariables.sort();\n\tfor(var index=0; index<variables.length; index++) {\n\t\tvar variable = variables[index];\n\t\toutput.push(\"|\" + variable + \" |<input size=50 value=<<\" + variable + \">>/> |\")\n\t}\n\treturn output.join(\"\\n\");\n};\n\n})();\n",
            "title": "$:/core/modules/macros/dumpvariables.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/jsontiddlers.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/jsontiddlers.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to output tiddlers matching a filter to JSON\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"jsontiddlers\";\n\nexports.params = [\n\t{name: \"filter\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(filter) {\n\tvar tiddlers = this.wiki.filterTiddlers(filter),\n\t\tdata = [];\n\tfor(var t=0;t<tiddlers.length; t++) {\n\t\tvar tiddler = this.wiki.getTiddler(tiddlers[t]);\n\t\tif(tiddler) {\n\t\t\tvar fields = new Object();\n\t\t\tfor(var field in tiddler.fields) {\n\t\t\t\tfields[field] = tiddler.getFieldString(field);\n\t\t\t}\n\t\t\tdata.push(fields);\n\t\t}\n\t}\n\treturn JSON.stringify(data,null,$tw.config.preferences.jsonSpaces);\n};\n\n})();\n",
            "title": "$:/core/modules/macros/jsontiddlers.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/makedatauri.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/makedatauri.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to convert a string of text to a data URI\n\n<<makedatauri text:\"Text to be converted\" type:\"text/vnd.tiddlywiki\">>\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"makedatauri\";\n\nexports.params = [\n\t{name: \"text\"},\n\t{name: \"type\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(text,type) {\n\treturn $tw.utils.makeDataUri(text,type);\n};\n\n})();\n",
            "title": "$:/core/modules/macros/makedatauri.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/now.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/now.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to return a formatted version of the current time\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"now\";\n\nexports.params = [\n\t{name: \"format\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(format) {\n\treturn $tw.utils.formatDateString(new Date(),format || \"0hh:0mm, DDth MMM YYYY\");\n};\n\n})();\n",
            "title": "$:/core/modules/macros/now.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/qualify.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/qualify.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to qualify a state tiddler title according\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"qualify\";\n\nexports.params = [\n\t{name: \"title\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(title) {\n\treturn title + \"-\" + this.getStateQualifier();\n};\n\n})();\n",
            "title": "$:/core/modules/macros/qualify.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/resolvepath.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/resolvepath.js\ntype: application/javascript\nmodule-type: macro\n\nResolves a relative path for an absolute rootpath.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"resolvepath\";\n\nexports.params = [\n\t{name: \"source\"},\n\t{name: \"root\"}\n];\n\n/*\nRun the macro\n*/\nexports.run = function(source, root) {\n\treturn $tw.utils.resolvePath(source, root);\n};\n\n})();\n",
            "title": "$:/core/modules/macros/resolvepath.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/macros/version.js": {
            "text": "/*\\\ntitle: $:/core/modules/macros/version.js\ntype: application/javascript\nmodule-type: macro\n\nMacro to return the TiddlyWiki core version number\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInformation about this macro\n*/\n\nexports.name = \"version\";\n\nexports.params = [];\n\n/*\nRun the macro\n*/\nexports.run = function() {\n\treturn $tw.version;\n};\n\n})();\n",
            "title": "$:/core/modules/macros/version.js",
            "type": "application/javascript",
            "module-type": "macro"
        },
        "$:/core/modules/parsers/audioparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/audioparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe audio parser parses an audio tiddler into an embeddable HTML element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar AudioParser = function(type,text,options) {\n\tvar element = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"audio\",\n\t\t\tattributes: {\n\t\t\t\tcontrols: {type: \"string\", value: \"controls\"}\n\t\t\t}\n\t\t},\n\t\tsrc;\n\tif(options._canonical_uri) {\n\t\telement.attributes.src = {type: \"string\", value: options._canonical_uri};\n\t} else if(text) {\n\t\telement.attributes.src = {type: \"string\", value: \"data:\" + type + \";base64,\" + text};\n\t}\n\tthis.tree = [element];\n};\n\nexports[\"audio/ogg\"] = AudioParser;\nexports[\"audio/mpeg\"] = AudioParser;\nexports[\"audio/mp3\"] = AudioParser;\nexports[\"audio/mp4\"] = AudioParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/audioparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/csvparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/csvparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe CSV text parser processes CSV files into a table wrapped in a scrollable widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar CsvParser = function(type,text,options) {\n\t// Table framework\n\tthis.tree = [{\n\t\t\"type\": \"scrollable\", \"children\": [{\n\t\t\t\"type\": \"element\", \"tag\": \"table\", \"children\": [{\n\t\t\t\t\"type\": \"element\", \"tag\": \"tbody\", \"children\": []\n\t\t\t}], \"attributes\": {\n\t\t\t\t\"class\": {\"type\": \"string\", \"value\": \"tc-csv-table\"}\n\t\t\t}\n\t\t}]\n\t}];\n\t// Split the text into lines\n\tvar lines = text.split(/\\r?\\n/mg),\n\t\ttag = \"th\";\n\tfor(var line=0; line<lines.length; line++) {\n\t\tvar lineText = lines[line];\n\t\tif(lineText) {\n\t\t\tvar row = {\n\t\t\t\t\t\"type\": \"element\", \"tag\": \"tr\", \"children\": []\n\t\t\t\t};\n\t\t\tvar columns = lineText.split(\",\");\n\t\t\tfor(var column=0; column<columns.length; column++) {\n\t\t\t\trow.children.push({\n\t\t\t\t\t\t\"type\": \"element\", \"tag\": tag, \"children\": [{\n\t\t\t\t\t\t\t\"type\": \"text\",\n\t\t\t\t\t\t\t\"text\": columns[column]\n\t\t\t\t\t\t}]\n\t\t\t\t\t});\n\t\t\t}\n\t\t\ttag = \"td\";\n\t\t\tthis.tree[0].children[0].children[0].children.push(row);\n\t\t}\n\t}\n};\n\nexports[\"text/csv\"] = CsvParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/csvparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/htmlparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/htmlparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe HTML parser displays text as raw HTML\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar HtmlParser = function(type,text,options) {\n\tvar src;\n\tif(options._canonical_uri) {\n\t\tsrc = options._canonical_uri;\n\t} else if(text) {\n\t\tsrc = \"data:text/html;charset=utf-8,\" + encodeURIComponent(text);\n\t}\n\tthis.tree = [{\n\t\ttype: \"element\",\n\t\ttag: \"iframe\",\n\t\tattributes: {\n\t\t\tsrc: {type: \"string\", value: src},\n\t\t\tsandbox: {type: \"string\", value: \"\"}\n\t\t}\n\t}];\n};\n\nexports[\"text/html\"] = HtmlParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/htmlparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/imageparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/imageparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe image parser parses an image into an embeddable HTML element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar ImageParser = function(type,text,options) {\n\tvar element = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"img\",\n\t\t\tattributes: {}\n\t\t},\n\t\tsrc;\n\tif(options._canonical_uri) {\n\t\telement.attributes.src = {type: \"string\", value: options._canonical_uri};\n\t\tif(type === \"application/pdf\" || type === \".pdf\") {\n\t\t\telement.tag = \"embed\";\n\t\t}\n\t} else if(text) {\n\t\tif(type === \"application/pdf\" || type === \".pdf\") {\n\t\t\telement.attributes.src = {type: \"string\", value: \"data:application/pdf;base64,\" + text};\n\t\t\telement.tag = \"embed\";\n\t\t} else if(type === \"image/svg+xml\" || type === \".svg\") {\n\t\t\telement.attributes.src = {type: \"string\", value: \"data:image/svg+xml,\" + encodeURIComponent(text)};\n\t\t} else {\n\t\t\telement.attributes.src = {type: \"string\", value: \"data:\" + type + \";base64,\" + text};\n\t\t}\n\t}\n\tthis.tree = [element];\n};\n\nexports[\"image/svg+xml\"] = ImageParser;\nexports[\"image/jpg\"] = ImageParser;\nexports[\"image/jpeg\"] = ImageParser;\nexports[\"image/png\"] = ImageParser;\nexports[\"image/gif\"] = ImageParser;\nexports[\"application/pdf\"] = ImageParser;\nexports[\"image/x-icon\"] = ImageParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/imageparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/utils/parseutils.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/parseutils.js\ntype: application/javascript\nmodule-type: utils\n\nUtility functions concerned with parsing text into tokens.\n\nMost functions have the following pattern:\n\n* The parameters are:\n** `source`: the source string being parsed\n** `pos`: the current parse position within the string\n** Any further parameters are used to identify the token that is being parsed\n* The return value is:\n** null if the token was not found at the specified position\n** an object representing the token with the following standard fields:\n*** `type`: string indicating the type of the token\n*** `start`: start position of the token in the source string\n*** `end`: end position of the token in the source string\n*** Any further fields required to describe the token\n\nThe exception is `skipWhiteSpace`, which just returns the position after the whitespace.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nLook for a whitespace token. Returns null if not found, otherwise returns {type: \"whitespace\", start:, end:,}\n*/\nexports.parseWhiteSpace = function(source,pos) {\n\tvar p = pos,c;\n\twhile(true) {\n\t\tc = source.charAt(p);\n\t\tif((c === \" \") || (c === \"\\f\") || (c === \"\\n\") || (c === \"\\r\") || (c === \"\\t\") || (c === \"\\v\") || (c === \"\\u00a0\")) { // Ignores some obscure unicode spaces\n\t\t\tp++;\n\t\t} else {\n\t\t\tbreak;\n\t\t}\n\t}\n\tif(p === pos) {\n\t\treturn null;\n\t} else {\n\t\treturn {\n\t\t\ttype: \"whitespace\",\n\t\t\tstart: pos,\n\t\t\tend: p\n\t\t}\n\t}\n};\n\n/*\nConvenience wrapper for parseWhiteSpace. Returns the position after the whitespace\n*/\nexports.skipWhiteSpace = function(source,pos) {\n\tvar c;\n\twhile(true) {\n\t\tc = source.charAt(pos);\n\t\tif((c === \" \") || (c === \"\\f\") || (c === \"\\n\") || (c === \"\\r\") || (c === \"\\t\") || (c === \"\\v\") || (c === \"\\u00a0\")) { // Ignores some obscure unicode spaces\n\t\t\tpos++;\n\t\t} else {\n\t\t\treturn pos;\n\t\t}\n\t}\n};\n\n/*\nLook for a given string token. Returns null if not found, otherwise returns {type: \"token\", value:, start:, end:,}\n*/\nexports.parseTokenString = function(source,pos,token) {\n\tvar match = source.indexOf(token,pos) === pos;\n\tif(match) {\n\t\treturn {\n\t\t\ttype: \"token\",\n\t\t\tvalue: token,\n\t\t\tstart: pos,\n\t\t\tend: pos + token.length\n\t\t};\n\t}\n\treturn null;\n};\n\n/*\nLook for a token matching a regex. Returns null if not found, otherwise returns {type: \"regexp\", match:, start:, end:,}\n*/\nexports.parseTokenRegExp = function(source,pos,reToken) {\n\tvar node = {\n\t\ttype: \"regexp\",\n\t\tstart: pos\n\t};\n\treToken.lastIndex = pos;\n\tnode.match = reToken.exec(source);\n\tif(node.match && node.match.index === pos) {\n\t\tnode.end = pos + node.match[0].length;\n\t\treturn node;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nLook for a string literal. Returns null if not found, otherwise returns {type: \"string\", value:, start:, end:,}\n*/\nexports.parseStringLiteral = function(source,pos) {\n\tvar node = {\n\t\ttype: \"string\",\n\t\tstart: pos\n\t};\n\tvar reString = /(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\")|(?:'([^']*)')/g;\n\treString.lastIndex = pos;\n\tvar match = reString.exec(source);\n\tif(match && match.index === pos) {\n\t\tnode.value = match[1] !== undefined ? match[1] :(\n\t\t\tmatch[2] !== undefined ? 
match[2] : match[3] \n\t\t\t\t\t);\n\t\tnode.end = pos + match[0].length;\n\t\treturn node;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nLook for a macro invocation parameter. Returns null if not found, or {type: \"macro-parameter\", name:, value:, start:, end:}\n*/\nexports.parseMacroParameter = function(source,pos) {\n\tvar node = {\n\t\ttype: \"macro-parameter\",\n\t\tstart: pos\n\t};\n\t// Define our regexp\n\tvar reMacroParameter = /(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\\s>\"'=]+)))/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the parameter\n\tvar token = $tw.utils.parseTokenRegExp(source,pos,reMacroParameter);\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Get the parameter details\n\tnode.value = token.match[2] !== undefined ? token.match[2] : (\n\t\t\t\t\ttoken.match[3] !== undefined ? token.match[3] : (\n\t\t\t\t\t\ttoken.match[4] !== undefined ? token.match[4] : (\n\t\t\t\t\t\t\ttoken.match[5] !== undefined ? token.match[5] : (\n\t\t\t\t\t\t\t\ttoken.match[6] !== undefined ? token.match[6] : (\n\t\t\t\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t);\n\tif(token.match[1]) {\n\t\tnode.name = token.match[1];\n\t}\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n/*\nLook for a macro invocation. Returns null if not found, or {type: \"macrocall\", name:, parameters:, start:, end:}\n*/\nexports.parseMacroInvocation = function(source,pos) {\n\tvar node = {\n\t\ttype: \"macrocall\",\n\t\tstart: pos,\n\t\tparams: []\n\t};\n\t// Define our regexps\n\tvar reMacroName = /([^\\s>\"'=]+)/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a double less than sign\n\tvar token = $tw.utils.parseTokenString(source,pos,\"<<\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Get the macro name\n\tvar name = $tw.utils.parseTokenRegExp(source,pos,reMacroName);\n\tif(!name) {\n\t\treturn null;\n\t}\n\tnode.name = name.match[1];\n\tpos = name.end;\n\t// Process parameters\n\tvar parameter = $tw.utils.parseMacroParameter(source,pos);\n\twhile(parameter) {\n\t\tnode.params.push(parameter);\n\t\tpos = parameter.end;\n\t\t// Get the next parameter\n\t\tparameter = $tw.utils.parseMacroParameter(source,pos);\n\t}\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a double greater than sign\n\ttoken = $tw.utils.parseTokenString(source,pos,\">>\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n/*\nLook for an HTML attribute definition. 
Returns null if not found, otherwise returns {type: \"attribute\", name:, valueType: \"string|indirect|macro\", value:, start:, end:,}\n*/\nexports.parseAttribute = function(source,pos) {\n\tvar node = {\n\t\tstart: pos\n\t};\n\t// Define our regexps\n\tvar reAttributeName = /([^\\/\\s>\"'=]+)/g,\n\t\treUnquotedAttribute = /([^\\/\\s<>\"'=]+)/g,\n\t\treIndirectValue = /\\{\\{([^\\}]+)\\}\\}/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Get the attribute name\n\tvar name = $tw.utils.parseTokenRegExp(source,pos,reAttributeName);\n\tif(!name) {\n\t\treturn null;\n\t}\n\tnode.name = name.match[1];\n\tpos = name.end;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for an equals sign\n\tvar token = $tw.utils.parseTokenString(source,pos,\"=\");\n\tif(token) {\n\t\tpos = token.end;\n\t\t// Skip whitespace\n\t\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t\t// Look for a string literal\n\t\tvar stringLiteral = $tw.utils.parseStringLiteral(source,pos);\n\t\tif(stringLiteral) {\n\t\t\tpos = stringLiteral.end;\n\t\t\tnode.type = \"string\";\n\t\t\tnode.value = stringLiteral.value;\n\t\t} else {\n\t\t\t// Look for an indirect value\n\t\t\tvar indirectValue = $tw.utils.parseTokenRegExp(source,pos,reIndirectValue);\n\t\t\tif(indirectValue) {\n\t\t\t\tpos = indirectValue.end;\n\t\t\t\tnode.type = \"indirect\";\n\t\t\t\tnode.textReference = indirectValue.match[1];\n\t\t\t} else {\n\t\t\t\t// Look for a unquoted value\n\t\t\t\tvar unquotedValue = $tw.utils.parseTokenRegExp(source,pos,reUnquotedAttribute);\n\t\t\t\tif(unquotedValue) {\n\t\t\t\t\tpos = unquotedValue.end;\n\t\t\t\t\tnode.type = \"string\";\n\t\t\t\t\tnode.value = unquotedValue.match[1];\n\t\t\t\t} else {\n\t\t\t\t\t// Look for a macro invocation value\n\t\t\t\t\tvar macroInvocation = $tw.utils.parseMacroInvocation(source,pos);\n\t\t\t\t\tif(macroInvocation) {\n\t\t\t\t\t\tpos = macroInvocation.end;\n\t\t\t\t\t\tnode.type = \"macro\";\n\t\t\t\t\t\tnode.value = macroInvocation;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.type = \"string\";\n\t\t\t\t\t\tnode.value = \"true\";\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnode.type = \"string\";\n\t\tnode.value = \"true\";\n\t}\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n})();\n",
            "title": "$:/core/modules/utils/parseutils.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/parsers/textparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/textparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe plain text parser processes blocks of source text into a degenerate parse tree consisting of a single text node\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar TextParser = function(type,text,options) {\n\tthis.tree = [{\n\t\ttype: \"codeblock\",\n\t\tattributes: {\n\t\t\tcode: {type: \"string\", value: text},\n\t\t\tlanguage: {type: \"string\", value: type}\n\t\t}\n\t}];\n};\n\nexports[\"text/plain\"] = TextParser;\nexports[\"text/x-tiddlywiki\"] = TextParser;\nexports[\"application/javascript\"] = TextParser;\nexports[\"application/json\"] = TextParser;\nexports[\"text/css\"] = TextParser;\nexports[\"application/x-tiddler-dictionary\"] = TextParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/textparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/videoparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/videoparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe video parser parses a video tiddler into an embeddable HTML element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar AudioParser = function(type,text,options) {\n\tvar element = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"video\",\n\t\t\tattributes: {\n\t\t\t\tcontrols: {type: \"string\", value: \"controls\"}\n\t\t\t}\n\t\t},\n\t\tsrc;\n\tif(options._canonical_uri) {\n\t\telement.attributes.src = {type: \"string\", value: options._canonical_uri};\n\t} else if(text) {\n\t\telement.attributes.src = {type: \"string\", value: \"data:\" + type + \";base64,\" + text};\n\t}\n\tthis.tree = [element];\n};\n\nexports[\"video/mp4\"] = AudioParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/videoparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/wikiparser/rules/codeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/codeblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for code blocks. For example:\n\n```\n\t```\n\tThis text will not be //wikified//\n\t```\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"codeblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match and get language if defined\n\tthis.matchRegExp = /```([\\w-]*)\\r?\\n/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /(\\r?\\n```$)/mg;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Look for the end of the block\n\treEnd.lastIndex = this.parser.pos;\n\tvar match = reEnd.exec(this.parser.source),\n\t\ttext;\n\t// Process the block\n\tif(match) {\n\t\ttext = this.parser.source.substring(this.parser.pos,match.index);\n\t\tthis.parser.pos = match.index + match[0].length;\n\t} else {\n\t\ttext = this.parser.source.substr(this.parser.pos);\n\t\tthis.parser.pos = this.parser.sourceLength;\n\t}\n\t// Return the $codeblock widget\n\treturn [{\n\t\t\ttype: \"codeblock\",\n\t\t\tattributes: {\n\t\t\t\t\tcode: {type: \"string\", value: text},\n\t\t\t\t\tlanguage: {type: \"string\", value: this.match[1]}\n\t\t\t}\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/codeblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/codeinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/codeinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for code runs. For example:\n\n```\n\tThis is a `code run`.\n\tThis is another ``code run``\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"codeinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(``?)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar reEnd = new RegExp(this.match[1], \"mg\");\n\t// Look for the end marker\n\treEnd.lastIndex = this.parser.pos;\n\tvar match = reEnd.exec(this.parser.source),\n\t\ttext;\n\t// Process the text\n\tif(match) {\n\t\ttext = this.parser.source.substring(this.parser.pos,match.index);\n\t\tthis.parser.pos = match.index + match[0].length;\n\t} else {\n\t\ttext = this.parser.source.substr(this.parser.pos);\n\t\tthis.parser.pos = this.parser.sourceLength;\n\t}\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"code\",\n\t\tchildren: [{\n\t\t\ttype: \"text\",\n\t\t\ttext: text\n\t\t}]\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/codeinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/commentblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/commentblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for HTML comments. For example:\n\n```\n<!-- This is a comment -->\n```\n\nNote that the syntax for comments is simplified to an opening \"<!--\" sequence and a closing \"-->\" sequence -- HTML itself implements a more complex format (see http://ostermiller.org/findhtmlcomment.html)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"commentblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\tthis.matchRegExp = /<!--/mg;\n\tthis.endMatchRegExp = /-->/mg;\n};\n\nexports.findNextMatch = function(startPos) {\n\tthis.matchRegExp.lastIndex = startPos;\n\tthis.match = this.matchRegExp.exec(this.parser.source);\n\tif(this.match) {\n\t\tthis.endMatchRegExp.lastIndex = startPos + this.match[0].length;\n\t\tthis.endMatch = this.endMatchRegExp.exec(this.parser.source);\n\t\tif(this.endMatch) {\n\t\t\treturn this.match.index;\n\t\t}\n\t}\n\treturn undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.endMatchRegExp.lastIndex;\n\t// Don't return any elements\n\treturn [];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/commentblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/commentinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/commentinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for HTML comments. For example:\n\n```\n<!-- This is a comment -->\n```\n\nNote that the syntax for comments is simplified to an opening \"<!--\" sequence and a closing \"-->\" sequence -- HTML itself implements a more complex format (see http://ostermiller.org/findhtmlcomment.html)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"commentinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\tthis.matchRegExp = /<!--/mg;\n\tthis.endMatchRegExp = /-->/mg;\n};\n\nexports.findNextMatch = function(startPos) {\n\tthis.matchRegExp.lastIndex = startPos;\n\tthis.match = this.matchRegExp.exec(this.parser.source);\n\tif(this.match) {\n\t\tthis.endMatchRegExp.lastIndex = startPos + this.match[0].length;\n\t\tthis.endMatch = this.endMatchRegExp.exec(this.parser.source);\n\t\tif(this.endMatch) {\n\t\t\treturn this.match.index;\n\t\t}\n\t}\n\treturn undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.endMatchRegExp.lastIndex;\n\t// Don't return any elements\n\treturn [];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/commentinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/dash.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/dash.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for dashes. For example:\n\n```\nThis is an en-dash: --\n\nThis is an em-dash: ---\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"dash\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /-{2,3}(?!-)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar dash = this.match[0].length === 2 ? \"&ndash;\" : \"&mdash;\";\n\treturn [{\n\t\ttype: \"entity\",\n\t\tentity: dash\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/dash.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/bold.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/bold.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - bold. For example:\n\n```\n\tThis is ''bold'' text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except bold \n\\rules only bold \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"bold\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /''/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/''/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"strong\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/bold.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/italic.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/italic.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - italic. For example:\n\n```\n\tThis is //italic// text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except italic\n\\rules only italic\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"italic\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\/\\//mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/\\/\\//mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"em\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/italic.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/strikethrough.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/strikethrough.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - strikethrough. For example:\n\n```\n\tThis is ~~strikethrough~~ text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except strikethrough \n\\rules only strikethrough \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"strikethrough\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /~~/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/~~/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"strike\",\n\t\tchildren: tree\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/strikethrough.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/subscript.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/subscript.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - subscript. For example:\n\n```\n\tThis is ,,subscript,, text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except subscript \n\\rules only subscript \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"subscript\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /,,/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/,,/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"sub\",\n\t\tchildren: tree\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/subscript.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/superscript.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/superscript.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - superscript. For example:\n\n```\n\tThis is ^^superscript^^ text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except superscript \n\\rules only superscript \n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"superscript\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\^\\^/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/\\^\\^/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"sup\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/superscript.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/emphasis/underscore.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/emphasis/underscore.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for emphasis - underscore. For example:\n\n```\n\tThis is __underscore__ text\n```\n\nThis wikiparser can be modified using the rules eg:\n\n```\n\\rules except underscore \n\\rules only underscore\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"underscore\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /__/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\n\t// Parse the run including the terminator\n\tvar tree = this.parser.parseInlineRun(/__/mg,{eatTerminator: true});\n\n\t// Return the classed span\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"u\",\n\t\tchildren: tree\n\t}];\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/emphasis/underscore.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/entity.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/entity.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for HTML entities. For example:\n\n```\n\tThis is a copyright symbol: &copy;\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"entity\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(&#?[a-zA-Z0-9]{2,8};)/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar entityString = this.match[1];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Return the entity\n\treturn [{type: \"entity\", entity: this.match[0]}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/entity.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/extlink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/extlink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for external links. For example:\n\n```\nAn external link: http://www.tiddlywiki.com/\n\nA suppressed external link: ~http://www.tiddlyspace.com/\n```\n\nExternal links can be suppressed by preceding them with `~`.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"extlink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /~?(?:file|http|https|mailto|ftp|irc|news|data|skype):[^\\s<>{}\\[\\]`|\"\\\\^]+(?:\\/|\\b)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Create the link unless it is suppressed\n\tif(this.match[0].substr(0,1) === \"~\") {\n\t\treturn [{type: \"text\", text: this.match[0].substr(1)}];\n\t} else {\n\t\treturn [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"a\",\n\t\t\tattributes: {\n\t\t\t\thref: {type: \"string\", value: this.match[0]},\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-tiddlylink-external\"},\n\t\t\t\ttarget: {type: \"string\", value: \"_blank\"},\n\t\t\t\trel: {type: \"string\", value: \"noopener noreferrer\"}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\", text: this.match[0]\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/extlink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/filteredtranscludeblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for block-level filtered transclusion. For example:\n\n```\n{{{ [tag[docs]] }}}\n{{{ [tag[docs]] |tooltip}}}\n{{{ [tag[docs]] ||TemplateTitle}}}\n{{{ [tag[docs]] |tooltip||TemplateTitle}}}\n{{{ [tag[docs]] }}width:40;height:50;}.class.class\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"filteredtranscludeblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{\\{([^\\|]+?)(?:\\|([^\\|\\{\\}]+))?(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}([^\\}]*)\\}(?:\\.(\\S+))?(?:\\r?\\n|$)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar filter = this.match[1],\n\t\ttooltip = this.match[2],\n\t\ttemplate = $tw.utils.trim(this.match[3]),\n\t\tstyle = this.match[4],\n\t\tclasses = this.match[5];\n\t// Return the list widget\n\tvar node = {\n\t\ttype: \"list\",\n\t\tattributes: {\n\t\t\tfilter: {type: \"string\", value: filter}\n\t\t},\n\t\tisBlock: true\n\t};\n\tif(tooltip) {\n\t\tnode.attributes.tooltip = {type: \"string\", value: tooltip};\n\t}\n\tif(template) {\n\t\tnode.attributes.template = {type: \"string\", value: template};\n\t}\n\tif(style) {\n\t\tnode.attributes.style = {type: \"string\", value: style};\n\t}\n\tif(classes) {\n\t\tnode.attributes.itemClass = {type: \"string\", value: classes.split(\".\").join(\" \")};\n\t}\n\treturn [node];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/filteredtranscludeinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for inline filtered transclusion. For example:\n\n```\n{{{ [tag[docs]] }}}\n{{{ [tag[docs]] |tooltip}}}\n{{{ [tag[docs]] ||TemplateTitle}}}\n{{{ [tag[docs]] |tooltip||TemplateTitle}}}\n{{{ [tag[docs]] }}width:40;height:50;}.class.class\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"filteredtranscludeinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{\\{([^\\|]+?)(?:\\|([^\\|\\{\\}]+))?(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}([^\\}]*)\\}(?:\\.(\\S+))?/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar filter = this.match[1],\n\t\ttooltip = this.match[2],\n\t\ttemplate = $tw.utils.trim(this.match[3]),\n\t\tstyle = this.match[4],\n\t\tclasses = this.match[5];\n\t// Return the list widget\n\tvar node = {\n\t\ttype: \"list\",\n\t\tattributes: {\n\t\t\tfilter: {type: \"string\", value: filter}\n\t\t}\n\t};\n\tif(tooltip) {\n\t\tnode.attributes.tooltip = {type: \"string\", value: tooltip};\n\t}\n\tif(template) {\n\t\tnode.attributes.template = {type: \"string\", value: template};\n\t}\n\tif(style) {\n\t\tnode.attributes.style = {type: \"string\", value: style};\n\t}\n\tif(classes) {\n\t\tnode.attributes.itemClass = {type: \"string\", value: classes.split(\".\").join(\" \")};\n\t}\n\treturn [node];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/filteredtranscludeinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/hardlinebreaks.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/hardlinebreaks.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for marking areas with hard line breaks. For example:\n\n```\n\"\"\"\nThis is some text\nThat is set like\nIt is a Poem\nWhen it is\nClearly\nNot\n\"\"\"\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"hardlinebreaks\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\"\"\"(?:\\r?\\n)?/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /(\"\"\")|(\\r?\\n)/mg,\n\t\ttree = [],\n\t\tmatch;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tdo {\n\t\t// Parse the run up to the terminator\n\t\ttree.push.apply(tree,this.parser.parseInlineRun(reEnd,{eatTerminator: false}));\n\t\t// Redo the terminator match\n\t\treEnd.lastIndex = this.parser.pos;\n\t\tmatch = reEnd.exec(this.parser.source);\n\t\tif(match) {\n\t\t\tthis.parser.pos = reEnd.lastIndex;\n\t\t\t// Add a line break if the terminator was a line break\n\t\t\tif(match[2]) {\n\t\t\t\ttree.push({type: \"element\", tag: \"br\"});\n\t\t\t}\n\t\t}\n\t} while(match && !match[1]);\n\t// Return the nodes\n\treturn tree;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/hardlinebreaks.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/heading.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/heading.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for headings\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"heading\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(!{1,6})/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar headingLevel = this.match[1].length;\n\t// Move past the !s\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse any classes, whitespace and then the heading itself\n\tvar classes = this.parser.parseClasses();\n\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\tvar tree = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t// Return the heading\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"h\" + headingLevel, \n\t\tattributes: {\n\t\t\t\"class\": {type: \"string\", value: classes.join(\" \")}\n\t\t},\n\t\tchildren: tree\n\t}];\n};\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/heading.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/horizrule.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/horizrule.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for rules. For example:\n\n```\n---\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"horizrule\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /-{3,}\\r?(?:\\n|$)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\treturn [{type: \"element\", tag: \"hr\"}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/horizrule.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/html.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/html.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki rule for HTML elements and widgets. For example:\n\n{{{\n<aside>\nThis is an HTML5 aside element\n</aside>\n\n<$slider target=\"MyTiddler\">\nThis is a widget invocation\n</$slider>\n\n}}}\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"html\";\nexports.types = {inline: true, block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n};\n\nexports.findNextMatch = function(startPos) {\n\t// Find the next tag\n\tthis.nextTag = this.findNextTag(this.parser.source,startPos,{\n\t\trequireLineBreak: this.is.block\n\t});\n\treturn this.nextTag ? this.nextTag.start : undefined;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Retrieve the most recent match so that recursive calls don't overwrite it\n\tvar tag = this.nextTag;\n\tthis.nextTag = null;\n\t// Advance the parser position to past the tag\n\tthis.parser.pos = tag.end;\n\t// Check for an immediately following double linebreak\n\tvar hasLineBreak = !tag.isSelfClosing && !!$tw.utils.parseTokenRegExp(this.parser.source,this.parser.pos,/([^\\S\\n\\r]*\\r?\\n(?:[^\\S\\n\\r]*\\r?\\n|$))/g);\n\t// Set whether we're in block mode\n\ttag.isBlock = this.is.block || hasLineBreak;\n\t// Parse the body if we need to\n\tif(!tag.isSelfClosing && $tw.config.htmlVoidElements.indexOf(tag.tag) === -1) {\n\t\t\tvar reEndString = \"</\" + $tw.utils.escapeRegExp(tag.tag) + \">\",\n\t\t\t\treEnd = new RegExp(\"(\" + reEndString + \")\",\"mg\");\n\t\tif(hasLineBreak) {\n\t\t\ttag.children = this.parser.parseBlocks(reEndString);\n\t\t} else {\n\t\t\ttag.children = this.parser.parseInlineRun(reEnd);\n\t\t}\n\t\treEnd.lastIndex = this.parser.pos;\n\t\tvar endMatch = reEnd.exec(this.parser.source);\n\t\tif(endMatch && endMatch.index === this.parser.pos) {\n\t\t\tthis.parser.pos = endMatch.index + endMatch[0].length;\n\t\t}\n\t}\n\t// Return the tag\n\treturn [tag];\n};\n\n/*\nLook for an HTML tag. 
Returns null if not found, otherwise returns {type: \"element\", name:, attributes: [], isSelfClosing:, start:, end:,}\n*/\nexports.parseTag = function(source,pos,options) {\n\toptions = options || {};\n\tvar token,\n\t\tnode = {\n\t\t\ttype: \"element\",\n\t\t\tstart: pos,\n\t\t\tattributes: {}\n\t\t};\n\t// Define our regexps\n\tvar reTagName = /([a-zA-Z0-9\\-\\$]+)/g;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a less than sign\n\ttoken = $tw.utils.parseTokenString(source,pos,\"<\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Get the tag name\n\ttoken = $tw.utils.parseTokenRegExp(source,pos,reTagName);\n\tif(!token) {\n\t\treturn null;\n\t}\n\tnode.tag = token.match[1];\n\tif(node.tag.charAt(0) === \"$\") {\n\t\tnode.type = node.tag.substr(1);\n\t}\n\tpos = token.end;\n\t// Process attributes\n\tvar attribute = $tw.utils.parseAttribute(source,pos);\n\twhile(attribute) {\n\t\tnode.attributes[attribute.name] = attribute;\n\t\tpos = attribute.end;\n\t\t// Get the next attribute\n\t\tattribute = $tw.utils.parseAttribute(source,pos);\n\t}\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for a closing slash\n\ttoken = $tw.utils.parseTokenString(source,pos,\"/\");\n\tif(token) {\n\t\tpos = token.end;\n\t\tnode.isSelfClosing = true;\n\t}\n\t// Look for a greater than sign\n\ttoken = $tw.utils.parseTokenString(source,pos,\">\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Check for a required line break\n\tif(options.requireLineBreak) {\n\t\ttoken = $tw.utils.parseTokenRegExp(source,pos,/([^\\S\\n\\r]*\\r?\\n(?:[^\\S\\n\\r]*\\r?\\n|$))/g);\n\t\tif(!token) {\n\t\t\treturn null;\n\t\t}\n\t}\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\nexports.findNextTag = function(source,pos,options) {\n\t// A regexp for finding candidate HTML tags\n\tvar reLookahead = /<([a-zA-Z\\-\\$]+)/g;\n\t// Find the next candidate\n\treLookahead.lastIndex = pos;\n\tvar match = reLookahead.exec(source);\n\twhile(match) {\n\t\t// Try to parse the candidate as a tag\n\t\tvar tag = this.parseTag(source,match.index,options);\n\t\t// Return success\n\t\tif(tag && this.isLegalTag(tag)) {\n\t\t\treturn tag;\n\t\t}\n\t\t// Look for the next match\n\t\treLookahead.lastIndex = match.index + 1;\n\t\tmatch = reLookahead.exec(source);\n\t}\n\t// Failed\n\treturn null;\n};\n\nexports.isLegalTag = function(tag) {\n\t// Widgets are always OK\n\tif(tag.type !== \"element\") {\n\t\treturn true;\n\t// If it's an HTML tag that starts with a dash then it's not legal\n\t} else if(tag.tag.charAt(0) === \"-\") {\n\t\treturn false;\n\t} else {\n\t\t// Otherwise it's OK\n\t\treturn true;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/html.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/image.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/image.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for embedding images. For example:\n\n```\n[img[http://tiddlywiki.com/fractalveg.jpg]]\n[img width=23 height=24 [http://tiddlywiki.com/fractalveg.jpg]]\n[img width={{!!width}} height={{!!height}} [http://tiddlywiki.com/fractalveg.jpg]]\n[img[Description of image|http://tiddlywiki.com/fractalveg.jpg]]\n[img[TiddlerTitle]]\n[img[Description of image|TiddlerTitle]]\n```\n\nGenerates the `<$image>` widget.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"image\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n};\n\nexports.findNextMatch = function(startPos) {\n\t// Find the next tag\n\tthis.nextImage = this.findNextImage(this.parser.source,startPos);\n\treturn this.nextImage ? this.nextImage.start : undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.nextImage.end;\n\tvar node = {\n\t\ttype: \"image\",\n\t\tattributes: this.nextImage.attributes\n\t};\n\treturn [node];\n};\n\n/*\nFind the next image from the current position\n*/\nexports.findNextImage = function(source,pos) {\n\t// A regexp for finding candidate HTML tags\n\tvar reLookahead = /(\\[img)/g;\n\t// Find the next candidate\n\treLookahead.lastIndex = pos;\n\tvar match = reLookahead.exec(source);\n\twhile(match) {\n\t\t// Try to parse the candidate as a tag\n\t\tvar tag = this.parseImage(source,match.index);\n\t\t// Return success\n\t\tif(tag) {\n\t\t\treturn tag;\n\t\t}\n\t\t// Look for the next match\n\t\treLookahead.lastIndex = match.index + 1;\n\t\tmatch = reLookahead.exec(source);\n\t}\n\t// Failed\n\treturn null;\n};\n\n/*\nLook for an image at the specified position. 
Returns null if not found, otherwise returns {type: \"image\", attributes: [], isSelfClosing:, start:, end:,}\n*/\nexports.parseImage = function(source,pos) {\n\tvar token,\n\t\tnode = {\n\t\t\ttype: \"image\",\n\t\t\tstart: pos,\n\t\t\tattributes: {}\n\t\t};\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the `[img`\n\ttoken = $tw.utils.parseTokenString(source,pos,\"[img\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Process attributes\n\tif(source.charAt(pos) !== \"[\") {\n\t\tvar attribute = $tw.utils.parseAttribute(source,pos);\n\t\twhile(attribute) {\n\t\t\tnode.attributes[attribute.name] = attribute;\n\t\t\tpos = attribute.end;\n\t\t\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t\t\tif(source.charAt(pos) !== \"[\") {\n\t\t\t\t// Get the next attribute\n\t\t\t\tattribute = $tw.utils.parseAttribute(source,pos);\n\t\t\t} else {\n\t\t\t\tattribute = null;\n\t\t\t}\n\t\t}\n\t}\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the `[` after the attributes\n\ttoken = $tw.utils.parseTokenString(source,pos,\"[\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Get the source up to the terminating `]]`\n\ttoken = $tw.utils.parseTokenRegExp(source,pos,/(?:([^|\\]]*?)\\|)?([^\\]]+?)\\]\\]/g);\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\tif(token.match[1]) {\n\t\tnode.attributes.tooltip = {type: \"string\", value: token.match[1].trim()};\n\t}\n\tnode.attributes.source = {type: \"string\", value: (token.match[2] || \"\").trim()};\n\t// Update the end position\n\tnode.end = pos;\n\treturn node;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/image.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/list.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/list.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for lists. For example:\n\n```\n* This is an unordered list\n* It has two items\n\n# This is a numbered list\n## With a subitem\n# And a third item\n\n; This is a term that is being defined\n: This is the definition of that term\n```\n\nNote that lists can be nested arbitrarily:\n\n```\n#** One\n#* Two\n#** Three\n#**** Four\n#**# Five\n#**## Six\n## Seven\n### Eight\n## Nine\n```\n\nA CSS class can be applied to a list item as follows:\n\n```\n* List item one\n*.active List item two has the class `active`\n* List item three\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"list\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /([\\*#;:>]+)/mg;\n};\n\nvar listTypes = {\n\t\"*\": {listTag: \"ul\", itemTag: \"li\"},\n\t\"#\": {listTag: \"ol\", itemTag: \"li\"},\n\t\";\": {listTag: \"dl\", itemTag: \"dt\"},\n\t\":\": {listTag: \"dl\", itemTag: \"dd\"},\n\t\">\": {listTag: \"blockquote\", itemTag: \"p\"}\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Array of parse tree nodes for the previous row of the list\n\tvar listStack = [];\n\t// Cycle through the items in the list\n\twhile(true) {\n\t\t// Match the list marker\n\t\tvar reMatch = /([\\*#;:>]+)/mg;\n\t\treMatch.lastIndex = this.parser.pos;\n\t\tvar match = reMatch.exec(this.parser.source);\n\t\tif(!match || match.index !== this.parser.pos) {\n\t\t\tbreak;\n\t\t}\n\t\t// Check whether the list type of the top level matches\n\t\tvar listInfo = listTypes[match[0].charAt(0)];\n\t\tif(listStack.length > 0 && listStack[0].tag !== listInfo.listTag) {\n\t\t\tbreak;\n\t\t}\n\t\t// Move past the list marker\n\t\tthis.parser.pos = match.index + match[0].length;\n\t\t// Walk through the list markers for the current row\n\t\tfor(var t=0; t<match[0].length; t++) {\n\t\t\tlistInfo = listTypes[match[0].charAt(t)];\n\t\t\t// Remove any stacked up element if we can't re-use it because the list type doesn't match\n\t\t\tif(listStack.length > t && listStack[t].tag !== listInfo.listTag) {\n\t\t\t\tlistStack.splice(t,listStack.length - t);\n\t\t\t}\n\t\t\t// Construct the list element or reuse the previous one at this level\n\t\t\tif(listStack.length <= t) {\n\t\t\t\tvar listElement = {type: \"element\", tag: listInfo.listTag, children: [\n\t\t\t\t\t{type: \"element\", tag: listInfo.itemTag, children: []}\n\t\t\t\t]};\n\t\t\t\t// Link this list element into the last child item of the parent list item\n\t\t\t\tif(t) {\n\t\t\t\t\tvar prevListItem = listStack[t-1].children[listStack[t-1].children.length-1];\n\t\t\t\t\tprevListItem.children.push(listElement);\n\t\t\t\t}\n\t\t\t\t// Save this element in the stack\n\t\t\t\tlistStack[t] = listElement;\n\t\t\t} else if(t === (match[0].length - 1)) {\n\t\t\t\tlistStack[t].children.push({type: \"element\", tag: listInfo.itemTag, children: []});\n\t\t\t}\n\t\t}\n\t\tif(listStack.length > match[0].length) {\n\t\t\tlistStack.splice(match[0].length,listStack.length - match[0].length);\n\t\t}\n\t\t// Process the body of the list item into the last list item\n\t\tvar lastListChildren = listStack[listStack.length-1].children,\n\t\t\tlastListItem = lastListChildren[lastListChildren.length-1],\n\t\t\tclasses = 
this.parser.parseClasses();\n\t\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\t\tvar tree = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t\tlastListItem.children.push.apply(lastListItem.children,tree);\n\t\tif(classes.length > 0) {\n\t\t\t$tw.utils.addClassToParseTreeNode(lastListItem,classes.join(\" \"));\n\t\t}\n\t\t// Consume any whitespace following the list item\n\t\tthis.parser.skipWhitespace();\n\t}\n\t// Return the root element of the list\n\treturn [listStack[0]];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/list.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/macrocallblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/macrocallblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki rule for block macro calls\n\n```\n<<name value value2>>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"macrocallblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /<<([^>\\s]+)(?:\\s*)((?:[^>]|(?:>(?!>)))*?)>>(?:\\r?\\n|$)/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar macroName = this.match[1],\n\t\tparamString = this.match[2];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar params = [],\n\t\treParam = /\\s*(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))/mg,\n\t\tparamMatch = reParam.exec(paramString);\n\twhile(paramMatch) {\n\t\t// Process this parameter\n\t\tvar paramInfo = {\n\t\t\tvalue: paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5] || paramMatch[6]\n\t\t};\n\t\tif(paramMatch[1]) {\n\t\t\tparamInfo.name = paramMatch[1];\n\t\t}\n\t\tparams.push(paramInfo);\n\t\t// Find the next match\n\t\tparamMatch = reParam.exec(paramString);\n\t}\n\treturn [{\n\t\ttype: \"macrocall\",\n\t\tname: macroName,\n\t\tparams: params,\n\t\tisBlock: true\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/macrocallblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/macrocallinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/macrocallinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki rule for macro calls\n\n```\n<<name value value2>>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"macrocallinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /<<([^\\s>]+)\\s*([\\s\\S]*?)>>/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get all the details of the match\n\tvar macroName = this.match[1],\n\t\tparamString = this.match[2];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\tvar params = [],\n\t\treParam = /\\s*(?:([A-Za-z0-9\\-_]+)\\s*:)?(?:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))/mg,\n\t\tparamMatch = reParam.exec(paramString);\n\twhile(paramMatch) {\n\t\t// Process this parameter\n\t\tvar paramInfo = {\n\t\t\tvalue: paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5]|| paramMatch[6]\n\t\t};\n\t\tif(paramMatch[1]) {\n\t\t\tparamInfo.name = paramMatch[1];\n\t\t}\n\t\tparams.push(paramInfo);\n\t\t// Find the next match\n\t\tparamMatch = reParam.exec(paramString);\n\t}\n\treturn [{\n\t\ttype: \"macrocall\",\n\t\tname: macroName,\n\t\tparams: params\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/macrocallinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/macrodef.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/macrodef.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki pragma rule for macro definitions\n\n```\n\\define name(param:defaultvalue,param2:defaultvalue)\ndefinition text, including $param$ markers\n\\end\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"macrodef\";\nexports.types = {pragma: true};\n\n/*\nInstantiate parse rule\n*/\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /^\\\\define\\s+([^(\\s]+)\\(\\s*([^)]*)\\)(\\s*\\r?\\n)?/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Move past the macro name and parameters\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse the parameters\n\tvar paramString = this.match[2],\n\t\tparams = [];\n\tif(paramString !== \"\") {\n\t\tvar reParam = /\\s*([A-Za-z0-9\\-_]+)(?:\\s*:\\s*(?:\"\"\"([\\s\\S]*?)\"\"\"|\"([^\"]*)\"|'([^']*)'|\\[\\[([^\\]]*)\\]\\]|([^\"'\\s]+)))?/mg,\n\t\t\tparamMatch = reParam.exec(paramString);\n\t\twhile(paramMatch) {\n\t\t\t// Save the parameter details\n\t\t\tvar paramInfo = {name: paramMatch[1]},\n\t\t\t\tdefaultValue = paramMatch[2] || paramMatch[3] || paramMatch[4] || paramMatch[5] || paramMatch[6];\n\t\t\tif(defaultValue) {\n\t\t\t\tparamInfo[\"default\"] = defaultValue;\n\t\t\t}\n\t\t\tparams.push(paramInfo);\n\t\t\t// Look for the next parameter\n\t\t\tparamMatch = reParam.exec(paramString);\n\t\t}\n\t}\n\t// Is this a multiline definition?\n\tvar reEnd;\n\tif(this.match[3]) {\n\t\t// If so, the end of the body is marked with \\end\n\t\treEnd = /(\\r?\\n\\\\end[^\\S\\n\\r]*(?:$|\\r?\\n))/mg;\n\t} else {\n\t\t// Otherwise, the end of the definition is marked by the end of the line\n\t\treEnd = /(\\r?\\n)/mg;\n\t\t// Move past any whitespace\n\t\tthis.parser.pos = $tw.utils.skipWhiteSpace(this.parser.source,this.parser.pos);\n\t}\n\t// Find the end of the definition\n\treEnd.lastIndex = this.parser.pos;\n\tvar text,\n\t\tendMatch = reEnd.exec(this.parser.source);\n\tif(endMatch) {\n\t\ttext = this.parser.source.substring(this.parser.pos,endMatch.index);\n\t\tthis.parser.pos = endMatch.index + endMatch[0].length;\n\t} else {\n\t\t// We didn't find the end of the definition, so we'll make it blank\n\t\ttext = \"\";\n\t}\n\t// Save the macro definition\n\treturn [{\n\t\ttype: \"set\",\n\t\tattributes: {\n\t\t\tname: {type: \"string\", value: this.match[1]},\n\t\t\tvalue: {type: \"string\", value: text}\n\t\t},\n\t\tchildren: [],\n\t\tparams: params\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/macrodef.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/prettyextlink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/prettyextlink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for external links. For example:\n\n```\n[ext[http://tiddlywiki.com/fractalveg.jpg]]\n[ext[Tooltip|http://tiddlywiki.com/fractalveg.jpg]]\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"prettyextlink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n};\n\nexports.findNextMatch = function(startPos) {\n\t// Find the next tag\n\tthis.nextLink = this.findNextLink(this.parser.source,startPos);\n\treturn this.nextLink ? this.nextLink.start : undefined;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.nextLink.end;\n\treturn [this.nextLink];\n};\n\n/*\nFind the next link from the current position\n*/\nexports.findNextLink = function(source,pos) {\n\t// A regexp for finding candidate links\n\tvar reLookahead = /(\\[ext\\[)/g;\n\t// Find the next candidate\n\treLookahead.lastIndex = pos;\n\tvar match = reLookahead.exec(source);\n\twhile(match) {\n\t\t// Try to parse the candidate as a link\n\t\tvar link = this.parseLink(source,match.index);\n\t\t// Return success\n\t\tif(link) {\n\t\t\treturn link;\n\t\t}\n\t\t// Look for the next match\n\t\treLookahead.lastIndex = match.index + 1;\n\t\tmatch = reLookahead.exec(source);\n\t}\n\t// Failed\n\treturn null;\n};\n\n/*\nLook for an link at the specified position. Returns null if not found, otherwise returns {type: \"element\", tag: \"a\", attributes: [], isSelfClosing:, start:, end:,}\n*/\nexports.parseLink = function(source,pos) {\n\tvar token,\n\t\ttextNode = {\n\t\t\ttype: \"text\"\n\t\t},\n\t\tnode = {\n\t\t\ttype: \"element\",\n\t\t\ttag: \"a\",\n\t\t\tstart: pos,\n\t\t\tattributes: {\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-tiddlylink-external\"},\n\t\t\t},\n\t\t\tchildren: [textNode]\n\t\t};\n\t// Skip whitespace\n\tpos = $tw.utils.skipWhiteSpace(source,pos);\n\t// Look for the `[ext[`\n\ttoken = $tw.utils.parseTokenString(source,pos,\"[ext[\");\n\tif(!token) {\n\t\treturn null;\n\t}\n\tpos = token.end;\n\t// Look ahead for the terminating `]]`\n\tvar closePos = source.indexOf(\"]]\",pos);\n\tif(closePos === -1) {\n\t\treturn null;\n\t}\n\t// Look for a `|` separating the tooltip\n\tvar splitPos = source.indexOf(\"|\",pos);\n\tif(splitPos === -1 || splitPos > closePos) {\n\t\tsplitPos = null;\n\t}\n\t// Pull out the tooltip and URL\n\tvar tooltip, URL;\n\tif(splitPos) {\n\t\tURL = source.substring(splitPos + 1,closePos).trim();\n\t\ttextNode.text = source.substring(pos,splitPos).trim();\n\t} else {\n\t\tURL = source.substring(pos,closePos).trim();\n\t\ttextNode.text = URL;\n\t}\n\tnode.attributes.href = {type: \"string\", value: URL};\n\tnode.attributes.target = {type: \"string\", value: \"_blank\"};\n\tnode.attributes.rel = {type: \"string\", value: \"noopener noreferrer\"};\n\t// Update the end position\n\tnode.end = closePos + 2;\n\treturn node;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/prettyextlink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/prettylink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/prettylink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for pretty links. For example:\n\n```\n[[Introduction]]\n\n[[Link description|TiddlerTitle]]\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"prettylink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\[\\[(.*?)(?:\\|(.*?))?\\]\\]/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Process the link\n\tvar text = this.match[1],\n\t\tlink = this.match[2] || text;\n\tif($tw.utils.isLinkExternal(link)) {\n\t\treturn [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"a\",\n\t\t\tattributes: {\n\t\t\t\thref: {type: \"string\", value: link},\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-tiddlylink-external\"},\n\t\t\t\ttarget: {type: \"string\", value: \"_blank\"},\n\t\t\t\trel: {type: \"string\", value: \"noopener noreferrer\"}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\", text: text\n\t\t\t}]\n\t\t}];\n\t} else {\n\t\treturn [{\n\t\t\ttype: \"link\",\n\t\t\tattributes: {\n\t\t\t\tto: {type: \"string\", value: link}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\", text: text\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/prettylink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/quoteblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/quoteblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for quote blocks. For example:\n\n```\n\t<<<.optionalClass(es) optional cited from\n\ta quote\n\t<<<\n\t\n\t<<<.optionalClass(es)\n\ta quote\n\t<<< optional cited from\n```\n\nQuotes can be quoted by putting more <s\n\n```\n\t<<<\n\tQuote Level 1\n\t\n\t<<<<\n\tQuoteLevel 2\n\t<<<<\n\t\n\t<<<\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"quoteblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /(<<<+)/mg;\n};\n\nexports.parse = function() {\n\tvar classes = [\"tc-quote\"];\n\t// Get all the details of the match\n\tvar reEndString = \"^\" + this.match[1] + \"(?!<)\";\n\t// Move past the <s\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t\n\t// Parse any classes, whitespace and then the optional cite itself\n\tclasses.push.apply(classes, this.parser.parseClasses());\n\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\tvar cite = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t// before handling the cite, parse the body of the quote\n\tvar tree= this.parser.parseBlocks(reEndString);\n\t// If we got a cite, put it before the text\n\tif(cite.length > 0) {\n\t\ttree.unshift({\n\t\t\ttype: \"element\",\n\t\t\ttag: \"cite\",\n\t\t\tchildren: cite\n\t\t});\n\t}\n\t// Parse any optional cite\n\tthis.parser.skipWhitespace({treatNewlinesAsNonWhitespace: true});\n\tcite = this.parser.parseInlineRun(/(\\r?\\n)/mg);\n\t// If we got a cite, push it\n\tif(cite.length > 0) {\n\t\ttree.push({\n\t\t\ttype: \"element\",\n\t\t\ttag: \"cite\",\n\t\t\tchildren: cite\n\t\t});\n\t}\n\t// Return the blockquote element\n\treturn [{\n\t\ttype: \"element\",\n\t\ttag: \"blockquote\",\n\t\tattributes: {\n\t\t\tclass: { type: \"string\", value: classes.join(\" \") },\n\t\t},\n\t\tchildren: tree\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/quoteblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/rules.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/rules.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki pragma rule for rules specifications\n\n```\n\\rules except ruleone ruletwo rulethree\n\\rules only ruleone ruletwo rulethree\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"rules\";\nexports.types = {pragma: true};\n\n/*\nInstantiate parse rule\n*/\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /^\\\\rules[^\\S\\n]/mg;\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Move past the pragma invocation\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse whitespace delimited tokens terminated by a line break\n\tvar reMatch = /[^\\S\\n]*(\\S+)|(\\r?\\n)/mg,\n\t\ttokens = [];\n\treMatch.lastIndex = this.parser.pos;\n\tvar match = reMatch.exec(this.parser.source);\n\twhile(match && match.index === this.parser.pos) {\n\t\tthis.parser.pos = reMatch.lastIndex;\n\t\t// Exit if we've got the line break\n\t\tif(match[2]) {\n\t\t\tbreak;\n\t\t}\n\t\t// Process the token\n\t\tif(match[1]) {\n\t\t\ttokens.push(match[1]);\n\t\t}\n\t\t// Match the next token\n\t\tmatch = reMatch.exec(this.parser.source);\n\t}\n\t// Process the tokens\n\tif(tokens.length > 0) {\n\t\tthis.parser.amendRules(tokens[0],tokens.slice(1));\n\t}\n\t// No parse tree nodes to return\n\treturn [];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/rules.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/styleblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/styleblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for assigning styles and classes to paragraphs and other blocks. For example:\n\n```\n@@.myClass\n@@background-color:red;\nThis paragraph will have the CSS class `myClass`.\n\n* The `<ul>` around this list will also have the class `myClass`\n* List item 2\n\n@@\n```\n\nNote that classes and styles can be mixed subject to the rule that styles must precede classes. For example\n\n```\n@@.myFirstClass.mySecondClass\n@@width:100px;.myThirdClass\nThis is a paragraph\n@@\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"styleblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /@@((?:[^\\.\\r\\n\\s:]+:[^\\r\\n;]+;)+)?(?:\\.([^\\r\\n\\s]+))?\\r?\\n/mg;\n};\n\nexports.parse = function() {\n\tvar reEndString = \"^@@(?:\\\\r?\\\\n)?\";\n\tvar classes = [], styles = [];\n\tdo {\n\t\t// Get the class and style\n\t\tif(this.match[1]) {\n\t\t\tstyles.push(this.match[1]);\n\t\t}\n\t\tif(this.match[2]) {\n\t\t\tclasses.push(this.match[2].split(\".\").join(\" \"));\n\t\t}\n\t\t// Move past the match\n\t\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t\t// Look for another line of classes and styles\n\t\tthis.match = this.matchRegExp.exec(this.parser.source);\n\t} while(this.match && this.match.index === this.parser.pos);\n\t// Parse the body\n\tvar tree = this.parser.parseBlocks(reEndString);\n\tfor(var t=0; t<tree.length; t++) {\n\t\tif(classes.length > 0) {\n\t\t\t$tw.utils.addClassToParseTreeNode(tree[t],classes.join(\" \"));\n\t\t}\n\t\tif(styles.length > 0) {\n\t\t\t$tw.utils.addAttributeToParseTreeNode(tree[t],\"style\",styles.join(\"\"));\n\t\t}\n\t}\n\treturn tree;\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/styleblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/styleinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/styleinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for assigning styles and classes to inline runs. For example:\n\n```\n@@.myClass This is some text with a class@@\n@@background-color:red;This is some text with a background colour@@\n@@width:100px;.myClass This is some text with a class and a width@@\n```\n\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"styleinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /@@((?:[^\\.\\r\\n\\s:]+:[^\\r\\n;]+;)+)?(\\.(?:[^\\r\\n\\s]+)\\s+)?/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /@@/g;\n\t// Get the styles and class\n\tvar stylesString = this.match[1],\n\t\tclassString = this.match[2] ? this.match[2].split(\".\").join(\" \") : undefined;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Parse the run up to the terminator\n\tvar tree = this.parser.parseInlineRun(reEnd,{eatTerminator: true});\n\t// Return the classed span\n\tvar node = {\n\t\ttype: \"element\",\n\t\ttag: \"span\",\n\t\tattributes: {\n\t\t\t\"class\": {type: \"string\", value: \"tc-inline-style\"}\n\t\t},\n\t\tchildren: tree\n\t};\n\tif(classString) {\n\t\t$tw.utils.addClassToParseTreeNode(node,classString);\n\t}\n\tif(stylesString) {\n\t\t$tw.utils.addAttributeToParseTreeNode(node,\"style\",stylesString);\n\t}\n\treturn [node];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/styleinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/syslink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/syslink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for system tiddler links.\nCan be suppressed preceding them with `~`.\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"syslink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /~?\\$:\\/[a-zA-Z0-9/.\\-_]+/mg;\n};\n\nexports.parse = function() {\n\tvar match = this.match[0];\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Create the link unless it is suppressed\n\tif(match.substr(0,1) === \"~\") {\n\t\treturn [{type: \"text\", text: match.substr(1)}];\n\t} else {\n\t\treturn [{\n\t\t\ttype: \"link\",\n\t\t\tattributes: {\n\t\t\t\tto: {type: \"string\", value: match}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\",\n\t\t\t\ttext: match\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();",
            "title": "$:/core/modules/parsers/wikiparser/rules/syslink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/table.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/table.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text block rule for tables.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"table\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /^\\|(?:[^\\n]*)\\|(?:[fhck]?)\\r?(?:\\n|$)/mg;\n};\n\nvar processRow = function(prevColumns) {\n\tvar cellRegExp = /(?:\\|([^\\n\\|]*)\\|)|(\\|[fhck]?\\r?(?:\\n|$))/mg,\n\t\tcellTermRegExp = /((?:\\x20*)\\|)/mg,\n\t\ttree = [],\n\t\tcol = 0,\n\t\tcolSpanCount = 1,\n\t\tprevCell,\n\t\tvAlign;\n\t// Match a single cell\n\tcellRegExp.lastIndex = this.parser.pos;\n\tvar cellMatch = cellRegExp.exec(this.parser.source);\n\twhile(cellMatch && cellMatch.index === this.parser.pos) {\n\t\tif(cellMatch[1] === \"~\") {\n\t\t\t// Rowspan\n\t\t\tvar last = prevColumns[col];\n\t\t\tif(last) {\n\t\t\t\tlast.rowSpanCount++;\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(last.element,\"rowspan\",last.rowSpanCount);\n\t\t\t\tvAlign = $tw.utils.getAttributeValueFromParseTreeNode(last.element,\"valign\",\"center\");\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(last.element,\"valign\",vAlign);\n\t\t\t\tif(colSpanCount > 1) {\n\t\t\t\t\t$tw.utils.addAttributeToParseTreeNode(last.element,\"colspan\",colSpanCount);\n\t\t\t\t\tcolSpanCount = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Move to just before the `|` terminating the cell\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t} else if(cellMatch[1] === \">\") {\n\t\t\t// Colspan\n\t\t\tcolSpanCount++;\n\t\t\t// Move to just before the `|` terminating the cell\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t} else if(cellMatch[1] === \"<\" && prevCell) {\n\t\t\tcolSpanCount = 1 + $tw.utils.getAttributeValueFromParseTreeNode(prevCell,\"colspan\",1);\n\t\t\t$tw.utils.addAttributeToParseTreeNode(prevCell,\"colspan\",colSpanCount);\n\t\t\tcolSpanCount = 1;\n\t\t\t// Move to just before the `|` terminating the cell\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t} else if(cellMatch[2]) {\n\t\t\t// End of row\n\t\t\tif(prevCell && colSpanCount > 1) {\n\t\t\t\tif(prevCell.attributes && prevCell.attributes && prevCell.attributes.colspan) {\n\t\t\t\t\t\tcolSpanCount += prevCell.attributes.colspan.value;\n\t\t\t\t} else {\n\t\t\t\t\tcolSpanCount -= 1;\n\t\t\t\t}\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(prevCell,\"colspan\",colSpanCount);\n\t\t\t}\n\t\t\tthis.parser.pos = cellRegExp.lastIndex - 1;\n\t\t\tbreak;\n\t\t} else {\n\t\t\t// For ordinary cells, step beyond the opening `|`\n\t\t\tthis.parser.pos++;\n\t\t\t// Look for a space at the start of the cell\n\t\t\tvar spaceLeft = false;\n\t\t\tvAlign = null;\n\t\t\tif(this.parser.source.substr(this.parser.pos).search(/^\\^([^\\^]|\\^\\^)/) === 0) {\n\t\t\t\tvAlign = \"top\";\n\t\t\t} else if(this.parser.source.substr(this.parser.pos).search(/^,([^,]|,,)/) === 0) {\n\t\t\t\tvAlign = \"bottom\";\n\t\t\t}\n\t\t\tif(vAlign) {\n\t\t\t\tthis.parser.pos++;\n\t\t\t}\n\t\t\tvar chr = this.parser.source.substr(this.parser.pos,1);\n\t\t\twhile(chr === \" \") {\n\t\t\t\tspaceLeft = true;\n\t\t\t\tthis.parser.pos++;\n\t\t\t\tchr = this.parser.source.substr(this.parser.pos,1);\n\t\t\t}\n\t\t\t// Check whether this is a heading cell\n\t\t\tvar cell;\n\t\t\tif(chr === \"!\") {\n\t\t\t\tthis.parser.pos++;\n\t\t\t\tcell = {type: \"element\", tag: \"th\", children: []};\n\t\t\t} else 
{\n\t\t\t\tcell = {type: \"element\", tag: \"td\", children: []};\n\t\t\t}\n\t\t\ttree.push(cell);\n\t\t\t// Record information about this cell\n\t\t\tprevCell = cell;\n\t\t\tprevColumns[col] = {rowSpanCount:1,element:cell};\n\t\t\t// Check for a colspan\n\t\t\tif(colSpanCount > 1) {\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"colspan\",colSpanCount);\n\t\t\t\tcolSpanCount = 1;\n\t\t\t}\n\t\t\t// Parse the cell\n\t\t\tcell.children = this.parser.parseInlineRun(cellTermRegExp,{eatTerminator: true});\n\t\t\t// Set the alignment for the cell\n\t\t\tif(vAlign) {\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"valign\",vAlign);\n\t\t\t}\n\t\t\tif(this.parser.source.substr(this.parser.pos - 2,1) === \" \") { // spaceRight\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"align\",spaceLeft ? \"center\" : \"left\");\n\t\t\t} else if(spaceLeft) {\n\t\t\t\t$tw.utils.addAttributeToParseTreeNode(cell,\"align\",\"right\");\n\t\t\t}\n\t\t\t// Move back to the closing `|`\n\t\t\tthis.parser.pos--;\n\t\t}\n\t\tcol++;\n\t\tcellRegExp.lastIndex = this.parser.pos;\n\t\tcellMatch = cellRegExp.exec(this.parser.source);\n\t}\n\treturn tree;\n};\n\nexports.parse = function() {\n\tvar rowContainerTypes = {\"c\":\"caption\", \"h\":\"thead\", \"\":\"tbody\", \"f\":\"tfoot\"},\n\t\ttable = {type: \"element\", tag: \"table\", children: []},\n\t\trowRegExp = /^\\|([^\\n]*)\\|([fhck]?)\\r?(?:\\n|$)/mg,\n\t\trowTermRegExp = /(\\|(?:[fhck]?)\\r?(?:\\n|$))/mg,\n\t\tprevColumns = [],\n\t\tcurrRowType,\n\t\trowContainer,\n\t\trowCount = 0;\n\t// Match the row\n\trowRegExp.lastIndex = this.parser.pos;\n\tvar rowMatch = rowRegExp.exec(this.parser.source);\n\twhile(rowMatch && rowMatch.index === this.parser.pos) {\n\t\tvar rowType = rowMatch[2];\n\t\t// Check if it is a class assignment\n\t\tif(rowType === \"k\") {\n\t\t\t$tw.utils.addClassToParseTreeNode(table,rowMatch[1]);\n\t\t\tthis.parser.pos = rowMatch.index + rowMatch[0].length;\n\t\t} else {\n\t\t\t// Otherwise, create a new row if this one is of a different type\n\t\t\tif(rowType !== currRowType) {\n\t\t\t\trowContainer = {type: \"element\", tag: rowContainerTypes[rowType], children: []};\n\t\t\t\ttable.children.push(rowContainer);\n\t\t\t\tcurrRowType = rowType;\n\t\t\t}\n\t\t\t// Is this a caption row?\n\t\t\tif(currRowType === \"c\") {\n\t\t\t\t// If so, move past the opening `|` of the row\n\t\t\t\tthis.parser.pos++;\n\t\t\t\t// Move the caption to the first row if it isn't already\n\t\t\t\tif(table.children.length !== 1) {\n\t\t\t\t\ttable.children.pop(); // Take rowContainer out of the children array\n\t\t\t\t\ttable.children.splice(0,0,rowContainer); // Insert it at the bottom\t\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\t// Set the alignment - TODO: figure out why TW did this\n//\t\t\t\trowContainer.attributes.align = rowCount === 0 ? \"top\" : \"bottom\";\n\t\t\t\t// Parse the caption\n\t\t\t\trowContainer.children = this.parser.parseInlineRun(rowTermRegExp,{eatTerminator: true});\n\t\t\t} else {\n\t\t\t\t// Create the row\n\t\t\t\tvar theRow = {type: \"element\", tag: \"tr\", children: []};\n\t\t\t\t$tw.utils.addClassToParseTreeNode(theRow,rowCount%2 ? \"oddRow\" : \"evenRow\");\n\t\t\t\trowContainer.children.push(theRow);\n\t\t\t\t// Process the row\n\t\t\t\ttheRow.children = processRow.call(this,prevColumns);\n\t\t\t\tthis.parser.pos = rowMatch.index + rowMatch[0].length;\n\t\t\t\t// Increment the row count\n\t\t\t\trowCount++;\n\t\t\t}\n\t\t}\n\t\trowMatch = rowRegExp.exec(this.parser.source);\n\t}\n\treturn [table];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/table.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/transcludeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/transcludeblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for block-level transclusion. For example:\n\n```\n{{MyTiddler}}\n{{MyTiddler||TemplateTitle}}\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"transcludeblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{([^\\{\\}\\|]*)(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}(?:\\r?\\n|$)/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar template = $tw.utils.trim(this.match[2]),\n\t\ttextRef = $tw.utils.trim(this.match[1]);\n\t// Prepare the transclude widget\n\tvar transcludeNode = {\n\t\t\ttype: \"transclude\",\n\t\t\tattributes: {},\n\t\t\tisBlock: true\n\t\t};\n\t// Prepare the tiddler widget\n\tvar tr, targetTitle, targetField, targetIndex, tiddlerNode;\n\tif(textRef) {\n\t\ttr = $tw.utils.parseTextReference(textRef);\n\t\ttargetTitle = tr.title;\n\t\ttargetField = tr.field;\n\t\ttargetIndex = tr.index;\n\t\ttiddlerNode = {\n\t\t\ttype: \"tiddler\",\n\t\t\tattributes: {\n\t\t\t\ttiddler: {type: \"string\", value: targetTitle}\n\t\t\t},\n\t\t\tisBlock: true,\n\t\t\tchildren: [transcludeNode]\n\t\t};\n\t}\n\tif(template) {\n\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: template};\n\t\tif(textRef) {\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t} else {\n\t\tif(textRef) {\n\t\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: targetTitle};\n\t\t\tif(targetField) {\n\t\t\t\ttranscludeNode.attributes.field = {type: \"string\", value: targetField};\n\t\t\t}\n\t\t\tif(targetIndex) {\n\t\t\t\ttranscludeNode.attributes.index = {type: \"string\", value: targetIndex};\n\t\t\t}\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/transcludeblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/transcludeinline.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/transcludeinline.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for inline-level transclusion. For example:\n\n```\n{{MyTiddler}}\n{{MyTiddler||TemplateTitle}}\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"transcludeinline\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\{\\{([^\\{\\}\\|]*)(?:\\|\\|([^\\|\\{\\}]+))?\\}\\}/mg;\n};\n\nexports.parse = function() {\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Get the match details\n\tvar template = $tw.utils.trim(this.match[2]),\n\t\ttextRef = $tw.utils.trim(this.match[1]);\n\t// Prepare the transclude widget\n\tvar transcludeNode = {\n\t\t\ttype: \"transclude\",\n\t\t\tattributes: {}\n\t\t};\n\t// Prepare the tiddler widget\n\tvar tr, targetTitle, targetField, targetIndex, tiddlerNode;\n\tif(textRef) {\n\t\ttr = $tw.utils.parseTextReference(textRef);\n\t\ttargetTitle = tr.title;\n\t\ttargetField = tr.field;\n\t\ttargetIndex = tr.index;\n\t\ttiddlerNode = {\n\t\t\ttype: \"tiddler\",\n\t\t\tattributes: {\n\t\t\t\ttiddler: {type: \"string\", value: targetTitle}\n\t\t\t},\n\t\t\tchildren: [transcludeNode]\n\t\t};\n\t}\n\tif(template) {\n\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: template};\n\t\tif(textRef) {\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t} else {\n\t\tif(textRef) {\n\t\t\ttranscludeNode.attributes.tiddler = {type: \"string\", value: targetTitle};\n\t\t\tif(targetField) {\n\t\t\t\ttranscludeNode.attributes.field = {type: \"string\", value: targetField};\n\t\t\t}\n\t\t\tif(targetIndex) {\n\t\t\t\ttranscludeNode.attributes.index = {type: \"string\", value: targetIndex};\n\t\t\t}\n\t\t\treturn [tiddlerNode];\n\t\t} else {\n\t\t\treturn [transcludeNode];\n\t\t}\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/transcludeinline.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/typedblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/typedblock.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text rule for typed blocks. For example:\n\n```\n$$$.js\nThis will be rendered as JavaScript\n$$$\n\n$$$.svg\n<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"150\" height=\"100\">\n  <circle cx=\"100\" cy=\"50\" r=\"40\" stroke=\"black\" stroke-width=\"2\" fill=\"red\" />\n</svg>\n$$$\n\n$$$text/vnd.tiddlywiki>text/html\nThis will be rendered as an //HTML representation// of WikiText\n$$$\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.name = \"typedblock\";\nexports.types = {block: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = /\\$\\$\\$([^ >\\r\\n]*)(?: *> *([^ \\r\\n]+))?\\r?\\n/mg;\n};\n\nexports.parse = function() {\n\tvar reEnd = /\\r?\\n\\$\\$\\$\\r?(?:\\n|$)/mg;\n\t// Save the type\n\tvar parseType = this.match[1],\n\t\trenderType = this.match[2];\n\t// Move past the match\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// Look for the end of the block\n\treEnd.lastIndex = this.parser.pos;\n\tvar match = reEnd.exec(this.parser.source),\n\t\ttext;\n\t// Process the block\n\tif(match) {\n\t\ttext = this.parser.source.substring(this.parser.pos,match.index);\n\t\tthis.parser.pos = match.index + match[0].length;\n\t} else {\n\t\ttext = this.parser.source.substr(this.parser.pos);\n\t\tthis.parser.pos = this.parser.sourceLength;\n\t}\n\t// Parse the block according to the specified type\n\tvar parser = this.parser.wiki.parseText(parseType,text,{defaultType: \"text/plain\"});\n\t// If there's no render type, just return the parse tree\n\tif(!renderType) {\n\t\treturn parser.tree;\n\t} else {\n\t\t// Otherwise, render to the rendertype and return in a <PRE> tag\n\t\tvar widgetNode = this.parser.wiki.makeWidget(parser),\n\t\t\tcontainer = $tw.fakeDocument.createElement(\"div\");\n\t\twidgetNode.render(container,null);\n\t\ttext = renderType === \"text/html\" ? container.innerHTML : container.textContent;\n\t\treturn [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"pre\",\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\",\n\t\t\t\ttext: text\n\t\t\t}]\n\t\t}];\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/typedblock.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/rules/wikilink.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/wikilink.js\ntype: application/javascript\nmodule-type: wikirule\n\nWiki text inline rule for wiki links. For example:\n\n```\nAWikiLink\nAnotherLink\n~SuppressedLink\n```\n\nPrecede a camel case word with `~` to prevent it from being recognised as a link.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.name = \"wikilink\";\nexports.types = {inline: true};\n\nexports.init = function(parser) {\n\tthis.parser = parser;\n\t// Regexp to match\n\tthis.matchRegExp = new RegExp($tw.config.textPrimitives.unWikiLink + \"?\" + $tw.config.textPrimitives.wikiLink,\"mg\");\n};\n\n/*\nParse the most recent match\n*/\nexports.parse = function() {\n\t// Get the details of the match\n\tvar linkText = this.match[0];\n\t// Move past the macro call\n\tthis.parser.pos = this.matchRegExp.lastIndex;\n\t// If the link starts with the unwikilink character then just output it as plain text\n\tif(linkText.substr(0,1) === $tw.config.textPrimitives.unWikiLink) {\n\t\treturn [{type: \"text\", text: linkText.substr(1)}];\n\t}\n\t// If the link has been preceded with a blocked letter then don't treat it as a link\n\tif(this.match.index > 0) {\n\t\tvar preRegExp = new RegExp($tw.config.textPrimitives.blockPrefixLetters,\"mg\");\n\t\tpreRegExp.lastIndex = this.match.index-1;\n\t\tvar preMatch = preRegExp.exec(this.parser.source);\n\t\tif(preMatch && preMatch.index === this.match.index-1) {\n\t\t\treturn [{type: \"text\", text: linkText}];\n\t\t}\n\t}\n\treturn [{\n\t\ttype: \"link\",\n\t\tattributes: {\n\t\t\tto: {type: \"string\", value: linkText}\n\t\t},\n\t\tchildren: [{\n\t\t\ttype: \"text\",\n\t\t\ttext: linkText\n\t\t}]\n\t}];\n};\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/wikilink.js",
            "type": "application/javascript",
            "module-type": "wikirule"
        },
        "$:/core/modules/parsers/wikiparser/wikiparser.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/wikiparser.js\ntype: application/javascript\nmodule-type: parser\n\nThe wiki text parser processes blocks of source text into a parse tree.\n\nThe parse tree is made up of nested arrays of these JavaScript objects:\n\n\t{type: \"element\", tag: <string>, attributes: {}, children: []} - an HTML element\n\t{type: \"text\", text: <string>} - a text node\n\t{type: \"entity\", value: <string>} - an entity\n\t{type: \"raw\", html: <string>} - raw HTML\n\nAttributes are stored as hashmaps of the following objects:\n\n\t{type: \"string\", value: <string>} - literal string\n\t{type: \"indirect\", textReference: <textReference>} - indirect through a text reference\n\t{type: \"macro\", macro: <TBD>} - indirect through a macro invocation\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar WikiParser = function(type,text,options) {\n\tthis.wiki = options.wiki;\n\tvar self = this;\n\t// Check for an externally linked tiddler\n\tif($tw.browser && (text || \"\") === \"\" && options._canonical_uri) {\n\t\tthis.loadRemoteTiddler(options._canonical_uri);\n\t\ttext = $tw.language.getRawString(\"LazyLoadingWarning\");\n\t}\n\t// Initialise the classes if we don't have them already\n\tif(!this.pragmaRuleClasses) {\n\t\tWikiParser.prototype.pragmaRuleClasses = $tw.modules.createClassesFromModules(\"wikirule\",\"pragma\",$tw.WikiRuleBase);\n\t\tthis.setupRules(WikiParser.prototype.pragmaRuleClasses,\"$:/config/WikiParserRules/Pragmas/\");\n\t}\n\tif(!this.blockRuleClasses) {\n\t\tWikiParser.prototype.blockRuleClasses = $tw.modules.createClassesFromModules(\"wikirule\",\"block\",$tw.WikiRuleBase);\n\t\tthis.setupRules(WikiParser.prototype.blockRuleClasses,\"$:/config/WikiParserRules/Block/\");\n\t}\n\tif(!this.inlineRuleClasses) {\n\t\tWikiParser.prototype.inlineRuleClasses = $tw.modules.createClassesFromModules(\"wikirule\",\"inline\",$tw.WikiRuleBase);\n\t\tthis.setupRules(WikiParser.prototype.inlineRuleClasses,\"$:/config/WikiParserRules/Inline/\");\n\t}\n\t// Save the parse text\n\tthis.type = type || \"text/vnd.tiddlywiki\";\n\tthis.source = text || \"\";\n\tthis.sourceLength = this.source.length;\n\t// Set current parse position\n\tthis.pos = 0;\n\t// Instantiate the pragma parse rules\n\tthis.pragmaRules = this.instantiateRules(this.pragmaRuleClasses,\"pragma\",0);\n\t// Instantiate the parser block and inline rules\n\tthis.blockRules = this.instantiateRules(this.blockRuleClasses,\"block\",0);\n\tthis.inlineRules = this.instantiateRules(this.inlineRuleClasses,\"inline\",0);\n\t// Parse any pragmas\n\tthis.tree = [];\n\tvar topBranch = this.parsePragmas();\n\t// Parse the text into inline runs or blocks\n\tif(options.parseAsInline) {\n\t\ttopBranch.push.apply(topBranch,this.parseInlineRun());\n\t} else {\n\t\ttopBranch.push.apply(topBranch,this.parseBlocks());\n\t}\n\t// Return the parse tree\n};\n\n/*\n*/\nWikiParser.prototype.loadRemoteTiddler = function(url) {\n\tvar self = this;\n\t$tw.utils.httpRequest({\n\t\turl: url,\n\t\ttype: \"GET\",\n\t\tcallback: function(err,data) {\n\t\t\tif(!err) {\n\t\t\t\tvar tiddlers = self.wiki.deserializeTiddlers(\".tid\",data,self.wiki.getCreationFields());\n\t\t\t\t$tw.utils.each(tiddlers,function(tiddler) {\n\t\t\t\t\ttiddler[\"_canonical_uri\"] = url;\n\t\t\t\t});\n\t\t\t\tif(tiddlers) {\n\t\t\t\t\tself.wiki.addTiddlers(tiddlers);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\n*/\nWikiParser.prototype.setupRules = function(proto,configPrefix) 
{\n\tvar self = this;\n\tif(!$tw.safemode) {\n\t\t$tw.utils.each(proto,function(object,name) {\n\t\t\tif(self.wiki.getTiddlerText(configPrefix + name,\"enable\") !== \"enable\") {\n\t\t\t\tdelete proto[name];\n\t\t\t}\n\t\t});\n\t}\n};\n\n/*\nInstantiate an array of parse rules\n*/\nWikiParser.prototype.instantiateRules = function(classes,type,startPos) {\n\tvar rulesInfo = [],\n\t\tself = this;\n\t$tw.utils.each(classes,function(RuleClass) {\n\t\t// Instantiate the rule\n\t\tvar rule = new RuleClass(self);\n\t\trule.is = {};\n\t\trule.is[type] = true;\n\t\trule.init(self);\n\t\tvar matchIndex = rule.findNextMatch(startPos);\n\t\tif(matchIndex !== undefined) {\n\t\t\trulesInfo.push({\n\t\t\t\trule: rule,\n\t\t\t\tmatchIndex: matchIndex\n\t\t\t});\n\t\t}\n\t});\n\treturn rulesInfo;\n};\n\n/*\nSkip any whitespace at the current position. Options are:\n\ttreatNewlinesAsNonWhitespace: true if newlines are NOT to be treated as whitespace\n*/\nWikiParser.prototype.skipWhitespace = function(options) {\n\toptions = options || {};\n\tvar whitespaceRegExp = options.treatNewlinesAsNonWhitespace ? /([^\\S\\n]+)/mg : /(\\s+)/mg;\n\twhitespaceRegExp.lastIndex = this.pos;\n\tvar whitespaceMatch = whitespaceRegExp.exec(this.source);\n\tif(whitespaceMatch && whitespaceMatch.index === this.pos) {\n\t\tthis.pos = whitespaceRegExp.lastIndex;\n\t}\n};\n\n/*\nGet the next match out of an array of parse rule instances\n*/\nWikiParser.prototype.findNextMatch = function(rules,startPos) {\n\t// Find the best matching rule by finding the closest match position\n\tvar matchingRule,\n\t\tmatchingRulePos = this.sourceLength;\n\t// Step through each rule\n\tfor(var t=0; t<rules.length; t++) {\n\t\tvar ruleInfo = rules[t];\n\t\t// Ask the rule to get the next match if we've moved past the current one\n\t\tif(ruleInfo.matchIndex !== undefined  && ruleInfo.matchIndex < startPos) {\n\t\t\truleInfo.matchIndex = ruleInfo.rule.findNextMatch(startPos);\n\t\t}\n\t\t// Adopt this match if it's closer than the current best match\n\t\tif(ruleInfo.matchIndex !== undefined && ruleInfo.matchIndex <= matchingRulePos) {\n\t\t\tmatchingRule = ruleInfo;\n\t\t\tmatchingRulePos = ruleInfo.matchIndex;\n\t\t}\n\t}\n\treturn matchingRule;\n};\n\n/*\nParse any pragmas at the beginning of a block of parse text\n*/\nWikiParser.prototype.parsePragmas = function() {\n\tvar currentTreeBranch = this.tree;\n\twhile(true) {\n\t\t// Skip whitespace\n\t\tthis.skipWhitespace();\n\t\t// Check for the end of the text\n\t\tif(this.pos >= this.sourceLength) {\n\t\t\tbreak;\n\t\t}\n\t\t// Check if we've arrived at a pragma rule match\n\t\tvar nextMatch = this.findNextMatch(this.pragmaRules,this.pos);\n\t\t// If not, just exit\n\t\tif(!nextMatch || nextMatch.matchIndex !== this.pos) {\n\t\t\tbreak;\n\t\t}\n\t\t// Process the pragma rule\n\t\tvar subTree = nextMatch.rule.parse();\n\t\tif(subTree.length > 0) {\n\t\t\t// Quick hack; we only cope with a single parse tree node being returned, which is true at the moment\n\t\t\tcurrentTreeBranch.push.apply(currentTreeBranch,subTree);\n\t\t\tsubTree[0].children = [];\n\t\t\tcurrentTreeBranch = subTree[0].children;\n\t\t}\n\t}\n\treturn currentTreeBranch;\n};\n\n/*\nParse a block from the current position\n\tterminatorRegExpString: optional regular expression string that identifies the end of plain paragraphs. Must not include capturing parenthesis\n*/\nWikiParser.prototype.parseBlock = function(terminatorRegExpString) {\n\tvar terminatorRegExp = terminatorRegExpString ? 
new RegExp(\"(\" + terminatorRegExpString + \"|\\\\r?\\\\n\\\\r?\\\\n)\",\"mg\") : /(\\r?\\n\\r?\\n)/mg;\n\tthis.skipWhitespace();\n\tif(this.pos >= this.sourceLength) {\n\t\treturn [];\n\t}\n\t// Look for a block rule that applies at the current position\n\tvar nextMatch = this.findNextMatch(this.blockRules,this.pos);\n\tif(nextMatch && nextMatch.matchIndex === this.pos) {\n\t\treturn nextMatch.rule.parse();\n\t}\n\t// Treat it as a paragraph if we didn't find a block rule\n\treturn [{type: \"element\", tag: \"p\", children: this.parseInlineRun(terminatorRegExp)}];\n};\n\n/*\nParse a series of blocks of text until a terminating regexp is encountered or the end of the text\n\tterminatorRegExpString: terminating regular expression\n*/\nWikiParser.prototype.parseBlocks = function(terminatorRegExpString) {\n\tif(terminatorRegExpString) {\n\t\treturn this.parseBlocksTerminated(terminatorRegExpString);\n\t} else {\n\t\treturn this.parseBlocksUnterminated();\n\t}\n};\n\n/*\nParse a block from the current position to the end of the text\n*/\nWikiParser.prototype.parseBlocksUnterminated = function() {\n\tvar tree = [];\n\twhile(this.pos < this.sourceLength) {\n\t\ttree.push.apply(tree,this.parseBlock());\n\t}\n\treturn tree;\n};\n\n/*\nParse blocks of text until a terminating regexp is encountered\n*/\nWikiParser.prototype.parseBlocksTerminated = function(terminatorRegExpString) {\n\tvar terminatorRegExp = new RegExp(\"(\" + terminatorRegExpString + \")\",\"mg\"),\n\t\ttree = [];\n\t// Skip any whitespace\n\tthis.skipWhitespace();\n\t//  Check if we've got the end marker\n\tterminatorRegExp.lastIndex = this.pos;\n\tvar match = terminatorRegExp.exec(this.source);\n\t// Parse the text into blocks\n\twhile(this.pos < this.sourceLength && !(match && match.index === this.pos)) {\n\t\tvar blocks = this.parseBlock(terminatorRegExpString);\n\t\ttree.push.apply(tree,blocks);\n\t\t// Skip any whitespace\n\t\tthis.skipWhitespace();\n\t\t//  Check if we've got the end marker\n\t\tterminatorRegExp.lastIndex = this.pos;\n\t\tmatch = terminatorRegExp.exec(this.source);\n\t}\n\tif(match && match.index === this.pos) {\n\t\tthis.pos = match.index + match[0].length;\n\t}\n\treturn tree;\n};\n\n/*\nParse a run of text at the current position\n\tterminatorRegExp: a regexp at which to stop the run\n\toptions: see below\nOptions available:\n\teatTerminator: move the parse position past any encountered terminator (default false)\n*/\nWikiParser.prototype.parseInlineRun = function(terminatorRegExp,options) {\n\tif(terminatorRegExp) {\n\t\treturn this.parseInlineRunTerminated(terminatorRegExp,options);\n\t} else {\n\t\treturn this.parseInlineRunUnterminated(options);\n\t}\n};\n\nWikiParser.prototype.parseInlineRunUnterminated = function(options) {\n\tvar tree = [];\n\t// Find the next occurrence of an inline rule\n\tvar nextMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t// Loop around the matches until we've reached the end of the text\n\twhile(this.pos < this.sourceLength && nextMatch) {\n\t\t// Process the text preceding the run rule\n\t\tif(nextMatch.matchIndex > this.pos) {\n\t\t\ttree.push({type: \"text\", text: this.source.substring(this.pos,nextMatch.matchIndex)});\n\t\t\tthis.pos = nextMatch.matchIndex;\n\t\t}\n\t\t// Process the run rule\n\t\ttree.push.apply(tree,nextMatch.rule.parse());\n\t\t// Look for the next run rule\n\t\tnextMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t}\n\t// Process the remaining text\n\tif(this.pos < this.sourceLength) {\n\t\ttree.push({type: \"text\", text: 
this.source.substr(this.pos)});\n\t}\n\tthis.pos = this.sourceLength;\n\treturn tree;\n};\n\nWikiParser.prototype.parseInlineRunTerminated = function(terminatorRegExp,options) {\n\toptions = options || {};\n\tvar tree = [];\n\t// Find the next occurrence of the terminator\n\tterminatorRegExp.lastIndex = this.pos;\n\tvar terminatorMatch = terminatorRegExp.exec(this.source);\n\t// Find the next occurrence of a inlinerule\n\tvar inlineRuleMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t// Loop around until we've reached the end of the text\n\twhile(this.pos < this.sourceLength && (terminatorMatch || inlineRuleMatch)) {\n\t\t// Return if we've found the terminator, and it precedes any inline rule match\n\t\tif(terminatorMatch) {\n\t\t\tif(!inlineRuleMatch || inlineRuleMatch.matchIndex >= terminatorMatch.index) {\n\t\t\t\tif(terminatorMatch.index > this.pos) {\n\t\t\t\t\ttree.push({type: \"text\", text: this.source.substring(this.pos,terminatorMatch.index)});\n\t\t\t\t}\n\t\t\t\tthis.pos = terminatorMatch.index;\n\t\t\t\tif(options.eatTerminator) {\n\t\t\t\t\tthis.pos += terminatorMatch[0].length;\n\t\t\t\t}\n\t\t\t\treturn tree;\n\t\t\t}\n\t\t}\n\t\t// Process any inline rule, along with the text preceding it\n\t\tif(inlineRuleMatch) {\n\t\t\t// Preceding text\n\t\t\tif(inlineRuleMatch.matchIndex > this.pos) {\n\t\t\t\ttree.push({type: \"text\", text: this.source.substring(this.pos,inlineRuleMatch.matchIndex)});\n\t\t\t\tthis.pos = inlineRuleMatch.matchIndex;\n\t\t\t}\n\t\t\t// Process the inline rule\n\t\t\ttree.push.apply(tree,inlineRuleMatch.rule.parse());\n\t\t\t// Look for the next inline rule\n\t\t\tinlineRuleMatch = this.findNextMatch(this.inlineRules,this.pos);\n\t\t\t// Look for the next terminator match\n\t\t\tterminatorRegExp.lastIndex = this.pos;\n\t\t\tterminatorMatch = terminatorRegExp.exec(this.source);\n\t\t}\n\t}\n\t// Process the remaining text\n\tif(this.pos < this.sourceLength) {\n\t\ttree.push({type: \"text\", text: this.source.substr(this.pos)});\n\t}\n\tthis.pos = this.sourceLength;\n\treturn tree;\n};\n\n/*\nParse zero or more class specifiers `.classname`\n*/\nWikiParser.prototype.parseClasses = function() {\n\tvar classRegExp = /\\.([^\\s\\.]+)/mg,\n\t\tclassNames = [];\n\tclassRegExp.lastIndex = this.pos;\n\tvar match = classRegExp.exec(this.source);\n\twhile(match && match.index === this.pos) {\n\t\tthis.pos = match.index + match[0].length;\n\t\tclassNames.push(match[1]);\n\t\tmatch = classRegExp.exec(this.source);\n\t}\n\treturn classNames;\n};\n\n/*\nAmend the rules used by this instance of the parser\n\ttype: `only` keeps just the named rules, `except` keeps all but the named rules\n\tnames: array of rule names\n*/\nWikiParser.prototype.amendRules = function(type,names) {\n\tnames = names || [];\n\t// Define the filter function\n\tvar keepFilter;\n\tif(type === \"only\") {\n\t\tkeepFilter = function(name) {\n\t\t\treturn names.indexOf(name) !== -1;\n\t\t};\n\t} else if(type === \"except\") {\n\t\tkeepFilter = function(name) {\n\t\t\treturn names.indexOf(name) === -1;\n\t\t};\n\t} else {\n\t\treturn;\n\t}\n\t// Define a function to process each of our rule arrays\n\tvar processRuleArray = function(ruleArray) {\n\t\tfor(var t=ruleArray.length-1; t>=0; t--) {\n\t\t\tif(!keepFilter(ruleArray[t].rule.name)) {\n\t\t\t\truleArray.splice(t,1);\n\t\t\t}\n\t\t}\n\t};\n\t// Process each rule array\n\tprocessRuleArray(this.pragmaRules);\n\tprocessRuleArray(this.blockRules);\n\tprocessRuleArray(this.inlineRules);\n};\n\nexports[\"text/vnd.tiddlywiki\"] = 
WikiParser;\n\n})();\n\n",
            "title": "$:/core/modules/parsers/wikiparser/wikiparser.js",
            "type": "application/javascript",
            "module-type": "parser"
        },
        "$:/core/modules/parsers/wikiparser/rules/wikirulebase.js": {
            "text": "/*\\\ntitle: $:/core/modules/parsers/wikiparser/rules/wikirulebase.js\ntype: application/javascript\nmodule-type: global\n\nBase class for wiki parser rules\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nThis constructor is always overridden with a blank constructor, and so shouldn't be used\n*/\nvar WikiRuleBase = function() {\n};\n\n/*\nTo be overridden by individual rules\n*/\nWikiRuleBase.prototype.init = function(parser) {\n\tthis.parser = parser;\n};\n\n/*\nDefault implementation of findNextMatch uses RegExp matching\n*/\nWikiRuleBase.prototype.findNextMatch = function(startPos) {\n\tthis.matchRegExp.lastIndex = startPos;\n\tthis.match = this.matchRegExp.exec(this.parser.source);\n\treturn this.match ? this.match.index : undefined;\n};\n\nexports.WikiRuleBase = WikiRuleBase;\n\n})();\n",
            "title": "$:/core/modules/parsers/wikiparser/rules/wikirulebase.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/pluginswitcher.js": {
            "text": "/*\\\ntitle: $:/core/modules/pluginswitcher.js\ntype: application/javascript\nmodule-type: global\n\nManages switching plugins for themes and languages.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\noptions:\nwiki: wiki store to be used\npluginType: type of plugin to be switched\ncontrollerTitle: title of tiddler used to control switching of this resource\ndefaultPlugins: array of default plugins to be used if nominated plugin isn't found\n*/\nfunction PluginSwitcher(options) {\n\tthis.wiki = options.wiki;\n\tthis.pluginType = options.pluginType;\n\tthis.controllerTitle = options.controllerTitle;\n\tthis.defaultPlugins = options.defaultPlugins || [];\n\t// Switch to the current plugin\n\tthis.switchPlugins();\n\t// Listen for changes to the selected plugin\n\tvar self = this;\n\tthis.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.utils.hop(changes,self.controllerTitle)) {\n\t\t\tself.switchPlugins();\n\t\t}\n\t});\n}\n\nPluginSwitcher.prototype.switchPlugins = function() {\n\t// Get the name of the current theme\n\tvar selectedPluginTitle = this.wiki.getTiddlerText(this.controllerTitle);\n\t// If it doesn't exist, then fallback to one of the default themes\n\tvar index = 0;\n\twhile(!this.wiki.getTiddler(selectedPluginTitle) && index < this.defaultPlugins.length) {\n\t\tselectedPluginTitle = this.defaultPlugins[index++];\n\t}\n\t// Accumulate the titles of the plugins that we need to load\n\tvar plugins = [],\n\t\tself = this,\n\t\taccumulatePlugin = function(title) {\n\t\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\t\tif(tiddler && tiddler.isPlugin() && plugins.indexOf(title) === -1) {\n\t\t\t\tplugins.push(title);\n\t\t\t\tvar pluginInfo = JSON.parse(self.wiki.getTiddlerText(title)),\n\t\t\t\t\tdependents = $tw.utils.parseStringArray(tiddler.fields.dependents || \"\");\n\t\t\t\t$tw.utils.each(dependents,function(title) {\n\t\t\t\t\taccumulatePlugin(title);\n\t\t\t\t});\n\t\t\t}\n\t\t};\n\taccumulatePlugin(selectedPluginTitle);\n\t// Unregister any existing theme tiddlers\n\tvar unregisteredTiddlers = $tw.wiki.unregisterPluginTiddlers(this.pluginType);\n\t// Register any new theme tiddlers\n\tvar registeredTiddlers = $tw.wiki.registerPluginTiddlers(this.pluginType,plugins);\n\t// Unpack the current theme tiddlers\n\t$tw.wiki.unpackPluginTiddlers();\n};\n\nexports.PluginSwitcher = PluginSwitcher;\n\n})();\n",
            "title": "$:/core/modules/pluginswitcher.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/saver-handler.js": {
            "text": "/*\\\ntitle: $:/core/modules/saver-handler.js\ntype: application/javascript\nmodule-type: global\n\nThe saver handler tracks changes to the store and handles saving the entire wiki via saver modules.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInstantiate the saver handler with the following options:\nwiki: wiki to be synced\ndirtyTracking: true if dirty tracking should be performed\n*/\nfunction SaverHandler(options) {\n\tvar self = this;\n\tthis.wiki = options.wiki;\n\tthis.dirtyTracking = options.dirtyTracking;\n\tthis.pendingAutoSave = false;\n\t// Make a logger\n\tthis.logger = new $tw.utils.Logger(\"saver-handler\");\n\t// Initialise our savers\n\tif($tw.browser) {\n\t\tthis.initSavers();\n\t}\n\t// Only do dirty tracking if required\n\tif($tw.browser && this.dirtyTracking) {\n\t\t// Compile the dirty tiddler filter\n\t\tthis.filterFn = this.wiki.compileFilter(this.wiki.getTiddlerText(this.titleSyncFilter));\n\t\t// Count of changes that have not yet been saved\n\t\tthis.numChanges = 0;\n\t\t// Listen out for changes to tiddlers\n\t\tthis.wiki.addEventListener(\"change\",function(changes) {\n\t\t\t// Filter the changes so that we only count changes to tiddlers that we care about\n\t\t\tvar filteredChanges = self.filterFn.call(self.wiki,function(callback) {\n\t\t\t\t$tw.utils.each(changes,function(change,title) {\n\t\t\t\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\t\t\t\tcallback(tiddler,title);\n\t\t\t\t});\n\t\t\t});\n\t\t\t// Adjust the number of changes\n\t\t\tself.numChanges += filteredChanges.length;\n\t\t\tself.updateDirtyStatus();\n\t\t\t// Do any autosave if one is pending and there's no more change events\n\t\t\tif(self.pendingAutoSave && self.wiki.getSizeOfTiddlerEventQueue() === 0) {\n\t\t\t\t// Check if we're dirty\n\t\t\t\tif(self.numChanges > 0) {\n\t\t\t\t\tself.saveWiki({\n\t\t\t\t\t\tmethod: \"autosave\",\n\t\t\t\t\t\tdownloadType: \"text/plain\"\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tself.pendingAutoSave = false;\n\t\t\t}\n\t\t});\n\t\t// Listen for the autosave event\n\t\t$tw.rootWidget.addEventListener(\"tm-auto-save-wiki\",function(event) {\n\t\t\t// Do the autosave unless there are outstanding tiddler change events\n\t\t\tif(self.wiki.getSizeOfTiddlerEventQueue() === 0) {\n\t\t\t\t// Check if we're dirty\n\t\t\t\tif(self.numChanges > 0) {\n\t\t\t\t\tself.saveWiki({\n\t\t\t\t\t\tmethod: \"autosave\",\n\t\t\t\t\t\tdownloadType: \"text/plain\"\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Otherwise put ourselves in the \"pending autosave\" state and wait for the change event before we do the autosave\n\t\t\t\tself.pendingAutoSave = true;\n\t\t\t}\n\t\t});\n\t\t// Set up our beforeunload handler\n\t\t$tw.addUnloadTask(function(event) {\n\t\t\tvar confirmationMessage;\n\t\t\tif(self.isDirty()) {\n\t\t\t\tconfirmationMessage = $tw.language.getString(\"UnsavedChangesWarning\");\n\t\t\t\tevent.returnValue = confirmationMessage; // Gecko\n\t\t\t}\n\t\t\treturn confirmationMessage;\n\t\t});\n\t}\n\t// Install the save action handlers\n\tif($tw.browser) {\n\t\t$tw.rootWidget.addEventListener(\"tm-save-wiki\",function(event) {\n\t\t\tself.saveWiki({\n\t\t\t\ttemplate: event.param,\n\t\t\t\tdownloadType: \"text/plain\",\n\t\t\t\tvariables: event.paramObject\n\t\t\t});\n\t\t});\n\t\t$tw.rootWidget.addEventListener(\"tm-download-file\",function(event) {\n\t\t\tself.saveWiki({\n\t\t\t\tmethod: \"download\",\n\t\t\t\ttemplate: event.param,\n\t\t\t\tdownloadType: 
\"text/plain\",\n\t\t\t\tvariables: event.paramObject\n\t\t\t});\n\t\t});\n\t}\n}\n\nSaverHandler.prototype.titleSyncFilter = \"$:/config/SaverFilter\";\nSaverHandler.prototype.titleAutoSave = \"$:/config/AutoSave\";\nSaverHandler.prototype.titleSavedNotification = \"$:/language/Notifications/Save/Done\";\n\n/*\nSelect the appropriate saver modules and set them up\n*/\nSaverHandler.prototype.initSavers = function(moduleType) {\n\tmoduleType = moduleType || \"saver\";\n\t// Instantiate the available savers\n\tthis.savers = [];\n\tvar self = this;\n\t$tw.modules.forEachModuleOfType(moduleType,function(title,module) {\n\t\tif(module.canSave(self)) {\n\t\t\tself.savers.push(module.create(self.wiki));\n\t\t}\n\t});\n\t// Sort the savers into priority order\n\tthis.savers.sort(function(a,b) {\n\t\tif(a.info.priority < b.info.priority) {\n\t\t\treturn -1;\n\t\t} else {\n\t\t\tif(a.info.priority > b.info.priority) {\n\t\t\t\treturn +1;\n\t\t\t} else {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\nSave the wiki contents. Options are:\n\tmethod: \"save\", \"autosave\" or \"download\"\n\ttemplate: the tiddler containing the template to save\n\tdownloadType: the content type for the saved file\n*/\nSaverHandler.prototype.saveWiki = function(options) {\n\toptions = options || {};\n\tvar self = this,\n\t\tmethod = options.method || \"save\",\n\t\tvariables = options.variables || {},\n\t\ttemplate = options.template || \"$:/core/save/all\",\n\t\tdownloadType = options.downloadType || \"text/plain\",\n\t\ttext = this.wiki.renderTiddler(downloadType,template,options),\n\t\tcallback = function(err) {\n\t\t\tif(err) {\n\t\t\t\talert($tw.language.getString(\"Error/WhileSaving\") + \":\\n\\n\" + err);\n\t\t\t} else {\n\t\t\t\t// Clear the task queue if we're saving (rather than downloading)\n\t\t\t\tif(method !== \"download\") {\n\t\t\t\t\tself.numChanges = 0;\n\t\t\t\t\tself.updateDirtyStatus();\n\t\t\t\t}\n\t\t\t\t$tw.notifier.display(self.titleSavedNotification);\n\t\t\t\tif(options.callback) {\n\t\t\t\t\toptions.callback();\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\t// Ignore autosave if disabled\n\tif(method === \"autosave\" && this.wiki.getTiddlerText(this.titleAutoSave,\"yes\") !== \"yes\") {\n\t\treturn false;\n\t}\n\t// Call the highest priority saver that supports this method\n\tfor(var t=this.savers.length-1; t>=0; t--) {\n\t\tvar saver = this.savers[t];\n\t\tif(saver.info.capabilities.indexOf(method) !== -1 && saver.save(text,method,callback,{variables: {filename: variables.filename}})) {\n\t\t\tthis.logger.log(\"Saving wiki with method\",method,\"through saver\",saver.info.name);\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n};\n\n/*\nChecks whether the wiki is dirty (ie the window shouldn't be closed)\n*/\nSaverHandler.prototype.isDirty = function() {\n\treturn this.numChanges > 0;\n};\n\n/*\nUpdate the document body with the class \"tc-dirty\" if the wiki has unsaved/unsynced changes\n*/\nSaverHandler.prototype.updateDirtyStatus = function() {\n\tif($tw.browser) {\n\t\t$tw.utils.toggleClass(document.body,\"tc-dirty\",this.isDirty());\n\t}\n};\n\nexports.SaverHandler = SaverHandler;\n\n})();\n",
            "title": "$:/core/modules/saver-handler.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/savers/andtidwiki.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/andtidwiki.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via the AndTidWiki Android app\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false, netscape: false, Components: false */\n\"use strict\";\n\nvar AndTidWiki = function(wiki) {\n};\n\nAndTidWiki.prototype.save = function(text,method,callback) {\n\t// Get the pathname of this document\n\tvar pathname = decodeURIComponent(document.location.toString().split(\"#\")[0]);\n\t// Strip the file://\n\tif(pathname.indexOf(\"file://\") === 0) {\n\t\tpathname = pathname.substr(7);\n\t}\n\t// Strip any query or location part\n\tvar p = pathname.indexOf(\"?\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\tp = pathname.indexOf(\"#\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\t// Save the file\n\twindow.twi.saveFile(pathname,text);\n\t// Call the callback\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nAndTidWiki.prototype.info = {\n\tname: \"andtidwiki\",\n\tpriority: 1600,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn !!window.twi && !!window.twi.saveFile;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new AndTidWiki(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/andtidwiki.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/download.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/download.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via HTML5's download APIs\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar DownloadSaver = function(wiki) {\n};\n\nDownloadSaver.prototype.save = function(text,method,callback,options) {\n\toptions = options || {};\n\t// Get the current filename\n\tvar filename = options.variables.filename;\n\tif(!filename) {\n\t\tvar p = document.location.pathname.lastIndexOf(\"/\");\n\t\tif(p !== -1) {\n\t\t\tfilename = document.location.pathname.substr(p+1);\n\t\t}\n\t}\n\tif(!filename) {\n\t\tfilename = \"tiddlywiki.html\";\n\t}\n\t// Set up the link\n\tvar link = document.createElement(\"a\");\n\tlink.setAttribute(\"target\",\"_blank\");\n\tlink.setAttribute(\"rel\",\"noopener noreferrer\");\n\tif(Blob !== undefined) {\n\t\tvar blob = new Blob([text], {type: \"text/html\"});\n\t\tlink.setAttribute(\"href\", URL.createObjectURL(blob));\n\t} else {\n\t\tlink.setAttribute(\"href\",\"data:text/html,\" + encodeURIComponent(text));\n\t}\n\tlink.setAttribute(\"download\",filename);\n\tdocument.body.appendChild(link);\n\tlink.click();\n\tdocument.body.removeChild(link);\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nDownloadSaver.prototype.info = {\n\tname: \"download\",\n\tpriority: 100,\n\tcapabilities: [\"save\", \"download\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn document.createElement(\"a\").download !== undefined;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new DownloadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/download.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/fsosaver.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/fsosaver.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via MS FileSystemObject ActiveXObject\n\nNote: Since TiddlyWiki's markup contains the MOTW, the FileSystemObject normally won't be available. \nHowever, if the wiki is loaded as an .HTA file (Windows HTML Applications) then the FSO can be used.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar FSOSaver = function(wiki) {\n};\n\nFSOSaver.prototype.save = function(text,method,callback) {\n\t// Get the pathname of this document\n\tvar pathname = unescape(document.location.pathname);\n\t// Test for a Windows path of the form /x:\\blah...\n\tif(/^\\/[A-Z]\\:\\\\[^\\\\]+/i.test(pathname)) {\t// ie: ^/[a-z]:/[^/]+\n\t\t// Remove the leading slash\n\t\tpathname = pathname.substr(1);\n\t} else if(document.location.hostname !== \"\" && /^\\/\\\\[^\\\\]+\\\\[^\\\\]+/i.test(pathname)) {\t// test for \\\\server\\share\\blah... - ^/[^/]+/[^/]+\n\t\t// Remove the leading slash\n\t\tpathname = pathname.substr(1);\n\t\t// reconstruct UNC path\n\t\tpathname = \"\\\\\\\\\" + document.location.hostname + pathname;\n\t} else {\n\t\treturn false;\n\t}\n\t// Save the file (as UTF-16)\n\tvar fso = new ActiveXObject(\"Scripting.FileSystemObject\");\n\tvar file = fso.OpenTextFile(pathname,2,-1,-1);\n\tfile.Write(text);\n\tfile.Close();\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nFSOSaver.prototype.info = {\n\tname: \"FSOSaver\",\n\tpriority: 120,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\ttry {\n\t\treturn (window.location.protocol === \"file:\") && !!(new ActiveXObject(\"Scripting.FileSystemObject\"));\n\t} catch(e) { return false; }\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new FSOSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/fsosaver.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/manualdownload.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/manualdownload.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via HTML5's download APIs\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Title of the tiddler containing the download message\nvar downloadInstructionsTitle = \"$:/language/Modals/Download\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar ManualDownloadSaver = function(wiki) {\n};\n\nManualDownloadSaver.prototype.save = function(text,method,callback) {\n\t$tw.modal.display(downloadInstructionsTitle,{\n\t\tdownloadLink: \"data:text/html,\" + encodeURIComponent(text)\n\t});\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nManualDownloadSaver.prototype.info = {\n\tname: \"manualdownload\",\n\tpriority: 0,\n\tcapabilities: [\"save\", \"download\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn true;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new ManualDownloadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/manualdownload.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/msdownload.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/msdownload.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via window.navigator.msSaveBlob()\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar MsDownloadSaver = function(wiki) {\n};\n\nMsDownloadSaver.prototype.save = function(text,method,callback) {\n\t// Get the current filename\n\tvar filename = \"tiddlywiki.html\",\n\t\tp = document.location.pathname.lastIndexOf(\"/\");\n\tif(p !== -1) {\n\t\tfilename = document.location.pathname.substr(p+1);\n\t}\n\t// Set up the link\n\tvar blob = new Blob([text], {type: \"text/html\"});\n\twindow.navigator.msSaveBlob(blob,filename);\n\t// Callback that we succeeded\n\tcallback(null);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nMsDownloadSaver.prototype.info = {\n\tname: \"msdownload\",\n\tpriority: 110,\n\tcapabilities: [\"save\", \"download\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn !!window.navigator.msSaveBlob;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new MsDownloadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/msdownload.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/put.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/put.js\ntype: application/javascript\nmodule-type: saver\n\nSaves wiki by performing a PUT request to the server\n\nWorks with any server which accepts a PUT request\nto the current URL, such as a WebDAV server.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar PutSaver = function(wiki) {\n\tthis.wiki = wiki;\n\tvar self = this;\n\t// Async server probe. Until probe finishes, save will fail fast\n\t// See also https://github.com/Jermolene/TiddlyWiki5/issues/2276\n\tvar req = new XMLHttpRequest();\n\treq.open(\"OPTIONS\",encodeURI(document.location.protocol + \"//\" + document.location.hostname + \":\" + document.location.port + document.location.pathname));\n\treq.onload = function() {\n\t\t// Check DAV header http://www.webdav.org/specs/rfc2518.html#rfc.section.9.1\n\t\tself.serverAcceptsPuts = (this.status === 200 && !!this.getResponseHeader('dav'));\n\t};\n\treq.send();\n};\n\nPutSaver.prototype.save = function(text,method,callback) {\n\tif (!this.serverAcceptsPuts) {\n\t\treturn false;\n\t}\n\tvar req = new XMLHttpRequest();\n\t// TODO: store/check ETags if supported by server, to protect against overwrites\n\t// Prompt: Do you want to save over this? Y/N\n\t// Merging would be ideal, and may be possible using future generic merge flow\n\treq.onload = function() {\n\t\tif (this.status === 200 || this.status === 201) {\n\t\t\tcallback(null); // success\n\t\t}\n\t\telse {\n\t\t\tcallback(this.responseText); // fail\n\t\t}\n\t};\n\treq.open(\"PUT\", encodeURI(window.location.href));\n\treq.setRequestHeader(\"Content-Type\", \"text/html;charset=UTF-8\");\n\treq.send(text);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nPutSaver.prototype.info = {\n\tname: \"put\",\n\tpriority: 2000,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn /^https?:/.test(location.protocol);\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new PutSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/put.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/tiddlyfox.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/tiddlyfox.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via the TiddlyFox file extension\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false, netscape: false, Components: false */\n\"use strict\";\n\nvar TiddlyFoxSaver = function(wiki) {\n};\n\nTiddlyFoxSaver.prototype.save = function(text,method,callback) {\n\tvar messageBox = document.getElementById(\"tiddlyfox-message-box\");\n\tif(messageBox) {\n\t\t// Get the pathname of this document\n\t\tvar pathname = document.location.toString().split(\"#\")[0];\n\t\t// Replace file://localhost/ with file:///\n\t\tif(pathname.indexOf(\"file://localhost/\") === 0) {\n\t\t\tpathname = \"file://\" + pathname.substr(16);\n\t\t}\n\t\t// Windows path file:///x:/blah/blah --> x:\\blah\\blah\n\t\tif(/^file\\:\\/\\/\\/[A-Z]\\:\\//i.test(pathname)) {\n\t\t\t// Remove the leading slash and convert slashes to backslashes\n\t\t\tpathname = pathname.substr(8).replace(/\\//g,\"\\\\\");\n\t\t// Firefox Windows network path file://///server/share/blah/blah --> //server/share/blah/blah\n\t\t} else if(pathname.indexOf(\"file://///\") === 0) {\n\t\t\tpathname = \"\\\\\\\\\" + unescape(pathname.substr(10)).replace(/\\//g,\"\\\\\");\n\t\t// Mac/Unix local path file:///path/path --> /path/path\n\t\t} else if(pathname.indexOf(\"file:///\") === 0) {\n\t\t\tpathname = unescape(pathname.substr(7));\n\t\t// Mac/Unix local path file:/path/path --> /path/path\n\t\t} else if(pathname.indexOf(\"file:/\") === 0) {\n\t\t\tpathname = unescape(pathname.substr(5));\n\t\t// Otherwise Windows networth path file://server/share/path/path --> \\\\server\\share\\path\\path\n\t\t} else {\n\t\t\tpathname = \"\\\\\\\\\" + unescape(pathname.substr(7)).replace(new RegExp(\"/\",\"g\"),\"\\\\\");\n\t\t}\n\t\t// Create the message element and put it in the message box\n\t\tvar message = document.createElement(\"div\");\n\t\tmessage.setAttribute(\"data-tiddlyfox-path\",decodeURIComponent(pathname));\n\t\tmessage.setAttribute(\"data-tiddlyfox-content\",text);\n\t\tmessageBox.appendChild(message);\n\t\t// Add an event handler for when the file has been saved\n\t\tmessage.addEventListener(\"tiddlyfox-have-saved-file\",function(event) {\n\t\t\tcallback(null);\n\t\t}, false);\n\t\t// Create and dispatch the custom event to the extension\n\t\tvar event = document.createEvent(\"Events\");\n\t\tevent.initEvent(\"tiddlyfox-save-file\",true,false);\n\t\tmessage.dispatchEvent(event);\n\t\treturn true;\n\t} else {\n\t\treturn false;\n\t}\n};\n\n/*\nInformation about this saver\n*/\nTiddlyFoxSaver.prototype.info = {\n\tname: \"tiddlyfox\",\n\tpriority: 1500,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn (window.location.protocol === \"file:\");\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new TiddlyFoxSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/tiddlyfox.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/tiddlyie.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/tiddlyie.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via Internet Explorer BHO extenion (TiddlyIE)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar TiddlyIESaver = function(wiki) {\n};\n\nTiddlyIESaver.prototype.save = function(text,method,callback) {\n\t// Check existence of TiddlyIE BHO extension (note: only works after document is complete)\n\tif(typeof(window.TiddlyIE) != \"undefined\") {\n\t\t// Get the pathname of this document\n\t\tvar pathname = unescape(document.location.pathname);\n\t\t// Test for a Windows path of the form /x:/blah...\n\t\tif(/^\\/[A-Z]\\:\\/[^\\/]+/i.test(pathname)) {\t// ie: ^/[a-z]:/[^/]+ (is this better?: ^/[a-z]:/[^/]+(/[^/]+)*\\.[^/]+ )\n\t\t\t// Remove the leading slash\n\t\t\tpathname = pathname.substr(1);\n\t\t\t// Convert slashes to backslashes\n\t\t\tpathname = pathname.replace(/\\//g,\"\\\\\");\n\t\t} else if(document.hostname !== \"\" && /^\\/[^\\/]+\\/[^\\/]+/i.test(pathname)) {\t// test for \\\\server\\share\\blah... - ^/[^/]+/[^/]+\n\t\t\t// Convert slashes to backslashes\n\t\t\tpathname = pathname.replace(/\\//g,\"\\\\\");\n\t\t\t// reconstruct UNC path\n\t\t\tpathname = \"\\\\\\\\\" + document.location.hostname + pathname;\n\t\t} else return false;\n\t\t// Prompt the user to save the file\n\t\twindow.TiddlyIE.save(pathname, text);\n\t\t// Callback that we succeeded\n\t\tcallback(null);\n\t\treturn true;\n\t} else {\n\t\treturn false;\n\t}\n};\n\n/*\nInformation about this saver\n*/\nTiddlyIESaver.prototype.info = {\n\tname: \"tiddlyiesaver\",\n\tpriority: 1500,\n\tcapabilities: [\"save\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn (window.location.protocol === \"file:\");\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new TiddlyIESaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/tiddlyie.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/twedit.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/twedit.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via the TWEdit iOS app\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false, netscape: false, Components: false */\n\"use strict\";\n\nvar TWEditSaver = function(wiki) {\n};\n\nTWEditSaver.prototype.save = function(text,method,callback) {\n\t// Bail if we're not running under TWEdit\n\tif(typeof DeviceInfo !== \"object\") {\n\t\treturn false;\n\t}\n\t// Get the pathname of this document\n\tvar pathname = decodeURIComponent(document.location.pathname);\n\t// Strip any query or location part\n\tvar p = pathname.indexOf(\"?\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\tp = pathname.indexOf(\"#\");\n\tif(p !== -1) {\n\t\tpathname = pathname.substr(0,p);\n\t}\n\t// Remove the leading \"/Documents\" from path\n\tvar prefix = \"/Documents\";\n\tif(pathname.indexOf(prefix) === 0) {\n\t\tpathname = pathname.substr(prefix.length);\n\t}\n\t// Error handler\n\tvar errorHandler = function(event) {\n\t\t// Error\n\t\tcallback($tw.language.getString(\"Error/SavingToTWEdit\") + \": \" + event.target.error.code);\n\t};\n\t// Get the file system\n\twindow.requestFileSystem(LocalFileSystem.PERSISTENT,0,function(fileSystem) {\n\t\t// Now we've got the filesystem, get the fileEntry\n\t\tfileSystem.root.getFile(pathname, {create: true}, function(fileEntry) {\n\t\t\t// Now we've got the fileEntry, create the writer\n\t\t\tfileEntry.createWriter(function(writer) {\n\t\t\t\twriter.onerror = errorHandler;\n\t\t\t\twriter.onwrite = function() {\n\t\t\t\t\tcallback(null);\n\t\t\t\t};\n\t\t\t\twriter.position = 0;\n\t\t\t\twriter.write(text);\n\t\t\t},errorHandler);\n\t\t}, errorHandler);\n\t}, errorHandler);\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nTWEditSaver.prototype.info = {\n\tname: \"twedit\",\n\tpriority: 1600,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn true;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new TWEditSaver(wiki);\n};\n\n/////////////////////////// Hack\n// HACK: This ensures that TWEdit recognises us as a TiddlyWiki document\nif($tw.browser) {\n\twindow.version = {title: \"TiddlyWiki\"};\n}\n\n})();\n",
            "title": "$:/core/modules/savers/twedit.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/savers/upload.js": {
            "text": "/*\\\ntitle: $:/core/modules/savers/upload.js\ntype: application/javascript\nmodule-type: saver\n\nHandles saving changes via upload to a server.\n\nDesigned to be compatible with BidiX's UploadPlugin at http://tiddlywiki.bidix.info/#UploadPlugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSelect the appropriate saver module and set it up\n*/\nvar UploadSaver = function(wiki) {\n\tthis.wiki = wiki;\n};\n\nUploadSaver.prototype.save = function(text,method,callback) {\n\t// Get the various parameters we need\n\tvar backupDir = this.wiki.getTextReference(\"$:/UploadBackupDir\") || \".\",\n\t\tusername = this.wiki.getTextReference(\"$:/UploadName\"),\n\t\tpassword = $tw.utils.getPassword(\"upload\"),\n\t\tuploadDir = this.wiki.getTextReference(\"$:/UploadDir\") || \".\",\n\t\tuploadFilename = this.wiki.getTextReference(\"$:/UploadFilename\") || \"index.html\",\n\t\turl = this.wiki.getTextReference(\"$:/UploadURL\");\n\t// Bail out if we don't have the bits we need\n\tif(!username || username.toString().trim() === \"\" || !password || password.toString().trim() === \"\") {\n\t\treturn false;\n\t}\n\t// Construct the url if not provided\n\tif(!url) {\n\t\turl = \"http://\" + username + \".tiddlyspot.com/store.cgi\";\n\t}\n\t// Assemble the header\n\tvar boundary = \"---------------------------\" + \"AaB03x\";\t\n\tvar uploadFormName = \"UploadPlugin\";\n\tvar head = [];\n\thead.push(\"--\" + boundary + \"\\r\\nContent-disposition: form-data; name=\\\"UploadPlugin\\\"\\r\\n\");\n\thead.push(\"backupDir=\" + backupDir + \";user=\" + username + \";password=\" + password + \";uploaddir=\" + uploadDir + \";;\"); \n\thead.push(\"\\r\\n\" + \"--\" + boundary);\n\thead.push(\"Content-disposition: form-data; name=\\\"userfile\\\"; filename=\\\"\" + uploadFilename + \"\\\"\");\n\thead.push(\"Content-Type: text/html;charset=UTF-8\");\n\thead.push(\"Content-Length: \" + text.length + \"\\r\\n\");\n\thead.push(\"\");\n\t// Assemble the tail and the data itself\n\tvar tail = \"\\r\\n--\" + boundary + \"--\\r\\n\",\n\t\tdata = head.join(\"\\r\\n\") + text + tail;\n\t// Do the HTTP post\n\tvar http = new XMLHttpRequest();\n\thttp.open(\"POST\",url,true,username,password);\n\thttp.setRequestHeader(\"Content-Type\",\"multipart/form-data; charset=UTF-8; boundary=\" + boundary);\n\thttp.onreadystatechange = function() {\n\t\tif(http.readyState == 4 && http.status == 200) {\n\t\t\tif(http.responseText.substr(0,4) === \"0 - \") {\n\t\t\t\tcallback(null);\n\t\t\t} else {\n\t\t\t\tcallback(http.responseText);\n\t\t\t}\n\t\t}\n\t};\n\ttry {\n\t\thttp.send(data);\n\t} catch(ex) {\n\t\treturn callback($tw.language.getString(\"Error/Caption\") + \":\" + ex);\n\t}\n\t$tw.notifier.display(\"$:/language/Notifications/Save/Starting\");\n\treturn true;\n};\n\n/*\nInformation about this saver\n*/\nUploadSaver.prototype.info = {\n\tname: \"upload\",\n\tpriority: 2000,\n\tcapabilities: [\"save\", \"autosave\"]\n};\n\n/*\nStatic method that returns true if this saver is capable of working\n*/\nexports.canSave = function(wiki) {\n\treturn true;\n};\n\n/*\nCreate an instance of this saver\n*/\nexports.create = function(wiki) {\n\treturn new UploadSaver(wiki);\n};\n\n})();\n",
            "title": "$:/core/modules/savers/upload.js",
            "type": "application/javascript",
            "module-type": "saver"
        },
        "$:/core/modules/browser-messaging.js": {
            "text": "/*\\\ntitle: $:/core/modules/browser-messaging.js\ntype: application/javascript\nmodule-type: startup\n\nBrowser message handling\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"browser-messaging\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\n/*\nLoad a specified url as an iframe and call the callback when it is loaded. If the url is already loaded then the existing iframe instance is used\n*/\nfunction loadIFrame(url,callback) {\n\t// Check if iframe already exists\n\tvar iframeInfo = $tw.browserMessaging.iframeInfoMap[url];\n\tif(iframeInfo) {\n\t\t// We've already got the iframe\n\t\tcallback(null,iframeInfo);\n\t} else {\n\t\t// Create the iframe and save it in the list\n\t\tvar iframe = document.createElement(\"iframe\"),\n\t\t\tiframeInfo = {\n\t\t\t\turl: url,\n\t\t\t\tstatus: \"loading\",\n\t\t\t\tdomNode: iframe\n\t\t\t};\n\t\t$tw.browserMessaging.iframeInfoMap[url] = iframeInfo;\n\t\tsaveIFrameInfoTiddler(iframeInfo);\n\t\t// Add the iframe to the DOM and hide it\n\t\tiframe.style.display = \"none\";\n\t\tdocument.body.appendChild(iframe);\n\t\t// Set up onload\n\t\tiframe.onload = function() {\n\t\t\tiframeInfo.status = \"loaded\";\n\t\t\tsaveIFrameInfoTiddler(iframeInfo);\n\t\t\tcallback(null,iframeInfo);\n\t\t};\n\t\tiframe.onerror = function() {\n\t\t\tcallback(\"Cannot load iframe\");\n\t\t};\n\t\ttry {\n\t\t\tiframe.src = url;\n\t\t} catch(ex) {\n\t\t\tcallback(ex);\n\t\t}\n\t}\n}\n\nfunction saveIFrameInfoTiddler(iframeInfo) {\n\t$tw.wiki.addTiddler(new $tw.Tiddler($tw.wiki.getCreationFields(),{\n\t\ttitle: \"$:/temp/ServerConnection/\" + iframeInfo.url,\n\t\ttext: iframeInfo.status,\n\t\ttags: [\"$:/tags/ServerConnection\"],\n\t\turl: iframeInfo.url\n\t},$tw.wiki.getModificationFields()));\n}\n\nexports.startup = function() {\n\t// Initialise the store of iframes we've created\n\t$tw.browserMessaging = {\n\t\tiframeInfoMap: {} // Hashmap by URL of {url:,status:\"loading/loaded\",domNode:}\n\t};\n\t// Listen for widget messages to control loading the plugin library\n\t$tw.rootWidget.addEventListener(\"tm-load-plugin-library\",function(event) {\n\t\tvar paramObject = event.paramObject || {},\n\t\t\turl = paramObject.url;\n\t\tif(url) {\n\t\t\tloadIFrame(url,function(err,iframeInfo) {\n\t\t\t\tif(err) {\n\t\t\t\t\talert($tw.language.getString(\"Error/LoadingPluginLibrary\") + \": \" + url);\n\t\t\t\t} else {\n\t\t\t\t\tiframeInfo.domNode.contentWindow.postMessage({\n\t\t\t\t\t\tverb: \"GET\",\n\t\t\t\t\t\turl: \"recipes/library/tiddlers.json\",\n\t\t\t\t\t\tcookies: {\n\t\t\t\t\t\t\ttype: \"save-info\",\n\t\t\t\t\t\t\tinfoTitlePrefix: paramObject.infoTitlePrefix || \"$:/temp/RemoteAssetInfo/\",\n\t\t\t\t\t\t\turl: url\n\t\t\t\t\t\t}\n\t\t\t\t\t},\"*\");\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\t$tw.rootWidget.addEventListener(\"tm-load-plugin-from-library\",function(event) {\n\t\tvar paramObject = event.paramObject || {},\n\t\t\turl = paramObject.url,\n\t\t\ttitle = paramObject.title;\n\t\tif(url && title) {\n\t\t\tloadIFrame(url,function(err,iframeInfo) {\n\t\t\t\tif(err) {\n\t\t\t\t\talert($tw.language.getString(\"Error/LoadingPluginLibrary\") + \": \" + url);\n\t\t\t\t} else {\n\t\t\t\t\tiframeInfo.domNode.contentWindow.postMessage({\n\t\t\t\t\t\tverb: \"GET\",\n\t\t\t\t\t\turl: \"recipes/library/tiddlers/\" + encodeURIComponent(title) + \".json\",\n\t\t\t\t\t\tcookies: 
{\n\t\t\t\t\t\t\ttype: \"save-tiddler\",\n\t\t\t\t\t\t\turl: url\n\t\t\t\t\t\t}\n\t\t\t\t\t},\"*\");\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\t// Listen for window messages from other windows\n\twindow.addEventListener(\"message\",function listener(event){\n\t\tconsole.log(\"browser-messaging: \",document.location.toString())\n\t\tconsole.log(\"browser-messaging: Received message from\",event.origin);\n\t\tconsole.log(\"browser-messaging: Message content\",event.data);\n\t\tswitch(event.data.verb) {\n\t\t\tcase \"GET-RESPONSE\":\n\t\t\t\tif(event.data.status.charAt(0) === \"2\") {\n\t\t\t\t\tif(event.data.cookies) {\n\t\t\t\t\t\tif(event.data.cookies.type === \"save-info\") {\n\t\t\t\t\t\t\tvar tiddlers = JSON.parse(event.data.body);\n\t\t\t\t\t\t\t$tw.utils.each(tiddlers,function(tiddler) {\n\t\t\t\t\t\t\t\t$tw.wiki.addTiddler(new $tw.Tiddler($tw.wiki.getCreationFields(),tiddler,{\n\t\t\t\t\t\t\t\t\ttitle: event.data.cookies.infoTitlePrefix + event.data.cookies.url + \"/\" + tiddler.title,\n\t\t\t\t\t\t\t\t\t\"original-title\": tiddler.title,\n\t\t\t\t\t\t\t\t\ttext: \"\",\n\t\t\t\t\t\t\t\t\ttype: \"text/vnd.tiddlywiki\",\n\t\t\t\t\t\t\t\t\t\"original-type\": tiddler.type,\n\t\t\t\t\t\t\t\t\t\"plugin-type\": undefined,\n\t\t\t\t\t\t\t\t\t\"original-plugin-type\": tiddler[\"plugin-type\"],\n\t\t\t\t\t\t\t\t\t\"module-type\": undefined,\n\t\t\t\t\t\t\t\t\t\"original-module-type\": tiddler[\"module-type\"],\n\t\t\t\t\t\t\t\t\ttags: [\"$:/tags/RemoteAssetInfo\"],\n\t\t\t\t\t\t\t\t\t\"original-tags\": $tw.utils.stringifyList(tiddler.tags || []),\n\t\t\t\t\t\t\t\t\t\"server-url\": event.data.cookies.url\n\t\t\t\t\t\t\t\t},$tw.wiki.getModificationFields()));\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t} else if(event.data.cookies.type === \"save-tiddler\") {\n\t\t\t\t\t\t\tvar tiddler = JSON.parse(event.data.body);\n\t\t\t\t\t\t\t$tw.wiki.addTiddler(new $tw.Tiddler(tiddler));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t}\n\t},false);\n};\n\n})();\n",
            "title": "$:/core/modules/browser-messaging.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/commands.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/commands.js\ntype: application/javascript\nmodule-type: startup\n\nCommand processing\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"commands\";\nexports.platforms = [\"node\"];\nexports.after = [\"story\"];\nexports.synchronous = false;\n\nexports.startup = function(callback) {\n\t// On the server, start a commander with the command line arguments\n\tvar commander = new $tw.Commander(\n\t\t$tw.boot.argv,\n\t\tfunction(err) {\n\t\t\tif(err) {\n\t\t\t\treturn $tw.utils.error(\"Error: \" + err);\n\t\t\t}\n\t\t\tcallback();\n\t\t},\n\t\t$tw.wiki,\n\t\t{output: process.stdout, error: process.stderr}\n\t);\n\tcommander.execute();\n};\n\n})();\n",
            "title": "$:/core/modules/startup/commands.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/favicon.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/favicon.js\ntype: application/javascript\nmodule-type: startup\n\nFavicon handling\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"favicon\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\t\t\n// Favicon tiddler\nvar FAVICON_TITLE = \"$:/favicon.ico\";\n\nexports.startup = function() {\n\t// Set up the favicon\n\tsetFavicon();\n\t// Reset the favicon when the tiddler changes\n\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.utils.hop(changes,FAVICON_TITLE)) {\n\t\t\tsetFavicon();\n\t\t}\n\t});\n};\n\nfunction setFavicon() {\n\tvar tiddler = $tw.wiki.getTiddler(FAVICON_TITLE);\n\tif(tiddler) {\n\t\tvar faviconLink = document.getElementById(\"faviconLink\");\n\t\tfaviconLink.setAttribute(\"href\",\"data:\" + tiddler.fields.type + \";base64,\" + tiddler.fields.text);\n\t}\n}\n\n})();\n",
            "title": "$:/core/modules/startup/favicon.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/info.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/info.js\ntype: application/javascript\nmodule-type: startup\n\nInitialise $:/info tiddlers via $:/temp/info-plugin pseudo-plugin\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"info\";\nexports.before = [\"startup\"];\nexports.after = [\"load-modules\"];\nexports.synchronous = true;\n\nexports.startup = function() {\n\t// Collect up the info tiddlers\n\tvar infoTiddlerFields = {};\n\t// Give each info module a chance to fill in as many info tiddlers as they want\n\t$tw.modules.forEachModuleOfType(\"info\",function(title,moduleExports) {\n\t\tif(moduleExports && moduleExports.getInfoTiddlerFields) {\n\t\t\tvar tiddlerFieldsArray = moduleExports.getInfoTiddlerFields(infoTiddlerFields);\n\t\t\t$tw.utils.each(tiddlerFieldsArray,function(fields) {\n\t\t\t\tif(fields) {\n\t\t\t\t\tinfoTiddlerFields[fields.title] = fields;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\t// Bake the info tiddlers into a plugin\n\tvar fields = {\n\t\ttitle: \"$:/temp/info-plugin\",\n\t\ttype: \"application/json\",\n\t\t\"plugin-type\": \"info\",\n\t\ttext: JSON.stringify({tiddlers: infoTiddlerFields},null,$tw.config.preferences.jsonSpaces)\n\t};\n\t$tw.wiki.addTiddler(new $tw.Tiddler(fields));\n\t$tw.wiki.readPluginInfo();\n\t$tw.wiki.registerPluginTiddlers(\"info\");\n\t$tw.wiki.unpackPluginTiddlers();\n};\n\n})();\n",
            "title": "$:/core/modules/startup/info.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/load-modules.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/load-modules.js\ntype: application/javascript\nmodule-type: startup\n\nLoad core modules\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"load-modules\";\nexports.synchronous = true;\n\nexports.startup = function() {\n\t// Load modules\n\t$tw.modules.applyMethods(\"utils\",$tw.utils);\n\tif($tw.node) {\n\t\t$tw.modules.applyMethods(\"utils-node\",$tw.utils);\n\t}\n\t$tw.modules.applyMethods(\"global\",$tw);\n\t$tw.modules.applyMethods(\"config\",$tw.config);\n\t$tw.Tiddler.fieldModules = $tw.modules.getModulesByTypeAsHashmap(\"tiddlerfield\");\n\t$tw.modules.applyMethods(\"tiddlermethod\",$tw.Tiddler.prototype);\n\t$tw.modules.applyMethods(\"wikimethod\",$tw.Wiki.prototype);\n\t$tw.modules.applyMethods(\"tiddlerdeserializer\",$tw.Wiki.tiddlerDeserializerModules);\n\t$tw.macros = $tw.modules.getModulesByTypeAsHashmap(\"macro\");\n\t$tw.wiki.initParsers();\n\t$tw.Commander.initCommands();\n};\n\n})();\n",
            "title": "$:/core/modules/startup/load-modules.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/password.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/password.js\ntype: application/javascript\nmodule-type: startup\n\nPassword handling\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"password\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\nexports.startup = function() {\n\t$tw.rootWidget.addEventListener(\"tm-set-password\",function(event) {\n\t\t$tw.passwordPrompt.createPrompt({\n\t\t\tserviceName: $tw.language.getString(\"Encryption/PromptSetPassword\"),\n\t\t\tnoUserName: true,\n\t\t\tsubmitText: $tw.language.getString(\"Encryption/SetPassword\"),\n\t\t\tcanCancel: true,\n\t\t\trepeatPassword: true,\n\t\t\tcallback: function(data) {\n\t\t\t\tif(data) {\n\t\t\t\t\t$tw.crypto.setPassword(data.password);\n\t\t\t\t}\n\t\t\t\treturn true; // Get rid of the password prompt\n\t\t\t}\n\t\t});\n\t});\n\t$tw.rootWidget.addEventListener(\"tm-clear-password\",function(event) {\n\t\tif($tw.browser) {\n\t\t\tif(!confirm($tw.language.getString(\"Encryption/ConfirmClearPassword\"))) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t$tw.crypto.setPassword(null);\n\t});\n\t// Ensure that $:/isEncrypted is maintained properly\n\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.utils.hop(changes,\"$:/isEncrypted\")) {\n\t\t\t$tw.crypto.updateCryptoStateTiddler();\n\t\t}\n\t});\n};\n\n})();\n",
            "title": "$:/core/modules/startup/password.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/render.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/render.js\ntype: application/javascript\nmodule-type: startup\n\nTitle, stylesheet and page rendering\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"render\";\nexports.platforms = [\"browser\"];\nexports.after = [\"story\"];\nexports.synchronous = true;\n\n// Default story and history lists\nvar PAGE_TITLE_TITLE = \"$:/core/wiki/title\";\nvar PAGE_STYLESHEET_TITLE = \"$:/core/ui/PageStylesheet\";\nvar PAGE_TEMPLATE_TITLE = \"$:/core/ui/PageTemplate\";\n\n// Time (in ms) that we defer refreshing changes to draft tiddlers\nvar DRAFT_TIDDLER_TIMEOUT_TITLE = \"$:/config/Drafts/TypingTimeout\";\nvar DRAFT_TIDDLER_TIMEOUT = 400;\n\nexports.startup = function() {\n\t// Set up the title\n\t$tw.titleWidgetNode = $tw.wiki.makeTranscludeWidget(PAGE_TITLE_TITLE,{document: $tw.fakeDocument, parseAsInline: true});\n\t$tw.titleContainer = $tw.fakeDocument.createElement(\"div\");\n\t$tw.titleWidgetNode.render($tw.titleContainer,null);\n\tdocument.title = $tw.titleContainer.textContent;\n\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\tif($tw.titleWidgetNode.refresh(changes,$tw.titleContainer,null)) {\n\t\t\tdocument.title = $tw.titleContainer.textContent;\n\t\t}\n\t});\n\t// Set up the styles\n\t$tw.styleWidgetNode = $tw.wiki.makeTranscludeWidget(PAGE_STYLESHEET_TITLE,{document: $tw.fakeDocument});\n\t$tw.styleContainer = $tw.fakeDocument.createElement(\"style\");\n\t$tw.styleWidgetNode.render($tw.styleContainer,null);\n\t$tw.styleElement = document.createElement(\"style\");\n\t$tw.styleElement.innerHTML = $tw.styleContainer.textContent;\n\tdocument.head.insertBefore($tw.styleElement,document.head.firstChild);\n\t$tw.wiki.addEventListener(\"change\",$tw.perf.report(\"styleRefresh\",function(changes) {\n\t\tif($tw.styleWidgetNode.refresh(changes,$tw.styleContainer,null)) {\n\t\t\t$tw.styleElement.innerHTML = $tw.styleContainer.textContent;\n\t\t}\n\t}));\n\t// Display the $:/core/ui/PageTemplate tiddler to kick off the display\n\t$tw.perf.report(\"mainRender\",function() {\n\t\t$tw.pageWidgetNode = $tw.wiki.makeTranscludeWidget(PAGE_TEMPLATE_TITLE,{document: document, parentWidget: $tw.rootWidget});\n\t\t$tw.pageContainer = document.createElement(\"div\");\n\t\t$tw.utils.addClass($tw.pageContainer,\"tc-page-container-wrapper\");\n\t\tdocument.body.insertBefore($tw.pageContainer,document.body.firstChild);\n\t\t$tw.pageWidgetNode.render($tw.pageContainer,null);\n\t})();\n\t// Prepare refresh mechanism\n\tvar deferredChanges = Object.create(null),\n\t\ttimerId;\n\tfunction refresh() {\n\t\t// Process the refresh\n\t\t$tw.pageWidgetNode.refresh(deferredChanges);\n\t\tdeferredChanges = Object.create(null);\n\t}\n\t// Add the change event handler\n\t$tw.wiki.addEventListener(\"change\",$tw.perf.report(\"mainRefresh\",function(changes) {\n\t\t// Check if only drafts have changed\n\t\tvar onlyDraftsHaveChanged = true;\n\t\tfor(var title in changes) {\n\t\t\tvar tiddler = $tw.wiki.getTiddler(title);\n\t\t\tif(!tiddler || !tiddler.hasField(\"draft.of\")) {\n\t\t\t\tonlyDraftsHaveChanged = false;\n\t\t\t}\n\t\t}\n\t\t// Defer the change if only drafts have changed\n\t\tif(timerId) {\n\t\t\tclearTimeout(timerId);\n\t\t}\n\t\ttimerId = null;\n\t\tif(onlyDraftsHaveChanged) {\n\t\t\tvar timeout = parseInt($tw.wiki.getTiddlerText(DRAFT_TIDDLER_TIMEOUT_TITLE,\"\"),10);\n\t\t\tif(isNaN(timeout)) {\n\t\t\t\ttimeout = 
DRAFT_TIDDLER_TIMEOUT;\n\t\t\t}\n\t\t\ttimerId = setTimeout(refresh,timeout);\n\t\t\t$tw.utils.extend(deferredChanges,changes);\n\t\t} else {\n\t\t\t$tw.utils.extend(deferredChanges,changes);\n\t\t\trefresh();\n\t\t}\n\t}));\n\t// Fix up the link between the root widget and the page container\n\t$tw.rootWidget.domNodes = [$tw.pageContainer];\n\t$tw.rootWidget.children = [$tw.pageWidgetNode];\n};\n\n})();\n",
            "title": "$:/core/modules/startup/render.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/rootwidget.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/rootwidget.js\ntype: application/javascript\nmodule-type: startup\n\nSetup the root widget and the core root widget handlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"rootwidget\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.before = [\"story\"];\nexports.synchronous = true;\n\nexports.startup = function() {\n\t// Install the modal message mechanism\n\t$tw.modal = new $tw.utils.Modal($tw.wiki);\n\t$tw.rootWidget.addEventListener(\"tm-modal\",function(event) {\n\t\t$tw.modal.display(event.param,{variables: event.paramObject});\n\t});\n\t// Install the notification  mechanism\n\t$tw.notifier = new $tw.utils.Notifier($tw.wiki);\n\t$tw.rootWidget.addEventListener(\"tm-notify\",function(event) {\n\t\t$tw.notifier.display(event.param,{variables: event.paramObject});\n\t});\n\t// Install the scroller\n\t$tw.pageScroller = new $tw.utils.PageScroller();\n\t$tw.rootWidget.addEventListener(\"tm-scroll\",function(event) {\n\t\t$tw.pageScroller.handleEvent(event);\n\t});\n\tvar fullscreen = $tw.utils.getFullScreenApis();\n\tif(fullscreen) {\n\t\t$tw.rootWidget.addEventListener(\"tm-full-screen\",function(event) {\n\t\t\tif(document[fullscreen._fullscreenElement]) {\n\t\t\t\tdocument[fullscreen._exitFullscreen]();\n\t\t\t} else {\n\t\t\t\tdocument.documentElement[fullscreen._requestFullscreen](Element.ALLOW_KEYBOARD_INPUT);\n\t\t\t}\n\t\t});\n\t}\n\t// If we're being viewed on a data: URI then give instructions for how to save\n\tif(document.location.protocol === \"data:\") {\n\t\t$tw.rootWidget.dispatchEvent({\n\t\t\ttype: \"tm-modal\",\n\t\t\tparam: \"$:/language/Modals/SaveInstructions\"\n\t\t});\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/startup/rootwidget.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup.js\ntype: application/javascript\nmodule-type: startup\n\nMiscellaneous startup logic for both the client and server.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"startup\";\nexports.after = [\"load-modules\"];\nexports.synchronous = true;\n\n// Set to `true` to enable performance instrumentation\nvar PERFORMANCE_INSTRUMENTATION_CONFIG_TITLE = \"$:/config/Performance/Instrumentation\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nexports.startup = function() {\n\tvar modules,n,m,f;\n\t// Minimal browser detection\n\tif($tw.browser) {\n\t\t$tw.browser.isIE = (/msie|trident/i.test(navigator.userAgent));\n\t\t$tw.browser.isFirefox = !!document.mozFullScreenEnabled;\n\t}\n\t// Platform detection\n\t$tw.platform = {};\n\tif($tw.browser) {\n\t\t$tw.platform.isMac = /Mac/.test(navigator.platform);\n\t\t$tw.platform.isWindows = /win/i.test(navigator.platform);\n\t\t$tw.platform.isLinux = /Linux/i.test(navigator.appVersion);\n\t} else {\n\t\tswitch(require(\"os\").platform()) {\n\t\t\tcase \"darwin\":\n\t\t\t\t$tw.platform.isMac = true;\n\t\t\t\tbreak;\n\t\t\tcase \"win32\":\n\t\t\t\t$tw.platform.isWindows = true;\n\t\t\t\tbreak;\n\t\t\tcase \"freebsd\":\n\t\t\t\t$tw.platform.isLinux = true;\n\t\t\t\tbreak;\n\t\t\tcase \"linux\":\n\t\t\t\t$tw.platform.isLinux = true;\n\t\t\t\tbreak;\n\t\t}\n\t}\n\t// Initialise version\n\t$tw.version = $tw.utils.extractVersionInfo();\n\t// Set up the performance framework\n\t$tw.perf = new $tw.Performance($tw.wiki.getTiddlerText(PERFORMANCE_INSTRUMENTATION_CONFIG_TITLE,\"no\") === \"yes\");\n\t// Kick off the language manager and switcher\n\t$tw.language = new $tw.Language();\n\t$tw.languageSwitcher = new $tw.PluginSwitcher({\n\t\twiki: $tw.wiki,\n\t\tpluginType: \"language\",\n\t\tcontrollerTitle: \"$:/language\",\n\t\tdefaultPlugins: [\n\t\t\t\"$:/languages/en-US\"\n\t\t]\n\t});\n\t// Kick off the theme manager\n\t$tw.themeManager = new $tw.PluginSwitcher({\n\t\twiki: $tw.wiki,\n\t\tpluginType: \"theme\",\n\t\tcontrollerTitle: \"$:/theme\",\n\t\tdefaultPlugins: [\n\t\t\t\"$:/themes/tiddlywiki/snowwhite\",\n\t\t\t\"$:/themes/tiddlywiki/vanilla\"\n\t\t]\n\t});\n\t// Kick off the keyboard manager\n\t$tw.keyboardManager = new $tw.KeyboardManager();\n\t// Clear outstanding tiddler store change events to avoid an unnecessary refresh cycle at startup\n\t$tw.wiki.clearTiddlerEventQueue();\n\t// Create a root widget for attaching event handlers. 
By using it as the parentWidget for another widget tree, one can reuse the event handlers\n\tif($tw.browser) {\n\t\t$tw.rootWidget = new widget.widget({\n\t\t\ttype: \"widget\",\n\t\t\tchildren: []\n\t\t},{\n\t\t\twiki: $tw.wiki,\n\t\t\tdocument: document\n\t\t});\n\t}\n\t// Find a working syncadaptor\n\t$tw.syncadaptor = undefined;\n\t$tw.modules.forEachModuleOfType(\"syncadaptor\",function(title,module) {\n\t\tif(!$tw.syncadaptor && module.adaptorClass) {\n\t\t\t$tw.syncadaptor = new module.adaptorClass({wiki: $tw.wiki});\n\t\t}\n\t});\n\t// Set up the syncer object if we've got a syncadaptor\n\tif($tw.syncadaptor) {\n\t\t$tw.syncer = new $tw.Syncer({wiki: $tw.wiki, syncadaptor: $tw.syncadaptor});\n\t} \n\t// Setup the saver handler\n\t$tw.saverHandler = new $tw.SaverHandler({wiki: $tw.wiki, dirtyTracking: !$tw.syncadaptor});\n\t// Host-specific startup\n\tif($tw.browser) {\n\t\t// Install the popup manager\n\t\t$tw.popup = new $tw.utils.Popup();\n\t\t// Install the animator\n\t\t$tw.anim = new $tw.utils.Animator();\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/startup.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/story.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/story.js\ntype: application/javascript\nmodule-type: startup\n\nLoad core modules\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"story\";\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\n// Default story and history lists\nvar DEFAULT_STORY_TITLE = \"$:/StoryList\";\nvar DEFAULT_HISTORY_TITLE = \"$:/HistoryList\";\n\n// Default tiddlers\nvar DEFAULT_TIDDLERS_TITLE = \"$:/DefaultTiddlers\";\n\n// Config\nvar CONFIG_UPDATE_ADDRESS_BAR = \"$:/config/Navigation/UpdateAddressBar\"; // Can be \"no\", \"permalink\", \"permaview\"\nvar CONFIG_UPDATE_HISTORY = \"$:/config/Navigation/UpdateHistory\"; // Can be \"yes\" or \"no\"\n\nexports.startup = function() {\n\t// Open startup tiddlers\n\topenStartupTiddlers();\n\tif($tw.browser) {\n\t\t// Set up location hash update\n\t\t$tw.wiki.addEventListener(\"change\",function(changes) {\n\t\t\tif($tw.utils.hop(changes,DEFAULT_STORY_TITLE) || $tw.utils.hop(changes,DEFAULT_HISTORY_TITLE)) {\n\t\t\t\tupdateLocationHash({\n\t\t\t\t\tupdateAddressBar: $tw.wiki.getTiddlerText(CONFIG_UPDATE_ADDRESS_BAR,\"permaview\").trim(),\n\t\t\t\t\tupdateHistory: $tw.wiki.getTiddlerText(CONFIG_UPDATE_HISTORY,\"no\").trim()\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\t\t// Listen for changes to the browser location hash\n\t\twindow.addEventListener(\"hashchange\",function() {\n\t\t\tvar hash = $tw.utils.getLocationHash();\n\t\t\tif(hash !== $tw.locationHash) {\n\t\t\t\t$tw.locationHash = hash;\n\t\t\t\topenStartupTiddlers({defaultToCurrentStory: true});\n\t\t\t}\n\t\t},false);\n\t\t// Listen for the tm-browser-refresh message\n\t\t$tw.rootWidget.addEventListener(\"tm-browser-refresh\",function(event) {\n\t\t\twindow.location.reload(true);\n\t\t});\n\t\t// Listen for the tm-home message\n\t\t$tw.rootWidget.addEventListener(\"tm-home\",function(event) {\n\t\t\twindow.location.hash = \"\";\n\t\t\tvar storyFilter = $tw.wiki.getTiddlerText(DEFAULT_TIDDLERS_TITLE),\n\t\t\t\tstoryList = $tw.wiki.filterTiddlers(storyFilter);\n\t\t\t//invoke any hooks that might change the default story list\n\t\t\tstoryList = $tw.hooks.invokeHook(\"th-opening-default-tiddlers-list\",storyList);\n\t\t\t$tw.wiki.addTiddler({title: DEFAULT_STORY_TITLE, text: \"\", list: storyList},$tw.wiki.getModificationFields());\n\t\t\tif(storyList[0]) {\n\t\t\t\t$tw.wiki.addToHistory(storyList[0]);\t\t\t\t\n\t\t\t}\n\t\t});\n\t\t// Listen for the tm-permalink message\n\t\t$tw.rootWidget.addEventListener(\"tm-permalink\",function(event) {\n\t\t\tupdateLocationHash({\n\t\t\t\tupdateAddressBar: \"permalink\",\n\t\t\t\tupdateHistory: $tw.wiki.getTiddlerText(CONFIG_UPDATE_HISTORY,\"no\").trim(),\n\t\t\t\ttargetTiddler: event.param || event.tiddlerTitle\n\t\t\t});\n\t\t});\n\t\t// Listen for the tm-permaview message\n\t\t$tw.rootWidget.addEventListener(\"tm-permaview\",function(event) {\n\t\t\tupdateLocationHash({\n\t\t\t\tupdateAddressBar: \"permaview\",\n\t\t\t\tupdateHistory: $tw.wiki.getTiddlerText(CONFIG_UPDATE_HISTORY,\"no\").trim(),\n\t\t\t\ttargetTiddler: event.param || event.tiddlerTitle\n\t\t\t});\n\t\t});\n\t}\n};\n\n/*\nProcess the location hash to open the specified tiddlers. Options:\ndefaultToCurrentStory: If true, the current story is retained as the default, instead of opening the default tiddlers\n*/\nfunction openStartupTiddlers(options) {\n\toptions = options || {};\n\t// Work out the target tiddler and the story filter. 
\"null\" means \"unspecified\"\n\tvar target = null,\n\t\tstoryFilter = null;\n\tif($tw.locationHash.length > 1) {\n\t\tvar hash = $tw.locationHash.substr(1),\n\t\t\tsplit = hash.indexOf(\":\");\n\t\tif(split === -1) {\n\t\t\ttarget = decodeURIComponent(hash.trim());\n\t\t} else {\n\t\t\ttarget = decodeURIComponent(hash.substr(0,split).trim());\n\t\t\tstoryFilter = decodeURIComponent(hash.substr(split + 1).trim());\n\t\t}\n\t}\n\t// If the story wasn't specified use the current tiddlers or a blank story\n\tif(storyFilter === null) {\n\t\tif(options.defaultToCurrentStory) {\n\t\t\tvar currStoryList = $tw.wiki.getTiddlerList(DEFAULT_STORY_TITLE);\n\t\t\tstoryFilter = $tw.utils.stringifyList(currStoryList);\n\t\t} else {\n\t\t\tif(target && target !== \"\") {\n\t\t\t\tstoryFilter = \"\";\n\t\t\t} else {\n\t\t\t\tstoryFilter = $tw.wiki.getTiddlerText(DEFAULT_TIDDLERS_TITLE);\n\t\t\t}\n\t\t}\n\t}\n\t// Process the story filter to get the story list\n\tvar storyList = $tw.wiki.filterTiddlers(storyFilter);\n\t// Invoke any hooks that want to change the default story list\n\tstoryList = $tw.hooks.invokeHook(\"th-opening-default-tiddlers-list\",storyList);\n\t// If the target tiddler isn't included then splice it in at the top\n\tif(target && storyList.indexOf(target) === -1) {\n\t\tstoryList.unshift(target);\n\t}\n\t// Save the story list\n\t$tw.wiki.addTiddler({title: DEFAULT_STORY_TITLE, text: \"\", list: storyList},$tw.wiki.getModificationFields());\n\t// If a target tiddler was specified add it to the history stack\n\tif(target && target !== \"\") {\n\t\t// The target tiddler doesn't need double square brackets, but we'll silently remove them if they're present\n\t\tif(target.indexOf(\"[[\") === 0 && target.substr(-2) === \"]]\") {\n\t\t\ttarget = target.substr(2,target.length - 4);\n\t\t}\n\t\t$tw.wiki.addToHistory(target);\n\t} else if(storyList.length > 0) {\n\t\t$tw.wiki.addToHistory(storyList[0]);\n\t}\n}\n\n/*\noptions: See below\noptions.updateAddressBar: \"permalink\", \"permaview\" or \"no\" (defaults to \"permaview\")\noptions.updateHistory: \"yes\" or \"no\" (defaults to \"no\")\noptions.targetTiddler: optional title of target tiddler for permalink\n*/\nfunction updateLocationHash(options) {\n\tif(options.updateAddressBar !== \"no\") {\n\t\t// Get the story and the history stack\n\t\tvar storyList = $tw.wiki.getTiddlerList(DEFAULT_STORY_TITLE),\n\t\t\thistoryList = $tw.wiki.getTiddlerData(DEFAULT_HISTORY_TITLE,[]),\n\t\t\ttargetTiddler = \"\";\n\t\tif(options.targetTiddler) {\n\t\t\ttargetTiddler = options.targetTiddler;\n\t\t} else {\n\t\t\t// The target tiddler is the one at the top of the stack\n\t\t\tif(historyList.length > 0) {\n\t\t\t\ttargetTiddler = historyList[historyList.length-1].title;\n\t\t\t}\n\t\t\t// Blank the target tiddler if it isn't present in the story\n\t\t\tif(storyList.indexOf(targetTiddler) === -1) {\n\t\t\t\ttargetTiddler = \"\";\n\t\t\t}\n\t\t}\n\t\t// Assemble the location hash\n\t\tif(options.updateAddressBar === \"permalink\") {\n\t\t\t$tw.locationHash = \"#\" + encodeURIComponent(targetTiddler);\n\t\t} else {\n\t\t\t$tw.locationHash = \"#\" + encodeURIComponent(targetTiddler) + \":\" + encodeURIComponent($tw.utils.stringifyList(storyList));\n\t\t}\n\t\t// Only change the location hash if we must, thus avoiding unnecessary onhashchange events\n\t\tif($tw.utils.getLocationHash() !== $tw.locationHash) {\n\t\t\tif(options.updateHistory === \"yes\") {\n\t\t\t\t// Assign the location hash so that history is updated\n\t\t\t\twindow.location.hash = 
$tw.locationHash;\n\t\t\t} else {\n\t\t\t\t// We use replace so that browser history isn't affected\n\t\t\t\twindow.location.replace(window.location.toString().split(\"#\")[0] + $tw.locationHash);\n\t\t\t}\n\t\t}\n\t}\n}\n\n})();\n",
            "title": "$:/core/modules/startup/story.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/startup/windows.js": {
            "text": "/*\\\ntitle: $:/core/modules/startup/windows.js\ntype: application/javascript\nmodule-type: startup\n\nSetup root widget handlers for the messages concerned with opening external browser windows\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Export name and synchronous status\nexports.name = \"windows\";\nexports.platforms = [\"browser\"];\nexports.after = [\"startup\"];\nexports.synchronous = true;\n\n// Global to keep track of open windows (hashmap by title)\nvar windows = {};\n\nexports.startup = function() {\n\t// Handle open window message\n\t$tw.rootWidget.addEventListener(\"tm-open-window\",function(event) {\n\t\t// Get the parameters\n\t\tvar refreshHandler,\n\t\t\ttitle = event.param || event.tiddlerTitle,\n\t\t\tparamObject = event.paramObject || {},\n\t\t\ttemplate = paramObject.template || \"$:/core/templates/single.tiddler.window\",\n\t\t\twidth = paramObject.width || \"700\",\n\t\t\theight = paramObject.height || \"600\",\n\t\t\tvariables = $tw.utils.extend({},paramObject,{currentTiddler: title});\n\t\t// Open the window\n\t\tvar srcWindow = window.open(\"\",\"external-\" + title,\"scrollbars,width=\" + width + \",height=\" + height),\n\t\t\tsrcDocument = srcWindow.document;\n\t\twindows[title] = srcWindow;\n\t\t// Check for reopening the same window\n\t\tif(srcWindow.haveInitialisedWindow) {\n\t\t\treturn;\n\t\t}\n\t\t// Initialise the document\n\t\tsrcDocument.write(\"<html><head></head><body class='tc-body tc-single-tiddler-window'></body></html>\");\n\t\tsrcDocument.close();\n\t\tsrcDocument.title = title;\n\t\tsrcWindow.addEventListener(\"beforeunload\",function(event) {\n\t\t\tdelete windows[title];\n\t\t\t$tw.wiki.removeEventListener(\"change\",refreshHandler);\n\t\t},false);\n\t\t// Set up the styles\n\t\tvar styleWidgetNode = $tw.wiki.makeTranscludeWidget(\"$:/core/ui/PageStylesheet\",{document: $tw.fakeDocument, variables: variables}),\n\t\t\tstyleContainer = $tw.fakeDocument.createElement(\"style\");\n\t\tstyleWidgetNode.render(styleContainer,null);\n\t\tvar styleElement = srcDocument.createElement(\"style\");\n\t\tstyleElement.innerHTML = styleContainer.textContent;\n\t\tsrcDocument.head.insertBefore(styleElement,srcDocument.head.firstChild);\n\t\t// Render the text of the tiddler\n\t\tvar parser = $tw.wiki.parseTiddler(template),\n\t\t\twidgetNode = $tw.wiki.makeWidget(parser,{document: srcDocument, parentWidget: $tw.rootWidget, variables: variables});\n\t\twidgetNode.render(srcDocument.body,srcDocument.body.firstChild);\n\t\t// Function to handle refreshes\n\t\trefreshHandler = function(changes) {\n\t\t\tif(styleWidgetNode.refresh(changes,styleContainer,null)) {\n\t\t\t\tstyleElement.innerHTML = styleContainer.textContent;\n\t\t\t}\n\t\t\twidgetNode.refresh(changes);\n\t\t};\n\t\t$tw.wiki.addEventListener(\"change\",refreshHandler);\n\t\tsrcWindow.haveInitialisedWindow = true;\n\t});\n\t// Close open windows when unloading main window\n\t$tw.addUnloadTask(function() {\n\t\t$tw.utils.each(windows,function(win) {\n\t\t\twin.close();\n\t\t});\n\t});\n\n};\n\n})();\n",
            "title": "$:/core/modules/startup/windows.js",
            "type": "application/javascript",
            "module-type": "startup"
        },
        "$:/core/modules/story.js": {
            "text": "/*\\\ntitle: $:/core/modules/story.js\ntype: application/javascript\nmodule-type: global\n\nLightweight object for managing interactions with the story and history lists.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nConstruct Story object with options:\nwiki: reference to wiki object to use to resolve tiddler titles\nstoryTitle: title of story list tiddler\nhistoryTitle: title of history list tiddler\n*/\nfunction Story(options) {\n\toptions = options || {};\n\tthis.wiki = options.wiki || $tw.wiki;\n\tthis.storyTitle = options.storyTitle || \"$:/StoryList\";\n\tthis.historyTitle = options.historyTitle || \"$:/HistoryList\";\n};\n\nStory.prototype.navigateTiddler = function(navigateTo,navigateFromTitle,navigateFromClientRect) {\n\tthis.addToStory(navigateTo,navigateFromTitle);\n\tthis.addToHistory(navigateTo,navigateFromClientRect);\n};\n\nStory.prototype.getStoryList = function() {\n\treturn this.wiki.getTiddlerList(this.storyTitle) || [];\n};\n\nStory.prototype.addToStory = function(navigateTo,navigateFromTitle,options) {\n\toptions = options || {};\n\tvar storyList = this.getStoryList();\n\t// See if the tiddler is already there\n\tvar slot = storyList.indexOf(navigateTo);\n\t// Quit if it already exists in the story river\n\tif(slot >= 0) {\n\t\treturn;\n\t}\n\t// First we try to find the position of the story element we navigated from\n\tvar fromIndex = storyList.indexOf(navigateFromTitle);\n\tif(fromIndex >= 0) {\n\t\t// The tiddler is added from inside the river\n\t\t// Determine where to insert the tiddler; Fallback is \"below\"\n\t\tswitch(options.openLinkFromInsideRiver) {\n\t\t\tcase \"top\":\n\t\t\t\tslot = 0;\n\t\t\t\tbreak;\n\t\t\tcase \"bottom\":\n\t\t\t\tslot = storyList.length;\n\t\t\t\tbreak;\n\t\t\tcase \"above\":\n\t\t\t\tslot = fromIndex;\n\t\t\t\tbreak;\n\t\t\tcase \"below\": // Intentional fall-through\n\t\t\tdefault:\n\t\t\t\tslot = fromIndex + 1;\n\t\t\t\tbreak;\n\t\t}\n\t} else {\n\t\t// The tiddler is opened from outside the river. Determine where to insert the tiddler; default is \"top\"\n\t\tif(options.openLinkFromOutsideRiver === \"bottom\") {\n\t\t\t// Insert at bottom\n\t\t\tslot = storyList.length;\n\t\t} else {\n\t\t\t// Insert at top\n\t\t\tslot = 0;\n\t\t}\n\t}\n\t// Add the tiddler\n\tstoryList.splice(slot,0,navigateTo);\n\t// Save the story\n\tthis.saveStoryList(storyList);\n};\n\nStory.prototype.saveStoryList = function(storyList) {\n\tvar storyTiddler = this.wiki.getTiddler(this.storyTitle);\n\tthis.wiki.addTiddler(new $tw.Tiddler(\n\t\tthis.wiki.getCreationFields(),\n\t\t{title: this.storyTitle},\n\t\tstoryTiddler,\n\t\t{list: storyList},\n\t\tthis.wiki.getModificationFields()\n\t));\n};\n\nStory.prototype.addToHistory = function(navigateTo,navigateFromClientRect) {\n\tvar titles = $tw.utils.isArray(navigateTo) ? 
navigateTo : [navigateTo];\n\t// Add a new record to the top of the history stack\n\tvar historyList = this.wiki.getTiddlerData(this.historyTitle,[]);\n\t$tw.utils.each(titles,function(title) {\n\t\thistoryList.push({title: title, fromPageRect: navigateFromClientRect});\n\t});\n\tthis.wiki.setTiddlerData(this.historyTitle,historyList,{\"current-tiddler\": titles[titles.length-1]});\n};\n\nStory.prototype.storyCloseTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyCloseAllTiddlers = function() {\n// TBD\n};\n\nStory.prototype.storyCloseOtherTiddlers = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyEditTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyDeleteTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storySaveTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyCancelTiddler = function(targetTitle) {\n// TBD\n};\n\nStory.prototype.storyNewTiddler = function(targetTitle) {\n// TBD\n};\n\nexports.Story = Story;\n\n\n})();\n",
            "title": "$:/core/modules/story.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/storyviews/classic.js": {
            "text": "/*\\\ntitle: $:/core/modules/storyviews/classic.js\ntype: application/javascript\nmodule-type: storyview\n\nViews the story as a linear sequence\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar easing = \"cubic-bezier(0.645, 0.045, 0.355, 1)\"; // From http://easings.net/#easeInOutCubic\n\nvar ClassicStoryView = function(listWidget) {\n\tthis.listWidget = listWidget;\n};\n\nClassicStoryView.prototype.navigateTo = function(historyInfo) {\n\tvar listElementIndex = this.listWidget.findListItem(0,historyInfo.title);\n\tif(listElementIndex === undefined) {\n\t\treturn;\n\t}\n\tvar listItemWidget = this.listWidget.children[listElementIndex],\n\t\ttargetElement = listItemWidget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Scroll the node into view\n\tthis.listWidget.dispatchEvent({type: \"tm-scroll\", target: targetElement});\n};\n\nClassicStoryView.prototype.insert = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Get the current height of the tiddler\n\tvar computedStyle = window.getComputedStyle(targetElement),\n\t\tcurrMarginBottom = parseInt(computedStyle.marginBottom,10),\n\t\tcurrMarginTop = parseInt(computedStyle.marginTop,10),\n\t\tcurrHeight = targetElement.offsetHeight + currMarginTop;\n\t// Reset the margin once the transition is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(targetElement,[\n\t\t\t{transition: \"none\"},\n\t\t\t{marginBottom: \"\"}\n\t\t]);\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{marginBottom: (-currHeight) + \"px\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t// Transition to the final position\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"opacity \" + duration + \"ms \" + easing + \", \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms \" + easing},\n\t\t{marginBottom: currMarginBottom + \"px\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n};\n\nClassicStoryView.prototype.remove = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\tremoveElement = function() {\n\t\t\twidget.removeChildDomNodes();\n\t\t};\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Get the current height of the tiddler\n\tvar currWidth = targetElement.offsetWidth,\n\t\tcomputedStyle = window.getComputedStyle(targetElement),\n\t\tcurrMarginBottom = parseInt(computedStyle.marginBottom,10),\n\t\tcurrMarginTop = parseInt(computedStyle.marginTop,10),\n\t\tcurrHeight = targetElement.offsetHeight + currMarginTop;\n\t// Remove the dom nodes of the widget at the end of the transition\n\tsetTimeout(removeElement,duration);\n\t// Animate the closure\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{transform: \"translateX(0px)\"},\n\t\t{marginBottom:  currMarginBottom + \"px\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: 
$tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms \" + easing + \", \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms \" + easing},\n\t\t{transform: \"translateX(-\" + currWidth + \"px)\"},\n\t\t{marginBottom: (-currHeight) + \"px\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n};\n\nexports.classic = ClassicStoryView;\n\n})();",
            "title": "$:/core/modules/storyviews/classic.js",
            "type": "application/javascript",
            "module-type": "storyview"
        },
        "$:/core/modules/storyviews/pop.js": {
            "text": "/*\\\ntitle: $:/core/modules/storyviews/pop.js\ntype: application/javascript\nmodule-type: storyview\n\nAnimates list insertions and removals\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar PopStoryView = function(listWidget) {\n\tthis.listWidget = listWidget;\n};\n\nPopStoryView.prototype.navigateTo = function(historyInfo) {\n\tvar listElementIndex = this.listWidget.findListItem(0,historyInfo.title);\n\tif(listElementIndex === undefined) {\n\t\treturn;\n\t}\n\tvar listItemWidget = this.listWidget.children[listElementIndex],\n\t\ttargetElement = listItemWidget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Scroll the node into view\n\tthis.listWidget.dispatchEvent({type: \"tm-scroll\", target: targetElement});\n};\n\nPopStoryView.prototype.insert = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Reset once the transition is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(targetElement,[\n\t\t\t{transition: \"none\"},\n\t\t\t{transform: \"none\"}\n\t\t]);\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{transform: \"scale(2)\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t// Transition to the final position\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{transform: \"scale(1)\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n};\n\nPopStoryView.prototype.remove = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\tremoveElement = function() {\n\t\t\tif(targetElement.parentNode) {\n\t\t\t\twidget.removeChildDomNodes();\n\t\t\t}\n\t\t};\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Remove the element at the end of the transition\n\tsetTimeout(removeElement,duration);\n\t// Animate the closure\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: \"none\"},\n\t\t{transform: \"scale(1)\"},\n\t\t{opacity: \"1.0\"}\n\t]);\n\t$tw.utils.forceLayout(targetElement);\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{transform: \"scale(0.1)\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n};\n\nexports.pop = PopStoryView;\n\n})();\n",
            "title": "$:/core/modules/storyviews/pop.js",
            "type": "application/javascript",
            "module-type": "storyview"
        },
        "$:/core/modules/storyviews/zoomin.js": {
            "text": "/*\\\ntitle: $:/core/modules/storyviews/zoomin.js\ntype: application/javascript\nmodule-type: storyview\n\nZooms between individual tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar easing = \"cubic-bezier(0.645, 0.045, 0.355, 1)\"; // From http://easings.net/#easeInOutCubic\n\nvar ZoominListView = function(listWidget) {\n\tvar self = this;\n\tthis.listWidget = listWidget;\n\t// Get the index of the tiddler that is at the top of the history\n\tvar history = this.listWidget.wiki.getTiddlerDataCached(this.listWidget.historyTitle,[]),\n\t\ttargetTiddler;\n\tif(history.length > 0) {\n\t\ttargetTiddler = history[history.length-1].title;\n\t}\n\t// Make all the tiddlers position absolute, and hide all but the top (or first) one\n\t$tw.utils.each(this.listWidget.children,function(itemWidget,index) {\n\t\tvar domNode = itemWidget.findFirstDomNode();\n\t\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\t\tif(!(domNode instanceof Element)) {\n\t\t\treturn;\n\t\t}\n\t\tif((targetTiddler && targetTiddler !== itemWidget.parseTreeNode.itemTitle) || (!targetTiddler && index)) {\n\t\t\tdomNode.style.display = \"none\";\n\t\t} else {\n\t\t\tself.currentTiddlerDomNode = domNode;\n\t\t}\n\t\t$tw.utils.addClass(domNode,\"tc-storyview-zoomin-tiddler\");\n\t});\n};\n\nZoominListView.prototype.navigateTo = function(historyInfo) {\n\tvar duration = $tw.utils.getAnimationDuration(),\n\t\tlistElementIndex = this.listWidget.findListItem(0,historyInfo.title);\n\tif(listElementIndex === undefined) {\n\t\treturn;\n\t}\n\tvar listItemWidget = this.listWidget.children[listElementIndex],\n\t\ttargetElement = listItemWidget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Make the new tiddler be position absolute and visible so that we can measure it\n\t$tw.utils.addClass(targetElement,\"tc-storyview-zoomin-tiddler\");\n\t$tw.utils.setStyle(targetElement,[\n\t\t{display: \"block\"},\n\t\t{transformOrigin: \"0 0\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t{transition: \"none\"},\n\t\t{opacity: \"0.0\"}\n\t]);\n\t// Get the position of the source node, or use the centre of the window as the source position\n\tvar sourceBounds = historyInfo.fromPageRect || {\n\t\t\tleft: window.innerWidth/2 - 2,\n\t\t\ttop: window.innerHeight/2 - 2,\n\t\t\twidth: window.innerWidth/8,\n\t\t\theight: window.innerHeight/8\n\t\t};\n\t// Try to find the title node in the target tiddler\n\tvar titleDomNode = findTitleDomNode(listItemWidget) || listItemWidget.findFirstDomNode(),\n\t\tzoomBounds = titleDomNode.getBoundingClientRect();\n\t// Compute the transform for the target tiddler to make the title lie over the source rectange\n\tvar targetBounds = targetElement.getBoundingClientRect(),\n\t\tscale = sourceBounds.width / zoomBounds.width,\n\t\tx = sourceBounds.left - targetBounds.left - (zoomBounds.left - targetBounds.left) * scale,\n\t\ty = sourceBounds.top - targetBounds.top - (zoomBounds.top - targetBounds.top) * scale;\n\t// Transform the target tiddler to its starting position\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transform: \"translateX(\" + x + \"px) translateY(\" + y + \"px) scale(\" + scale + \")\"}\n\t]);\n\t// Force layout\n\t$tw.utils.forceLayout(targetElement);\n\t// Apply the ending transitions with a timeout to ensure that the previously applied transformations are 
applied first\n\tvar self = this,\n\t\tprevCurrentTiddler = this.currentTiddlerDomNode;\n\tthis.currentTiddlerDomNode = targetElement;\n\t// Transform the target tiddler to its natural size\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t{opacity: \"1.0\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t{zIndex: \"500\"},\n\t]);\n\t// Transform the previous tiddler out of the way and then hide it\n\tif(prevCurrentTiddler && prevCurrentTiddler !== targetElement) {\n\t\tscale = zoomBounds.width / sourceBounds.width;\n\t\tx =  zoomBounds.left - targetBounds.left - (sourceBounds.left - targetBounds.left) * scale;\n\t\ty =  zoomBounds.top - targetBounds.top - (sourceBounds.top - targetBounds.top) * scale;\n\t\t$tw.utils.setStyle(prevCurrentTiddler,[\n\t\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t\t{opacity: \"0.0\"},\n\t\t\t{transformOrigin: \"0 0\"},\n\t\t\t{transform: \"translateX(\" + x + \"px) translateY(\" + y + \"px) scale(\" + scale + \")\"},\n\t\t\t{zIndex: \"0\"}\n\t\t]);\n\t\t// Hide the tiddler when the transition has finished\n\t\tsetTimeout(function() {\n\t\t\tif(self.currentTiddlerDomNode !== prevCurrentTiddler) {\n\t\t\t\tprevCurrentTiddler.style.display = \"none\";\n\t\t\t}\n\t\t},duration);\n\t}\n\t// Scroll the target into view\n//\t$tw.pageScroller.scrollIntoView(targetElement);\n};\n\n/*\nFind the first child DOM node of a widget that has the class \"tc-title\"\n*/\nfunction findTitleDomNode(widget,targetClass) {\n\ttargetClass = targetClass || \"tc-title\";\n\tvar domNode = widget.findFirstDomNode();\n\tif(domNode && domNode.querySelector) {\n\t\treturn domNode.querySelector(\".\" + targetClass);\n\t}\n\treturn null;\n}\n\nZoominListView.prototype.insert = function(widget) {\n\tvar targetElement = widget.findFirstDomNode();\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\treturn;\n\t}\n\t// Make the newly inserted node position absolute and hidden\n\t$tw.utils.addClass(targetElement,\"tc-storyview-zoomin-tiddler\");\n\t$tw.utils.setStyle(targetElement,[\n\t\t{display: \"none\"}\n\t]);\n};\n\nZoominListView.prototype.remove = function(widget) {\n\tvar targetElement = widget.findFirstDomNode(),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\tremoveElement = function() {\n\t\t\twidget.removeChildDomNodes();\n\t\t};\n\t// Abandon if the list entry isn't a DOM element (it might be a text node)\n\tif(!(targetElement instanceof Element)) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Abandon if hidden\n\tif(targetElement.style.display != \"block\" ) {\n\t\tremoveElement();\n\t\treturn;\n\t}\n\t// Set up the tiddler that is being closed\n\t$tw.utils.addClass(targetElement,\"tc-storyview-zoomin-tiddler\");\n\t$tw.utils.setStyle(targetElement,[\n\t\t{display: \"block\"},\n\t\t{transformOrigin: \"50% 50%\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t{transition: \"none\"},\n\t\t{zIndex: \"0\"}\n\t]);\n\t// We'll move back to the previous or next element in the story\n\tvar toWidget = widget.previousSibling();\n\tif(!toWidget) {\n\t\ttoWidget = widget.nextSibling();\n\t}\n\tvar toWidgetDomNode = toWidget && toWidget.findFirstDomNode();\n\t// Set up the tiddler we're moving back 
in\n\tif(toWidgetDomNode) {\n\t\t$tw.utils.addClass(toWidgetDomNode,\"tc-storyview-zoomin-tiddler\");\n\t\t$tw.utils.setStyle(toWidgetDomNode,[\n\t\t\t{display: \"block\"},\n\t\t\t{transformOrigin: \"50% 50%\"},\n\t\t\t{transform: \"translateX(0px) translateY(0px) scale(10)\"},\n\t\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t\t{opacity: \"0\"},\n\t\t\t{zIndex: \"500\"}\n\t\t]);\n\t\tthis.currentTiddlerDomNode = toWidgetDomNode;\n\t}\n\t// Animate them both\n\t// Force layout\n\t$tw.utils.forceLayout(this.listWidget.parentDomNode);\n\t// First, the tiddler we're closing\n\t$tw.utils.setStyle(targetElement,[\n\t\t{transformOrigin: \"50% 50%\"},\n\t\t{transform: \"translateX(0px) translateY(0px) scale(0.1)\"},\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms \" + easing + \", opacity \" + duration + \"ms \" + easing},\n\t\t{opacity: \"0\"},\n\t\t{zIndex: \"0\"}\n\t]);\n\tsetTimeout(removeElement,duration);\n\t// Now the tiddler we're going back to\n\tif(toWidgetDomNode) {\n\t\t$tw.utils.setStyle(toWidgetDomNode,[\n\t\t\t{transform: \"translateX(0px) translateY(0px) scale(1)\"},\n\t\t\t{opacity: \"1\"}\n\t\t]);\n\t}\n\treturn true; // Indicate that we'll delete the DOM node\n};\n\nexports.zoomin = ZoominListView;\n\n})();\n",
            "title": "$:/core/modules/storyviews/zoomin.js",
            "type": "application/javascript",
            "module-type": "storyview"
        },
        "$:/core/modules/syncer.js": {
            "text": "/*\\\ntitle: $:/core/modules/syncer.js\ntype: application/javascript\nmodule-type: global\n\nThe syncer tracks changes to the store. If a syncadaptor is used then individual tiddlers are synchronised through it. If there is no syncadaptor then the entire wiki is saved via saver modules.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nInstantiate the syncer with the following options:\nsyncadaptor: reference to syncadaptor to be used\nwiki: wiki to be synced\n*/\nfunction Syncer(options) {\n\tvar self = this;\n\tthis.wiki = options.wiki;\n\tthis.syncadaptor = options.syncadaptor;\n\t// Make a logger\n\tthis.logger = new $tw.utils.Logger(\"syncer\" + ($tw.browser ? \"-browser\" : \"\") + ($tw.node ? \"-server\" : \"\"));\n\t// Compile the dirty tiddler filter\n\tthis.filterFn = this.wiki.compileFilter(this.wiki.getTiddlerText(this.titleSyncFilter));\n\t// Record information for known tiddlers\n\tthis.readTiddlerInfo();\n\t// Tasks are {type: \"load\"/\"save\"/\"delete\", title:, queueTime:, lastModificationTime:}\n\tthis.taskQueue = {}; // Hashmap of tasks yet to be performed\n\tthis.taskInProgress = {}; // Hash of tasks in progress\n\tthis.taskTimerId = null; // Timer for task dispatch\n\tthis.pollTimerId = null; // Timer for polling server\n\t// Listen out for changes to tiddlers\n\tthis.wiki.addEventListener(\"change\",function(changes) {\n\t\tself.syncToServer(changes);\n\t});\n\t// Browser event handlers\n\tif($tw.browser) {\n\t\t// Set up our beforeunload handler\n\t\t$tw.addUnloadTask(function(event) {\n\t\t\tvar confirmationMessage;\n\t\t\tif(self.isDirty()) {\n\t\t\t\tconfirmationMessage = $tw.language.getString(\"UnsavedChangesWarning\");\n\t\t\t\tevent.returnValue = confirmationMessage; // Gecko\n\t\t\t}\n\t\t\treturn confirmationMessage;\n\t\t});\n\t\t// Listen out for login/logout/refresh events in the browser\n\t\t$tw.rootWidget.addEventListener(\"tm-login\",function() {\n\t\t\tself.handleLoginEvent();\n\t\t});\n\t\t$tw.rootWidget.addEventListener(\"tm-logout\",function() {\n\t\t\tself.handleLogoutEvent();\n\t\t});\n\t\t$tw.rootWidget.addEventListener(\"tm-server-refresh\",function() {\n\t\t\tself.handleRefreshEvent();\n\t\t});\n\t}\n\t// Listen out for lazyLoad events\n\tthis.wiki.addEventListener(\"lazyLoad\",function(title) {\n\t\tself.handleLazyLoadEvent(title);\n\t});\n\t// Get the login status\n\tthis.getStatus(function(err,isLoggedIn) {\n\t\t// Do a sync from the server\n\t\tself.syncFromServer();\n\t});\n}\n\n/*\nConstants\n*/\nSyncer.prototype.titleIsLoggedIn = \"$:/status/IsLoggedIn\";\nSyncer.prototype.titleUserName = \"$:/status/UserName\";\nSyncer.prototype.titleSyncFilter = \"$:/config/SyncFilter\";\nSyncer.prototype.titleSavedNotification = \"$:/language/Notifications/Save/Done\";\nSyncer.prototype.taskTimerInterval = 1 * 1000; // Interval for sync timer\nSyncer.prototype.throttleInterval = 1 * 1000; // Defer saving tiddlers if they've changed in the last 1s...\nSyncer.prototype.fallbackInterval = 10 * 1000; // Unless the task is older than 10s\nSyncer.prototype.pollTimerInterval = 60 * 1000; // Interval for polling for changes from the adaptor\n\n\n/*\nRead (or re-read) the latest tiddler info from the store\n*/\nSyncer.prototype.readTiddlerInfo = function() {\n\t// Hashmap by title of {revision:,changeCount:,adaptorInfo:}\n\tthis.tiddlerInfo = {};\n\t// Record information for known tiddlers\n\tvar self = this,\n\t\ttiddlers = 
this.filterFn.call(this.wiki);\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\tself.tiddlerInfo[title] = {\n\t\t\trevision: tiddler.fields.revision,\n\t\t\tadaptorInfo: self.syncadaptor && self.syncadaptor.getTiddlerInfo(tiddler),\n\t\t\tchangeCount: self.wiki.getChangeCount(title),\n\t\t\thasBeenLazyLoaded: false\n\t\t};\n\t});\n};\n\n/*\nCreate an tiddlerInfo structure if it doesn't already exist\n*/\nSyncer.prototype.createTiddlerInfo = function(title) {\n\tif(!$tw.utils.hop(this.tiddlerInfo,title)) {\n\t\tthis.tiddlerInfo[title] = {\n\t\t\trevision: null,\n\t\t\tadaptorInfo: {},\n\t\t\tchangeCount: -1,\n\t\t\thasBeenLazyLoaded: false\n\t\t};\n\t}\n};\n\n/*\nChecks whether the wiki is dirty (ie the window shouldn't be closed)\n*/\nSyncer.prototype.isDirty = function() {\n\treturn (this.numTasksInQueue() > 0) || (this.numTasksInProgress() > 0);\n};\n\n/*\nUpdate the document body with the class \"tc-dirty\" if the wiki has unsaved/unsynced changes\n*/\nSyncer.prototype.updateDirtyStatus = function() {\n\tif($tw.browser) {\n\t\t$tw.utils.toggleClass(document.body,\"tc-dirty\",this.isDirty());\n\t}\n};\n\n/*\nSave an incoming tiddler in the store, and updates the associated tiddlerInfo\n*/\nSyncer.prototype.storeTiddler = function(tiddlerFields) {\n\t// Save the tiddler\n\tvar tiddler = new $tw.Tiddler(this.wiki.getTiddler(tiddlerFields.title),tiddlerFields);\n\tthis.wiki.addTiddler(tiddler);\n\t// Save the tiddler revision and changeCount details\n\tthis.tiddlerInfo[tiddlerFields.title] = {\n\t\trevision: tiddlerFields.revision,\n\t\tadaptorInfo: this.syncadaptor.getTiddlerInfo(tiddler),\n\t\tchangeCount: this.wiki.getChangeCount(tiddlerFields.title),\n\t\thasBeenLazyLoaded: true\n\t};\n};\n\nSyncer.prototype.getStatus = function(callback) {\n\tvar self = this;\n\t// Check if the adaptor supports getStatus()\n\tif(this.syncadaptor && this.syncadaptor.getStatus) {\n\t\t// Mark us as not logged in\n\t\tthis.wiki.addTiddler({title: this.titleIsLoggedIn,text: \"no\"});\n\t\t// Get login status\n\t\tthis.syncadaptor.getStatus(function(err,isLoggedIn,username) {\n\t\t\tif(err) {\n\t\t\t\tself.logger.alert(err);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// Set the various status tiddlers\n\t\t\tself.wiki.addTiddler({title: self.titleIsLoggedIn,text: isLoggedIn ? 
\"yes\" : \"no\"});\n\t\t\tif(isLoggedIn) {\n\t\t\t\tself.wiki.addTiddler({title: self.titleUserName,text: username || \"\"});\n\t\t\t} else {\n\t\t\t\tself.wiki.deleteTiddler(self.titleUserName);\n\t\t\t}\n\t\t\t// Invoke the callback\n\t\t\tif(callback) {\n\t\t\t\tcallback(err,isLoggedIn,username);\n\t\t\t}\n\t\t});\n\t} else {\n\t\tcallback(null,true,\"UNAUTHENTICATED\");\n\t}\n};\n\n/*\nSynchronise from the server by reading the skinny tiddler list and queuing up loads for any tiddlers that we don't already have up to date\n*/\nSyncer.prototype.syncFromServer = function() {\n\tif(this.syncadaptor && this.syncadaptor.getSkinnyTiddlers) {\n\t\tthis.logger.log(\"Retrieving skinny tiddler list\");\n\t\tvar self = this;\n\t\tif(this.pollTimerId) {\n\t\t\tclearTimeout(this.pollTimerId);\n\t\t\tthis.pollTimerId = null;\n\t\t}\n\t\tthis.syncadaptor.getSkinnyTiddlers(function(err,tiddlers) {\n\t\t\t// Trigger the next sync\n\t\t\tself.pollTimerId = setTimeout(function() {\n\t\t\t\tself.pollTimerId = null;\n\t\t\t\tself.syncFromServer.call(self);\n\t\t\t},self.pollTimerInterval);\n\t\t\t// Check for errors\n\t\t\tif(err) {\n\t\t\t\tself.logger.alert($tw.language.getString(\"Error/RetrievingSkinny\") + \":\",err);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// Process each incoming tiddler\n\t\t\tfor(var t=0; t<tiddlers.length; t++) {\n\t\t\t\t// Get the incoming tiddler fields, and the existing tiddler\n\t\t\t\tvar tiddlerFields = tiddlers[t],\n\t\t\t\t\tincomingRevision = tiddlerFields.revision + \"\",\n\t\t\t\t\ttiddler = self.wiki.getTiddler(tiddlerFields.title),\n\t\t\t\t\ttiddlerInfo = self.tiddlerInfo[tiddlerFields.title],\n\t\t\t\t\tcurrRevision = tiddlerInfo ? tiddlerInfo.revision : null;\n\t\t\t\t// Ignore the incoming tiddler if it's the same as the revision we've already got\n\t\t\t\tif(currRevision !== incomingRevision) {\n\t\t\t\t\t// Do a full load if we've already got a fat version of the tiddler\n\t\t\t\t\tif(tiddler && tiddler.fields.text !== undefined) {\n\t\t\t\t\t\t// Do a full load of this tiddler\n\t\t\t\t\t\tself.enqueueSyncTask({\n\t\t\t\t\t\t\ttype: \"load\",\n\t\t\t\t\t\t\ttitle: tiddlerFields.title\n\t\t\t\t\t\t});\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Load the skinny version of the tiddler\n\t\t\t\t\t\tself.storeTiddler(tiddlerFields);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n};\n\n/*\nSynchronise a set of changes to the server\n*/\nSyncer.prototype.syncToServer = function(changes) {\n\tvar self = this,\n\t\tnow = Date.now(),\n\t\tfilteredChanges = this.filterFn.call(this.wiki,function(callback) {\n\t\t\t$tw.utils.each(changes,function(change,title) {\n\t\t\t\tvar tiddler = self.wiki.getTiddler(title);\n\t\t\t\tcallback(tiddler,title);\n\t\t\t});\n\t\t});\n\t$tw.utils.each(changes,function(change,title,object) {\n\t\t// Process the change if it is a deletion of a tiddler we're already syncing, or is on the filtered change list\n\t\tif((change.deleted && $tw.utils.hop(self.tiddlerInfo,title)) || filteredChanges.indexOf(title) !== -1) {\n\t\t\t// Queue a task to sync this tiddler\n\t\t\tself.enqueueSyncTask({\n\t\t\t\ttype: change.deleted ? 
\"delete\" : \"save\",\n\t\t\t\ttitle: title\n\t\t\t});\n\t\t}\n\t});\n};\n\n/*\nLazily load a skinny tiddler if we can\n*/\nSyncer.prototype.handleLazyLoadEvent = function(title) {\n\t// Don't lazy load the same tiddler twice\n\tvar info = this.tiddlerInfo[title];\n\tif(!info || !info.hasBeenLazyLoaded) {\n\t\tthis.createTiddlerInfo(title);\n\t\tthis.tiddlerInfo[title].hasBeenLazyLoaded = true;\n\t\t// Queue up a sync task to load this tiddler\n\t\tthis.enqueueSyncTask({\n\t\t\ttype: \"load\",\n\t\t\ttitle: title\n\t\t});\t\t\n\t}\n};\n\n/*\nDispay a password prompt and allow the user to login\n*/\nSyncer.prototype.handleLoginEvent = function() {\n\tvar self = this;\n\tthis.getStatus(function(err,isLoggedIn,username) {\n\t\tif(!isLoggedIn) {\n\t\t\t$tw.passwordPrompt.createPrompt({\n\t\t\t\tserviceName: $tw.language.getString(\"LoginToTiddlySpace\"),\n\t\t\t\tcallback: function(data) {\n\t\t\t\t\tself.login(data.username,data.password,function(err,isLoggedIn) {\n\t\t\t\t\t\tself.syncFromServer();\n\t\t\t\t\t});\n\t\t\t\t\treturn true; // Get rid of the password prompt\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n};\n\n/*\nAttempt to login to TiddlyWeb.\n\tusername: username\n\tpassword: password\n\tcallback: invoked with arguments (err,isLoggedIn)\n*/\nSyncer.prototype.login = function(username,password,callback) {\n\tthis.logger.log(\"Attempting to login as\",username);\n\tvar self = this;\n\tif(this.syncadaptor.login) {\n\t\tthis.syncadaptor.login(username,password,function(err) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\tself.getStatus(function(err,isLoggedIn,username) {\n\t\t\t\tif(callback) {\n\t\t\t\t\tcallback(null,isLoggedIn);\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t} else {\n\t\tcallback(null,true);\n\t}\n};\n\n/*\nAttempt to log out of TiddlyWeb\n*/\nSyncer.prototype.handleLogoutEvent = function() {\n\tthis.logger.log(\"Attempting to logout\");\n\tvar self = this;\n\tif(this.syncadaptor.logout) {\n\t\tthis.syncadaptor.logout(function(err) {\n\t\t\tif(err) {\n\t\t\t\tself.logger.alert(err);\n\t\t\t} else {\n\t\t\t\tself.getStatus();\n\t\t\t}\n\t\t});\n\t}\n};\n\n/*\nImmediately refresh from the server\n*/\nSyncer.prototype.handleRefreshEvent = function() {\n\tthis.syncFromServer();\n};\n\n/*\nQueue up a sync task. If there is already a pending task for the tiddler, just update the last modification time\n*/\nSyncer.prototype.enqueueSyncTask = function(task) {\n\tvar self = this,\n\t\tnow = Date.now();\n\t// Set the timestamps on this task\n\ttask.queueTime = now;\n\ttask.lastModificationTime = now;\n\t// Fill in some tiddlerInfo if the tiddler is one we haven't seen before\n\tthis.createTiddlerInfo(task.title);\n\t// Bail if this is a save and the tiddler is already at the changeCount that the server has\n\tif(task.type === \"save\" && this.wiki.getChangeCount(task.title) <= this.tiddlerInfo[task.title].changeCount) {\n\t\treturn;\n\t}\n\t// Check if this tiddler is already in the queue\n\tif($tw.utils.hop(this.taskQueue,task.title)) {\n\t\t// this.logger.log(\"Re-queueing up sync task with type:\",task.type,\"title:\",task.title);\n\t\tvar existingTask = this.taskQueue[task.title];\n\t\t// If so, just update the last modification time\n\t\texistingTask.lastModificationTime = task.lastModificationTime;\n\t\t// If the new task is a save then we upgrade the existing task to a save. Thus a pending load is turned into a save if the tiddler changes locally in the meantime. 
But a pending save is not modified to become a load\n\t\tif(task.type === \"save\" || task.type === \"delete\") {\n\t\t\texistingTask.type = task.type;\n\t\t}\n\t} else {\n\t\t// this.logger.log(\"Queuing up sync task with type:\",task.type,\"title:\",task.title);\n\t\t// If it is not in the queue, insert it\n\t\tthis.taskQueue[task.title] = task;\n\t\tthis.updateDirtyStatus();\n\t}\n\t// Process the queue\n\t$tw.utils.nextTick(function() {self.processTaskQueue.call(self);});\n};\n\n/*\nReturn the number of tasks in progress\n*/\nSyncer.prototype.numTasksInProgress = function() {\n\treturn $tw.utils.count(this.taskInProgress);\n};\n\n/*\nReturn the number of tasks in the queue\n*/\nSyncer.prototype.numTasksInQueue = function() {\n\treturn $tw.utils.count(this.taskQueue);\n};\n\n/*\nTrigger a timeout if one isn't already outstanding\n*/\nSyncer.prototype.triggerTimeout = function() {\n\tvar self = this;\n\tif(!this.taskTimerId) {\n\t\tthis.taskTimerId = setTimeout(function() {\n\t\t\tself.taskTimerId = null;\n\t\t\tself.processTaskQueue.call(self);\n\t\t},self.taskTimerInterval);\n\t}\n};\n\n/*\nProcess the task queue, performing the next task if appropriate\n*/\nSyncer.prototype.processTaskQueue = function() {\n\tvar self = this;\n\t// Only process a task if the sync adaptor is fully initialised and we're not already performing a task. If we are already performing a task then we'll dispatch the next one when it completes\n\tif(this.syncadaptor.isReady() && this.numTasksInProgress() === 0) {\n\t\t// Choose the next task to perform\n\t\tvar task = this.chooseNextTask();\n\t\t// Perform the task if we had one\n\t\tif(task) {\n\t\t\t// Remove the task from the queue and add it to the in progress list\n\t\t\tdelete this.taskQueue[task.title];\n\t\t\tthis.taskInProgress[task.title] = task;\n\t\t\tthis.updateDirtyStatus();\n\t\t\t// Dispatch the task\n\t\t\tthis.dispatchTask(task,function(err) {\n\t\t\t\tif(err) {\n\t\t\t\t\tself.logger.alert(\"Sync error while processing '\" + task.title + \"':\\n\" + err);\n\t\t\t\t}\n\t\t\t\t// Mark that this task is no longer in progress\n\t\t\t\tdelete self.taskInProgress[task.title];\n\t\t\t\tself.updateDirtyStatus();\n\t\t\t\t// Process the next task\n\t\t\t\tself.processTaskQueue.call(self);\n\t\t\t});\n\t\t} else {\n\t\t\t// Make sure we've set a time if there wasn't a task to perform, but we've still got tasks in the queue\n\t\t\tif(this.numTasksInQueue() > 0) {\n\t\t\t\tthis.triggerTimeout();\n\t\t\t}\n\t\t}\n\t}\n};\n\n/*\nChoose the next applicable task\n*/\nSyncer.prototype.chooseNextTask = function() {\n\tvar self = this,\n\t\tcandidateTask = null,\n\t\tnow = Date.now();\n\t// Select the best candidate task\n\t$tw.utils.each(this.taskQueue,function(task,title) {\n\t\t// Exclude the task if there's one of the same name in progress\n\t\tif($tw.utils.hop(self.taskInProgress,title)) {\n\t\t\treturn;\n\t\t}\n\t\t// Exclude the task if it is a save and the tiddler has been modified recently, but not hit the fallback time\n\t\tif(task.type === \"save\" && (now - task.lastModificationTime) < self.throttleInterval &&\n\t\t\t(now - task.queueTime) < self.fallbackInterval) {\n\t\t\treturn;\n\t\t}\n\t\t// Exclude the task if it is newer than the current best candidate\n\t\tif(candidateTask && candidateTask.queueTime < task.queueTime) {\n\t\t\treturn;\n\t\t}\n\t\t// Now this is our best candidate\n\t\tcandidateTask = task;\n\t});\n\treturn candidateTask;\n};\n\n/*\nDispatch a task and invoke the callback\n*/\nSyncer.prototype.dispatchTask = 
function(task,callback) {\n\tvar self = this;\n\tif(task.type === \"save\") {\n\t\tvar changeCount = this.wiki.getChangeCount(task.title),\n\t\t\ttiddler = this.wiki.getTiddler(task.title);\n\t\tthis.logger.log(\"Dispatching 'save' task:\",task.title);\n\t\tif(tiddler) {\n\t\t\tthis.syncadaptor.saveTiddler(tiddler,function(err,adaptorInfo,revision) {\n\t\t\t\tif(err) {\n\t\t\t\t\treturn callback(err);\n\t\t\t\t}\n\t\t\t\t// Adjust the info stored about this tiddler\n\t\t\t\tself.tiddlerInfo[task.title] = {\n\t\t\t\t\tchangeCount: changeCount,\n\t\t\t\t\tadaptorInfo: adaptorInfo,\n\t\t\t\t\trevision: revision\n\t\t\t\t};\n\t\t\t\t// Invoke the callback\n\t\t\t\tcallback(null);\n\t\t\t},{\n\t\t\t\ttiddlerInfo: self.tiddlerInfo[task.title]\n\t\t\t});\n\t\t} else {\n\t\t\tthis.logger.log(\" Not Dispatching 'save' task:\",task.title,\"tiddler does not exist\");\n\t\t\treturn callback(null);\n\t\t}\n\t} else if(task.type === \"load\") {\n\t\t// Load the tiddler\n\t\tthis.logger.log(\"Dispatching 'load' task:\",task.title);\n\t\tthis.syncadaptor.loadTiddler(task.title,function(err,tiddlerFields) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\t// Store the tiddler\n\t\t\tif(tiddlerFields) {\n\t\t\t\tself.storeTiddler(tiddlerFields);\n\t\t\t}\n\t\t\t// Invoke the callback\n\t\t\tcallback(null);\n\t\t});\n\t} else if(task.type === \"delete\") {\n\t\t// Delete the tiddler\n\t\tthis.logger.log(\"Dispatching 'delete' task:\",task.title);\n\t\tthis.syncadaptor.deleteTiddler(task.title,function(err) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\tdelete self.tiddlerInfo[task.title];\n\t\t\t// Invoke the callback\n\t\t\tcallback(null);\n\t\t},{\n\t\t\ttiddlerInfo: self.tiddlerInfo[task.title]\n\t\t});\n\t}\n};\n\nexports.Syncer = Syncer;\n\n})();\n",
            "title": "$:/core/modules/syncer.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/tiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/tiddler.js\ntype: application/javascript\nmodule-type: tiddlermethod\n\nExtension methods for the $tw.Tiddler object (constructor and methods required at boot time are in boot/boot.js)\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.hasTag = function(tag) {\n\treturn this.fields.tags && this.fields.tags.indexOf(tag) !== -1;\n};\n\nexports.isPlugin = function() {\n\treturn this.fields.type === \"application/json\" && this.hasField(\"plugin-type\");\n};\n\nexports.isDraft = function() {\n\treturn this.hasField(\"draft.of\");\n};\n\nexports.getFieldString = function(field) {\n\tvar value = this.fields[field];\n\t// Check for a missing field\n\tif(value === undefined || value === null) {\n\t\treturn \"\";\n\t}\n\t// Parse the field with the associated module (if any)\n\tvar fieldModule = $tw.Tiddler.fieldModules[field];\n\tif(fieldModule && fieldModule.stringify) {\n\t\treturn fieldModule.stringify.call(this,value);\n\t} else {\n\t\treturn value.toString();\n\t}\n};\n\n/*\nGet all the fields as a name:value block. Options:\n\texclude: an array of field names to exclude\n*/\nexports.getFieldStringBlock = function(options) {\n\toptions = options || {};\n\tvar exclude = options.exclude || [];\n\tvar fields = [];\n\tfor(var field in this.fields) {\n\t\tif($tw.utils.hop(this.fields,field)) {\n\t\t\tif(exclude.indexOf(field) === -1) {\n\t\t\t\tfields.push(field + \": \" + this.getFieldString(field));\n\t\t\t}\n\t\t}\n\t}\n\treturn fields.join(\"\\n\");\n};\n\n/*\nCompare two tiddlers for equality\ntiddler: the tiddler to compare\nexcludeFields: array of field names to exclude from the comparison\n*/\nexports.isEqual = function(tiddler,excludeFields) {\n\tif(!(tiddler instanceof $tw.Tiddler)) {\n\t\treturn false;\n\t}\n\texcludeFields = excludeFields || [];\n\tvar self = this,\n\t\tdifferences = []; // Fields that have differences\n\t// Add to the differences array\n\tfunction addDifference(fieldName) {\n\t\t// Check for this field being excluded\n\t\tif(excludeFields.indexOf(fieldName) === -1) {\n\t\t\t// Save the field as a difference\n\t\t\t$tw.utils.pushTop(differences,fieldName);\n\t\t}\n\t}\n\t// Returns true if the two values of this field are equal\n\tfunction isFieldValueEqual(fieldName) {\n\t\tvar valueA = self.fields[fieldName],\n\t\t\tvalueB = tiddler.fields[fieldName];\n\t\t// Check for identical string values\n\t\tif(typeof(valueA) === \"string\" && typeof(valueB) === \"string\" && valueA === valueB) {\n\t\t\treturn true;\n\t\t}\n\t\t// Check for identical array values\n\t\tif($tw.utils.isArray(valueA) && $tw.utils.isArray(valueB) && $tw.utils.isArrayEqual(valueA,valueB)) {\n\t\t\treturn true;\n\t\t}\n\t\t// Otherwise the fields must be different\n\t\treturn false;\n\t}\n\t// Compare our fields\n\tfor(var fieldName in this.fields) {\n\t\tif(!isFieldValueEqual(fieldName)) {\n\t\t\taddDifference(fieldName);\n\t\t}\n\t}\n\t// There's a difference for every field in the other tiddler that we don't have\n\tfor(fieldName in tiddler.fields) {\n\t\tif(!(fieldName in this.fields)) {\n\t\t\taddDifference(fieldName);\n\t\t}\n\t}\n\t// Return whether there were any differences\n\treturn differences.length === 0;\n};\n\n})();\n",
            "title": "$:/core/modules/tiddler.js",
            "type": "application/javascript",
            "module-type": "tiddlermethod"
        },
        "$:/core/modules/upgraders/plugins.js": {
            "text": "/*\\\ntitle: $:/core/modules/upgraders/plugins.js\ntype: application/javascript\nmodule-type: upgrader\n\nUpgrader module that checks that plugins are newer than any already installed version\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar UPGRADE_LIBRARY_TITLE = \"$:/UpgradeLibrary\";\n\nvar BLOCKED_PLUGINS = {\n\t\"$:/themes/tiddlywiki/stickytitles\": {\n\t\tversions: [\"*\"]\n\t},\n\t\"$:/plugins/tiddlywiki/fullscreen\": {\n\t\tversions: [\"*\"]\n\t}\n};\n\nexports.upgrade = function(wiki,titles,tiddlers) {\n\tvar self = this,\n\t\tmessages = {},\n\t\tupgradeLibrary,\n\t\tgetLibraryTiddler = function(title) {\n\t\t\tif(!upgradeLibrary) {\n\t\t\t\tupgradeLibrary = wiki.getTiddlerData(UPGRADE_LIBRARY_TITLE,{});\n\t\t\t\tupgradeLibrary.tiddlers = upgradeLibrary.tiddlers || {};\n\t\t\t}\n\t\t\treturn upgradeLibrary.tiddlers[title];\n\t\t};\n\n\t// Go through all the incoming tiddlers\n\t$tw.utils.each(titles,function(title) {\n\t\tvar incomingTiddler = tiddlers[title];\n\t\t// Check if we're dealing with a plugin\n\t\tif(incomingTiddler && incomingTiddler[\"plugin-type\"] && incomingTiddler.version) {\n\t\t\t// Upgrade the incoming plugin if it is in the upgrade library\n\t\t\tvar libraryTiddler = getLibraryTiddler(title);\n\t\t\tif(libraryTiddler && libraryTiddler[\"plugin-type\"] && libraryTiddler.version) {\n\t\t\t\ttiddlers[title] = libraryTiddler;\n\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/Plugins/Upgraded\",{variables: {incoming: incomingTiddler.version, upgraded: libraryTiddler.version}});\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// Suppress the incoming plugin if it is older than the currently installed one\n\t\t\tvar existingTiddler = wiki.getTiddler(title);\n\t\t\tif(existingTiddler && existingTiddler.hasField(\"plugin-type\") && existingTiddler.hasField(\"version\")) {\n\t\t\t\t// Reject the incoming plugin by blanking all its fields\n\t\t\t\tif($tw.utils.checkVersions(existingTiddler.fields.version,incomingTiddler.version)) {\n\t\t\t\t\ttiddlers[title] = Object.create(null);\n\t\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/Plugins/Suppressed/Version\",{variables: {incoming: incomingTiddler.version, existing: existingTiddler.fields.version}});\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif(incomingTiddler && incomingTiddler[\"plugin-type\"]) {\n\t\t\t// Check whether the plugin is on the blocked list\n\t\t\tvar blockInfo = BLOCKED_PLUGINS[title];\n\t\t\tif(blockInfo) {\n\t\t\t\tif(blockInfo.versions.indexOf(\"*\") !== -1 || (incomingTiddler.version && blockInfo.versions.indexOf(incomingTiddler.version) !== -1)) {\n\t\t\t\t\ttiddlers[title] = Object.create(null);\n\t\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/Plugins/Suppressed/Incompatible\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/upgraders/plugins.js",
            "type": "application/javascript",
            "module-type": "upgrader"
        },
        "$:/core/modules/upgraders/system.js": {
            "text": "/*\\\ntitle: $:/core/modules/upgraders/system.js\ntype: application/javascript\nmodule-type: upgrader\n\nUpgrader module that suppresses certain system tiddlers that shouldn't be imported\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar DONT_IMPORT_LIST = [\"$:/StoryList\",\"$:/HistoryList\"],\n\tDONT_IMPORT_PREFIX_LIST = [\"$:/temp/\",\"$:/state/\"];\n\nexports.upgrade = function(wiki,titles,tiddlers) {\n\tvar self = this,\n\t\tmessages = {};\n\t// Check for tiddlers on our list\n\t$tw.utils.each(titles,function(title) {\n\t\tif(DONT_IMPORT_LIST.indexOf(title) !== -1) {\n\t\t\ttiddlers[title] = Object.create(null);\n\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/System/Suppressed\");\n\t\t} else {\n\t\t\tfor(var t=0; t<DONT_IMPORT_PREFIX_LIST.length; t++) {\n\t\t\t\tvar prefix = DONT_IMPORT_PREFIX_LIST[t];\n\t\t\t\tif(title.substr(0,prefix.length) === prefix) {\n\t\t\t\t\ttiddlers[title] = Object.create(null);\n\t\t\t\t\tmessages[title] = $tw.language.getString(\"Import/Upgrader/State/Suppressed\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/upgraders/system.js",
            "type": "application/javascript",
            "module-type": "upgrader"
        },
        "$:/core/modules/upgraders/themetweaks.js": {
            "text": "/*\\\ntitle: $:/core/modules/upgraders/themetweaks.js\ntype: application/javascript\nmodule-type: upgrader\n\nUpgrader module that handles the change in theme tweak storage introduced in 5.0.14-beta.\n\nPreviously, theme tweaks were stored in two data tiddlers:\n\n* $:/themes/tiddlywiki/vanilla/metrics\n* $:/themes/tiddlywiki/vanilla/settings\n\nNow, each tweak is stored in its own separate tiddler.\n\nThis upgrader copies any values from the old format to the new. The old data tiddlers are not deleted in case they have been used to store additional indexes.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar MAPPINGS = {\n\t\"$:/themes/tiddlywiki/vanilla/metrics\": {\n\t\t\"fontsize\": \"$:/themes/tiddlywiki/vanilla/metrics/fontsize\",\n\t\t\"lineheight\": \"$:/themes/tiddlywiki/vanilla/metrics/lineheight\",\n\t\t\"storyleft\": \"$:/themes/tiddlywiki/vanilla/metrics/storyleft\",\n\t\t\"storytop\": \"$:/themes/tiddlywiki/vanilla/metrics/storytop\",\n\t\t\"storyright\": \"$:/themes/tiddlywiki/vanilla/metrics/storyright\",\n\t\t\"storywidth\": \"$:/themes/tiddlywiki/vanilla/metrics/storywidth\",\n\t\t\"tiddlerwidth\": \"$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth\"\n\t},\n\t\"$:/themes/tiddlywiki/vanilla/settings\": {\n\t\t\"fontfamily\": \"$:/themes/tiddlywiki/vanilla/settings/fontfamily\"\n\t}\n};\n\nexports.upgrade = function(wiki,titles,tiddlers) {\n\tvar self = this,\n\t\tmessages = {};\n\t// Check for tiddlers on our list\n\t$tw.utils.each(titles,function(title) {\n\t\tvar mapping = MAPPINGS[title];\n\t\tif(mapping) {\n\t\t\tvar tiddler = new $tw.Tiddler(tiddlers[title]),\n\t\t\t\ttiddlerData = wiki.getTiddlerDataCached(tiddler,{});\n\t\t\tfor(var index in mapping) {\n\t\t\t\tvar mappedTitle = mapping[index];\n\t\t\t\tif(!tiddlers[mappedTitle] || tiddlers[mappedTitle].title !== mappedTitle) {\n\t\t\t\t\ttiddlers[mappedTitle] = {\n\t\t\t\t\t\ttitle: mappedTitle,\n\t\t\t\t\t\ttext: tiddlerData[index]\n\t\t\t\t\t};\n\t\t\t\t\tmessages[mappedTitle] = $tw.language.getString(\"Import/Upgrader/ThemeTweaks/Created\",{variables: {\n\t\t\t\t\t\tfrom: title + \"##\" + index\n\t\t\t\t\t}});\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/upgraders/themetweaks.js",
            "type": "application/javascript",
            "module-type": "upgrader"
        },
        "$:/core/modules/utils/crypto.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/crypto.js\ntype: application/javascript\nmodule-type: utils\n\nUtility functions related to crypto.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nLook for an encrypted store area in the text of a TiddlyWiki file\n*/\nexports.extractEncryptedStoreArea = function(text) {\n\tvar encryptedStoreAreaStartMarker = \"<pre id=\\\"encryptedStoreArea\\\" type=\\\"text/plain\\\" style=\\\"display:none;\\\">\",\n\t\tencryptedStoreAreaStart = text.indexOf(encryptedStoreAreaStartMarker);\n\tif(encryptedStoreAreaStart !== -1) {\n\t\tvar encryptedStoreAreaEnd = text.indexOf(\"</pre>\",encryptedStoreAreaStart);\n\t\tif(encryptedStoreAreaEnd !== -1) {\n\t\t\treturn $tw.utils.htmlDecode(text.substring(encryptedStoreAreaStart + encryptedStoreAreaStartMarker.length,encryptedStoreAreaEnd-1));\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nAttempt to extract the tiddlers from an encrypted store area using the current password. If the password is not provided then the password in the password store will be used\n*/\nexports.decryptStoreArea = function(encryptedStoreArea,password) {\n\tvar decryptedText = $tw.crypto.decrypt(encryptedStoreArea,password);\n\tif(decryptedText) {\n\t\tvar json = JSON.parse(decryptedText),\n\t\t\ttiddlers = [];\n\t\tfor(var title in json) {\n\t\t\tif(title !== \"$:/isEncrypted\") {\n\t\t\t\ttiddlers.push(json[title]);\n\t\t\t}\n\t\t}\n\t\treturn tiddlers;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n\n/*\nAttempt to extract the tiddlers from an encrypted store area using the current password. If that fails, the user is prompted for a password.\nencryptedStoreArea: text of the TiddlyWiki encrypted store area\ncallback: function(tiddlers) called with the array of decrypted tiddlers\n\nThe following configuration settings are supported:\n\n$tw.config.usePasswordVault: causes any password entered by the user to also be put into the system password vault\n*/\nexports.decryptStoreAreaInteractive = function(encryptedStoreArea,callback,options) {\n\t// Try to decrypt with the current password\n\tvar tiddlers = $tw.utils.decryptStoreArea(encryptedStoreArea);\n\tif(tiddlers) {\n\t\tcallback(tiddlers);\n\t} else {\n\t\t// Prompt for a new password and keep trying\n\t\t$tw.passwordPrompt.createPrompt({\n\t\t\tserviceName: \"Enter a password to decrypt the imported TiddlyWiki\",\n\t\t\tnoUserName: true,\n\t\t\tcanCancel: true,\n\t\t\tsubmitText: \"Decrypt\",\n\t\t\tcallback: function(data) {\n\t\t\t\t// Exit if the user cancelled\n\t\t\t\tif(!data) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t// Attempt to decrypt the tiddlers\n\t\t\t\tvar tiddlers = $tw.utils.decryptStoreArea(encryptedStoreArea,data.password);\n\t\t\t\tif(tiddlers) {\n\t\t\t\t\tif($tw.config.usePasswordVault) {\n\t\t\t\t\t\t$tw.crypto.setPassword(data.password);\n\t\t\t\t\t}\n\t\t\t\t\tcallback(tiddlers);\n\t\t\t\t\t// Exit and remove the password prompt\n\t\t\t\t\treturn true;\n\t\t\t\t} else {\n\t\t\t\t\t// We didn't decrypt everything, so continue to prompt for password\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/utils/crypto.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/animations/slide.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/animations/slide.js\ntype: application/javascript\nmodule-type: animation\n\nA simple slide animation that varies the height of the element\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nfunction slideOpen(domNode,options) {\n\toptions = options || {};\n\tvar duration = options.duration || $tw.utils.getAnimationDuration();\n\t// Get the current height of the domNode\n\tvar computedStyle = window.getComputedStyle(domNode),\n\t\tcurrMarginBottom = parseInt(computedStyle.marginBottom,10),\n\t\tcurrMarginTop = parseInt(computedStyle.marginTop,10),\n\t\tcurrPaddingBottom = parseInt(computedStyle.paddingBottom,10),\n\t\tcurrPaddingTop = parseInt(computedStyle.paddingTop,10),\n\t\tcurrHeight = domNode.offsetHeight;\n\t// Reset the margin once the transition is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(domNode,[\n\t\t\t{transition: \"none\"},\n\t\t\t{marginBottom: \"\"},\n\t\t\t{marginTop: \"\"},\n\t\t\t{paddingBottom: \"\"},\n\t\t\t{paddingTop: \"\"},\n\t\t\t{height: \"auto\"},\n\t\t\t{opacity: \"\"}\n\t\t]);\n\t\tif(options.callback) {\n\t\t\toptions.callback();\n\t\t}\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(domNode,[\n\t\t{transition: \"none\"},\n\t\t{marginTop: \"0px\"},\n\t\t{marginBottom: \"0px\"},\n\t\t{paddingTop: \"0px\"},\n\t\t{paddingBottom: \"0px\"},\n\t\t{height: \"0px\"},\n\t\t{opacity: \"0\"}\n\t]);\n\t$tw.utils.forceLayout(domNode);\n\t// Transition to the final position\n\t$tw.utils.setStyle(domNode,[\n\t\t{transition: \"margin-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"height \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{marginBottom: currMarginBottom + \"px\"},\n\t\t{marginTop: currMarginTop + \"px\"},\n\t\t{paddingBottom: currPaddingBottom + \"px\"},\n\t\t{paddingTop: currPaddingTop + \"px\"},\n\t\t{height: currHeight + \"px\"},\n\t\t{opacity: \"1\"}\n\t]);\n}\n\nfunction slideClosed(domNode,options) {\n\toptions = options || {};\n\tvar duration = options.duration || $tw.utils.getAnimationDuration(),\n\t\tcurrHeight = domNode.offsetHeight;\n\t// Clear the properties we've set when the animation is over\n\tsetTimeout(function() {\n\t\t$tw.utils.setStyle(domNode,[\n\t\t\t{transition: \"none\"},\n\t\t\t{marginBottom: \"\"},\n\t\t\t{marginTop: \"\"},\n\t\t\t{paddingBottom: \"\"},\n\t\t\t{paddingTop: \"\"},\n\t\t\t{height: \"auto\"},\n\t\t\t{opacity: \"\"}\n\t\t]);\n\t\tif(options.callback) {\n\t\t\toptions.callback();\n\t\t}\n\t},duration);\n\t// Set up the initial position of the element\n\t$tw.utils.setStyle(domNode,[\n\t\t{height: currHeight + \"px\"},\n\t\t{opacity: \"1\"}\n\t]);\n\t$tw.utils.forceLayout(domNode);\n\t// Transition to the final position\n\t$tw.utils.setStyle(domNode,[\n\t\t{transition: \"margin-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"margin-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-top \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"padding-bottom \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"height \" + duration + \"ms ease-in-out, \" +\n\t\t\t\t\t\"opacity \" + duration + \"ms ease-in-out\"},\n\t\t{marginTop: \"0px\"},\n\t\t{marginBottom: \"0px\"},\n\t\t{paddingTop: 
\"0px\"},\n\t\t{paddingBottom: \"0px\"},\n\t\t{height: \"0px\"},\n\t\t{opacity: \"0\"}\n\t]);\n}\n\nexports.slide = {\n\topen: slideOpen,\n\tclose: slideClosed\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom/animations/slide.js",
            "type": "application/javascript",
            "module-type": "animation"
        },
        "$:/core/modules/utils/dom/animator.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/animator.js\ntype: application/javascript\nmodule-type: utils\n\nOrchestrates animations and transitions\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nfunction Animator() {\n\t// Get the registered animation modules\n\tthis.animations = {};\n\t$tw.modules.applyMethods(\"animation\",this.animations);\n}\n\nAnimator.prototype.perform = function(type,domNode,options) {\n\toptions = options || {};\n\t// Find an animation that can handle this type\n\tvar chosenAnimation;\n\t$tw.utils.each(this.animations,function(animation,name) {\n\t\tif($tw.utils.hop(animation,type)) {\n\t\t\tchosenAnimation = animation[type];\n\t\t}\n\t});\n\tif(!chosenAnimation) {\n\t\tchosenAnimation = function(domNode,options) {\n\t\t\tif(options.callback) {\n\t\t\t\toptions.callback();\n\t\t\t}\n\t\t};\n\t}\n\t// Call the animation\n\tchosenAnimation(domNode,options);\n};\n\nexports.Animator = Animator;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/animator.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/browser.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/browser.js\ntype: application/javascript\nmodule-type: utils\n\nBrowser feature detection\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nSet style properties of an element\n\telement: dom node\n\tstyles: ordered array of {name: value} pairs\n*/\nexports.setStyle = function(element,styles) {\n\tif(element.nodeType === 1) { // Element.ELEMENT_NODE\n\t\tfor(var t=0; t<styles.length; t++) {\n\t\t\tfor(var styleName in styles[t]) {\n\t\t\t\telement.style[$tw.utils.convertStyleNameToPropertyName(styleName)] = styles[t][styleName];\n\t\t\t}\n\t\t}\n\t}\n};\n\n/*\nConverts a standard CSS property name into the local browser-specific equivalent. For example:\n\t\"background-color\" --> \"backgroundColor\"\n\t\"transition\" --> \"webkitTransition\"\n*/\n\nvar styleNameCache = {}; // We'll cache the style name conversions\n\nexports.convertStyleNameToPropertyName = function(styleName) {\n\t// Return from the cache if we can\n\tif(styleNameCache[styleName]) {\n\t\treturn styleNameCache[styleName];\n\t}\n\t// Convert it by first removing any hyphens\n\tvar propertyName = $tw.utils.unHyphenateCss(styleName);\n\t// Then check if it needs a prefix\n\tif($tw.browser && document.body.style[propertyName] === undefined) {\n\t\tvar prefixes = [\"O\",\"MS\",\"Moz\",\"webkit\"];\n\t\tfor(var t=0; t<prefixes.length; t++) {\n\t\t\tvar prefixedName = prefixes[t] + propertyName.substr(0,1).toUpperCase() + propertyName.substr(1);\n\t\t\tif(document.body.style[prefixedName] !== undefined) {\n\t\t\t\tpropertyName = prefixedName;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\t// Put it in the cache too\n\tstyleNameCache[styleName] = propertyName;\n\treturn propertyName;\n};\n\n/*\nConverts a JS format CSS property name back into the dashed form used in CSS declarations. For example:\n\t\"backgroundColor\" --> \"background-color\"\n\t\"webkitTransform\" --> \"-webkit-transform\"\n*/\nexports.convertPropertyNameToStyleName = function(propertyName) {\n\t// Rehyphenate the name\n\tvar styleName = $tw.utils.hyphenateCss(propertyName);\n\t// If there's a webkit prefix, add a dash (other browsers have uppercase prefixes, and so get the dash automatically)\n\tif(styleName.indexOf(\"webkit\") === 0) {\n\t\tstyleName = \"-\" + styleName;\n\t} else if(styleName.indexOf(\"-m-s\") === 0) {\n\t\tstyleName = \"-ms\" + styleName.substr(4);\n\t}\n\treturn styleName;\n};\n\n/*\nRound trip a stylename to a property name and back again. For example:\n\t\"transform\" --> \"webkitTransform\" --> \"-webkit-transform\"\n*/\nexports.roundTripPropertyName = function(propertyName) {\n\treturn $tw.utils.convertPropertyNameToStyleName($tw.utils.convertStyleNameToPropertyName(propertyName));\n};\n\n/*\nConverts a standard event name into the local browser specific equivalent. 
For example:\n\t\"animationEnd\" --> \"webkitAnimationEnd\"\n*/\n\nvar eventNameCache = {}; // We'll cache the conversions\n\nvar eventNameMappings = {\n\t\"transitionEnd\": {\n\t\tcorrespondingCssProperty: \"transition\",\n\t\tmappings: {\n\t\t\ttransition: \"transitionend\",\n\t\t\tOTransition: \"oTransitionEnd\",\n\t\t\tMSTransition: \"msTransitionEnd\",\n\t\t\tMozTransition: \"transitionend\",\n\t\t\twebkitTransition: \"webkitTransitionEnd\"\n\t\t}\n\t},\n\t\"animationEnd\": {\n\t\tcorrespondingCssProperty: \"animation\",\n\t\tmappings: {\n\t\t\tanimation: \"animationend\",\n\t\t\tOAnimation: \"oAnimationEnd\",\n\t\t\tMSAnimation: \"msAnimationEnd\",\n\t\t\tMozAnimation: \"animationend\",\n\t\t\twebkitAnimation: \"webkitAnimationEnd\"\n\t\t}\n\t}\n};\n\nexports.convertEventName = function(eventName) {\n\tif(eventNameCache[eventName]) {\n\t\treturn eventNameCache[eventName];\n\t}\n\tvar newEventName = eventName,\n\t\tmappings = eventNameMappings[eventName];\n\tif(mappings) {\n\t\tvar convertedProperty = $tw.utils.convertStyleNameToPropertyName(mappings.correspondingCssProperty);\n\t\tif(mappings.mappings[convertedProperty]) {\n\t\t\tnewEventName = mappings.mappings[convertedProperty];\n\t\t}\n\t}\n\t// Put it in the cache too\n\teventNameCache[eventName] = newEventName;\n\treturn newEventName;\n};\n\n/*\nReturn the names of the fullscreen APIs\n*/\nexports.getFullScreenApis = function() {\n\tvar d = document,\n\t\tdb = d.body,\n\t\tresult = {\n\t\t\"_requestFullscreen\": db.webkitRequestFullscreen !== undefined ? \"webkitRequestFullscreen\" :\n\t\t\t\t\t\t\tdb.mozRequestFullScreen !== undefined ? \"mozRequestFullScreen\" :\n\t\t\t\t\t\t\tdb.msRequestFullscreen !== undefined ? \"msRequestFullscreen\" :\n\t\t\t\t\t\t\tdb.requestFullscreen !== undefined ? \"requestFullscreen\" : \"\",\n\t\t\"_exitFullscreen\": d.webkitExitFullscreen !== undefined ? \"webkitExitFullscreen\" :\n\t\t\t\t\t\t\td.mozCancelFullScreen !== undefined ? \"mozCancelFullScreen\" :\n\t\t\t\t\t\t\td.msExitFullscreen !== undefined ? \"msExitFullscreen\" :\n\t\t\t\t\t\t\td.exitFullscreen !== undefined ? \"exitFullscreen\" : \"\",\n\t\t\"_fullscreenElement\": d.webkitFullscreenElement !== undefined ? \"webkitFullscreenElement\" :\n\t\t\t\t\t\t\td.mozFullScreenElement !== undefined ? \"mozFullScreenElement\" :\n\t\t\t\t\t\t\td.msFullscreenElement !== undefined ? \"msFullscreenElement\" :\n\t\t\t\t\t\t\td.fullscreenElement !== undefined ? \"fullscreenElement\" : \"\",\n\t\t\"_fullscreenChange\": d.webkitFullscreenElement !== undefined ? \"webkitfullscreenchange\" :\n\t\t\t\t\t\t\td.mozFullScreenElement !== undefined ? \"mozfullscreenchange\" :\n\t\t\t\t\t\t\td.msFullscreenElement !== undefined ? \"MSFullscreenChange\" :\n\t\t\t\t\t\t\td.fullscreenElement !== undefined ? \"fullscreenchange\" : \"\"\n\t};\n\tif(!result._requestFullscreen || !result._exitFullscreen || !result._fullscreenElement || !result._fullscreenChange) {\n\t\treturn null;\n\t} else {\n\t\treturn result;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom/browser.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/csscolorparser.js": {
            "text": "// (c) Dean McNamee <dean@gmail.com>, 2012.\n//\n// https://github.com/deanm/css-color-parser-js\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to\n// deal in the Software without restriction, including without limitation the\n// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n// sell copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n// IN THE SOFTWARE.\n\n// http://www.w3.org/TR/css3-color/\nvar kCSSColorTable = {\n  \"transparent\": [0,0,0,0], \"aliceblue\": [240,248,255,1],\n  \"antiquewhite\": [250,235,215,1], \"aqua\": [0,255,255,1],\n  \"aquamarine\": [127,255,212,1], \"azure\": [240,255,255,1],\n  \"beige\": [245,245,220,1], \"bisque\": [255,228,196,1],\n  \"black\": [0,0,0,1], \"blanchedalmond\": [255,235,205,1],\n  \"blue\": [0,0,255,1], \"blueviolet\": [138,43,226,1],\n  \"brown\": [165,42,42,1], \"burlywood\": [222,184,135,1],\n  \"cadetblue\": [95,158,160,1], \"chartreuse\": [127,255,0,1],\n  \"chocolate\": [210,105,30,1], \"coral\": [255,127,80,1],\n  \"cornflowerblue\": [100,149,237,1], \"cornsilk\": [255,248,220,1],\n  \"crimson\": [220,20,60,1], \"cyan\": [0,255,255,1],\n  \"darkblue\": [0,0,139,1], \"darkcyan\": [0,139,139,1],\n  \"darkgoldenrod\": [184,134,11,1], \"darkgray\": [169,169,169,1],\n  \"darkgreen\": [0,100,0,1], \"darkgrey\": [169,169,169,1],\n  \"darkkhaki\": [189,183,107,1], \"darkmagenta\": [139,0,139,1],\n  \"darkolivegreen\": [85,107,47,1], \"darkorange\": [255,140,0,1],\n  \"darkorchid\": [153,50,204,1], \"darkred\": [139,0,0,1],\n  \"darksalmon\": [233,150,122,1], \"darkseagreen\": [143,188,143,1],\n  \"darkslateblue\": [72,61,139,1], \"darkslategray\": [47,79,79,1],\n  \"darkslategrey\": [47,79,79,1], \"darkturquoise\": [0,206,209,1],\n  \"darkviolet\": [148,0,211,1], \"deeppink\": [255,20,147,1],\n  \"deepskyblue\": [0,191,255,1], \"dimgray\": [105,105,105,1],\n  \"dimgrey\": [105,105,105,1], \"dodgerblue\": [30,144,255,1],\n  \"firebrick\": [178,34,34,1], \"floralwhite\": [255,250,240,1],\n  \"forestgreen\": [34,139,34,1], \"fuchsia\": [255,0,255,1],\n  \"gainsboro\": [220,220,220,1], \"ghostwhite\": [248,248,255,1],\n  \"gold\": [255,215,0,1], \"goldenrod\": [218,165,32,1],\n  \"gray\": [128,128,128,1], \"green\": [0,128,0,1],\n  \"greenyellow\": [173,255,47,1], \"grey\": [128,128,128,1],\n  \"honeydew\": [240,255,240,1], \"hotpink\": [255,105,180,1],\n  \"indianred\": [205,92,92,1], \"indigo\": [75,0,130,1],\n  \"ivory\": [255,255,240,1], \"khaki\": [240,230,140,1],\n  \"lavender\": [230,230,250,1], \"lavenderblush\": [255,240,245,1],\n  \"lawngreen\": [124,252,0,1], \"lemonchiffon\": [255,250,205,1],\n  \"lightblue\": [173,216,230,1], \"lightcoral\": [240,128,128,1],\n  \"lightcyan\": [224,255,255,1], 
\"lightgoldenrodyellow\": [250,250,210,1],\n  \"lightgray\": [211,211,211,1], \"lightgreen\": [144,238,144,1],\n  \"lightgrey\": [211,211,211,1], \"lightpink\": [255,182,193,1],\n  \"lightsalmon\": [255,160,122,1], \"lightseagreen\": [32,178,170,1],\n  \"lightskyblue\": [135,206,250,1], \"lightslategray\": [119,136,153,1],\n  \"lightslategrey\": [119,136,153,1], \"lightsteelblue\": [176,196,222,1],\n  \"lightyellow\": [255,255,224,1], \"lime\": [0,255,0,1],\n  \"limegreen\": [50,205,50,1], \"linen\": [250,240,230,1],\n  \"magenta\": [255,0,255,1], \"maroon\": [128,0,0,1],\n  \"mediumaquamarine\": [102,205,170,1], \"mediumblue\": [0,0,205,1],\n  \"mediumorchid\": [186,85,211,1], \"mediumpurple\": [147,112,219,1],\n  \"mediumseagreen\": [60,179,113,1], \"mediumslateblue\": [123,104,238,1],\n  \"mediumspringgreen\": [0,250,154,1], \"mediumturquoise\": [72,209,204,1],\n  \"mediumvioletred\": [199,21,133,1], \"midnightblue\": [25,25,112,1],\n  \"mintcream\": [245,255,250,1], \"mistyrose\": [255,228,225,1],\n  \"moccasin\": [255,228,181,1], \"navajowhite\": [255,222,173,1],\n  \"navy\": [0,0,128,1], \"oldlace\": [253,245,230,1],\n  \"olive\": [128,128,0,1], \"olivedrab\": [107,142,35,1],\n  \"orange\": [255,165,0,1], \"orangered\": [255,69,0,1],\n  \"orchid\": [218,112,214,1], \"palegoldenrod\": [238,232,170,1],\n  \"palegreen\": [152,251,152,1], \"paleturquoise\": [175,238,238,1],\n  \"palevioletred\": [219,112,147,1], \"papayawhip\": [255,239,213,1],\n  \"peachpuff\": [255,218,185,1], \"peru\": [205,133,63,1],\n  \"pink\": [255,192,203,1], \"plum\": [221,160,221,1],\n  \"powderblue\": [176,224,230,1], \"purple\": [128,0,128,1],\n  \"red\": [255,0,0,1], \"rosybrown\": [188,143,143,1],\n  \"royalblue\": [65,105,225,1], \"saddlebrown\": [139,69,19,1],\n  \"salmon\": [250,128,114,1], \"sandybrown\": [244,164,96,1],\n  \"seagreen\": [46,139,87,1], \"seashell\": [255,245,238,1],\n  \"sienna\": [160,82,45,1], \"silver\": [192,192,192,1],\n  \"skyblue\": [135,206,235,1], \"slateblue\": [106,90,205,1],\n  \"slategray\": [112,128,144,1], \"slategrey\": [112,128,144,1],\n  \"snow\": [255,250,250,1], \"springgreen\": [0,255,127,1],\n  \"steelblue\": [70,130,180,1], \"tan\": [210,180,140,1],\n  \"teal\": [0,128,128,1], \"thistle\": [216,191,216,1],\n  \"tomato\": [255,99,71,1], \"turquoise\": [64,224,208,1],\n  \"violet\": [238,130,238,1], \"wheat\": [245,222,179,1],\n  \"white\": [255,255,255,1], \"whitesmoke\": [245,245,245,1],\n  \"yellow\": [255,255,0,1], \"yellowgreen\": [154,205,50,1]}\n\nfunction clamp_css_byte(i) {  // Clamp to integer 0 .. 255.\n  i = Math.round(i);  // Seems to be what Chrome does (vs truncation).\n  return i < 0 ? 0 : i > 255 ? 255 : i;\n}\n\nfunction clamp_css_float(f) {  // Clamp to float 0.0 .. 1.0.\n  return f < 0 ? 0 : f > 1 ? 
1 : f;\n}\n\nfunction parse_css_int(str) {  // int or percentage.\n  if (str[str.length - 1] === '%')\n    return clamp_css_byte(parseFloat(str) / 100 * 255);\n  return clamp_css_byte(parseInt(str));\n}\n\nfunction parse_css_float(str) {  // float or percentage.\n  if (str[str.length - 1] === '%')\n    return clamp_css_float(parseFloat(str) / 100);\n  return clamp_css_float(parseFloat(str));\n}\n\nfunction css_hue_to_rgb(m1, m2, h) {\n  if (h < 0) h += 1;\n  else if (h > 1) h -= 1;\n\n  if (h * 6 < 1) return m1 + (m2 - m1) * h * 6;\n  if (h * 2 < 1) return m2;\n  if (h * 3 < 2) return m1 + (m2 - m1) * (2/3 - h) * 6;\n  return m1;\n}\n\nfunction parseCSSColor(css_str) {\n  // Remove all whitespace, not compliant, but should just be more accepting.\n  var str = css_str.replace(/ /g, '').toLowerCase();\n\n  // Color keywords (and transparent) lookup.\n  if (str in kCSSColorTable) return kCSSColorTable[str].slice();  // dup.\n\n  // #abc and #abc123 syntax.\n  if (str[0] === '#') {\n    if (str.length === 4) {\n      var iv = parseInt(str.substr(1), 16);  // TODO(deanm): Stricter parsing.\n      if (!(iv >= 0 && iv <= 0xfff)) return null;  // Covers NaN.\n      return [((iv & 0xf00) >> 4) | ((iv & 0xf00) >> 8),\n              (iv & 0xf0) | ((iv & 0xf0) >> 4),\n              (iv & 0xf) | ((iv & 0xf) << 4),\n              1];\n    } else if (str.length === 7) {\n      var iv = parseInt(str.substr(1), 16);  // TODO(deanm): Stricter parsing.\n      if (!(iv >= 0 && iv <= 0xffffff)) return null;  // Covers NaN.\n      return [(iv & 0xff0000) >> 16,\n              (iv & 0xff00) >> 8,\n              iv & 0xff,\n              1];\n    }\n\n    return null;\n  }\n\n  var op = str.indexOf('('), ep = str.indexOf(')');\n  if (op !== -1 && ep + 1 === str.length) {\n    var fname = str.substr(0, op);\n    var params = str.substr(op+1, ep-(op+1)).split(',');\n    var alpha = 1;  // To allow case fallthrough.\n    switch (fname) {\n      case 'rgba':\n        if (params.length !== 4) return null;\n        alpha = parse_css_float(params.pop());\n        // Fall through.\n      case 'rgb':\n        if (params.length !== 3) return null;\n        return [parse_css_int(params[0]),\n                parse_css_int(params[1]),\n                parse_css_int(params[2]),\n                alpha];\n      case 'hsla':\n        if (params.length !== 4) return null;\n        alpha = parse_css_float(params.pop());\n        // Fall through.\n      case 'hsl':\n        if (params.length !== 3) return null;\n        var h = (((parseFloat(params[0]) % 360) + 360) % 360) / 360;  // 0 .. 1\n        // NOTE(deanm): According to the CSS spec s/l should only be\n        // percentages, but we don't bother and let float or percentage.\n        var s = parse_css_float(params[1]);\n        var l = parse_css_float(params[2]);\n        var m2 = l <= 0.5 ? l * (s + 1) : l + s - l * s;\n        var m1 = l * 2 - m2;\n        return [clamp_css_byte(css_hue_to_rgb(m1, m2, h+1/3) * 255),\n                clamp_css_byte(css_hue_to_rgb(m1, m2, h) * 255),\n                clamp_css_byte(css_hue_to_rgb(m1, m2, h-1/3) * 255),\n                alpha];\n      default:\n        return null;\n    }\n  }\n\n  return null;\n}\n\ntry { exports.parseCSSColor = parseCSSColor } catch(e) { }\n",
            "title": "$:/core/modules/utils/dom/csscolorparser.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom.js\ntype: application/javascript\nmodule-type: utils\n\nVarious static DOM-related utility functions.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nDetermines whether element 'a' contains element 'b'\nCode thanks to John Resig, http://ejohn.org/blog/comparing-document-position/\n*/\nexports.domContains = function(a,b) {\n\treturn a.contains ?\n\t\ta !== b && a.contains(b) :\n\t\t!!(a.compareDocumentPosition(b) & 16);\n};\n\nexports.removeChildren = function(node) {\n\twhile(node.hasChildNodes()) {\n\t\tnode.removeChild(node.firstChild);\n\t}\n};\n\nexports.hasClass = function(el,className) {\n\treturn el && el.className && el.className.toString().split(\" \").indexOf(className) !== -1;\n};\n\nexports.addClass = function(el,className) {\n\tvar c = el.className.split(\" \");\n\tif(c.indexOf(className) === -1) {\n\t\tc.push(className);\n\t}\n\tel.className = c.join(\" \");\n};\n\nexports.removeClass = function(el,className) {\n\tvar c = el.className.split(\" \"),\n\t\tp = c.indexOf(className);\n\tif(p !== -1) {\n\t\tc.splice(p,1);\n\t\tel.className = c.join(\" \");\n\t}\n};\n\nexports.toggleClass = function(el,className,status) {\n\tif(status === undefined) {\n\t\tstatus = !exports.hasClass(el,className);\n\t}\n\tif(status) {\n\t\texports.addClass(el,className);\n\t} else {\n\t\texports.removeClass(el,className);\n\t}\n};\n\n/*\nGet the first parent element that has scrollbars or use the body as fallback.\n*/\nexports.getScrollContainer = function(el) {\n\tvar doc = el.ownerDocument;\n\twhile(el.parentNode) {\t\n\t\tel = el.parentNode;\n\t\tif(el.scrollTop) {\n\t\t\treturn el;\n\t\t}\n\t}\n\treturn doc.body;\n};\n\n/*\nGet the scroll position of the viewport\nReturns:\n\t{\n\t\tx: horizontal scroll position in pixels,\n\t\ty: vertical scroll position in pixels\n\t}\n*/\nexports.getScrollPosition = function() {\n\tif(\"scrollX\" in window) {\n\t\treturn {x: window.scrollX, y: window.scrollY};\n\t} else {\n\t\treturn {x: document.documentElement.scrollLeft, y: document.documentElement.scrollTop};\n\t}\n};\n\n/*\nAdjust the height of a textarea to fit its content, preserving scroll position, and return the height\n*/\nexports.resizeTextAreaToFit = function(domNode,minHeight) {\n\t// Get the scroll container and register the current scroll position\n\tvar container = $tw.utils.getScrollContainer(domNode),\n\t\tscrollTop = container.scrollTop;\n    // Measure the specified minimum height\n\tdomNode.style.height = minHeight;\n\tvar measuredHeight = domNode.offsetHeight;\n\t// Set its height to auto so that it snaps to the correct height\n\tdomNode.style.height = \"auto\";\n\t// Calculate the revised height\n\tvar newHeight = Math.max(domNode.scrollHeight + domNode.offsetHeight - domNode.clientHeight,measuredHeight);\n\t// Only try to change the height if it has changed\n\tif(newHeight !== domNode.offsetHeight) {\n\t\tdomNode.style.height = newHeight + \"px\";\n\t\t// Make sure that the dimensions of the textarea are recalculated\n\t\t$tw.utils.forceLayout(domNode);\n\t\t// Set the container to the position we registered at the beginning\n\t\tcontainer.scrollTop = scrollTop;\n\t}\n\treturn newHeight;\n};\n\n/*\nGets the bounding rectangle of an element in absolute page coordinates\n*/\nexports.getBoundingPageRect = function(element) {\n\tvar scrollPos = $tw.utils.getScrollPosition(),\n\t\tclientRect = element.getBoundingClientRect();\n\treturn {\n\t\tleft: clientRect.left + 
scrollPos.x,\n\t\twidth: clientRect.width,\n\t\tright: clientRect.right + scrollPos.x,\n\t\ttop: clientRect.top + scrollPos.y,\n\t\theight: clientRect.height,\n\t\tbottom: clientRect.bottom + scrollPos.y\n\t};\n};\n\n/*\nSaves a named password in the browser\n*/\nexports.savePassword = function(name,password) {\n\ttry {\n\t\tif(window.localStorage) {\n\t\t\tlocalStorage.setItem(\"tw5-password-\" + name,password);\n\t\t}\n\t} catch(e) {\n\t}\n};\n\n/*\nRetrieve a named password from the browser\n*/\nexports.getPassword = function(name) {\n\ttry {\n\t\treturn window.localStorage ? localStorage.getItem(\"tw5-password-\" + name) : \"\";\n\t} catch(e) {\n\t\treturn \"\";\n\t}\n};\n\n/*\nForce layout of a dom node and its descendents\n*/\nexports.forceLayout = function(element) {\n\tvar dummy = element.offsetWidth;\n};\n\n/*\nPulse an element for debugging purposes\n*/\nexports.pulseElement = function(element) {\n\t// Event handler to remove the class at the end\n\telement.addEventListener($tw.browser.animationEnd,function handler(event) {\n\t\telement.removeEventListener($tw.browser.animationEnd,handler,false);\n\t\t$tw.utils.removeClass(element,\"pulse\");\n\t},false);\n\t// Apply the pulse class\n\t$tw.utils.removeClass(element,\"pulse\");\n\t$tw.utils.forceLayout(element);\n\t$tw.utils.addClass(element,\"pulse\");\n};\n\n/*\nAttach specified event handlers to a DOM node\ndomNode: where to attach the event handlers\nevents: array of event handlers to be added (see below)\nEach entry in the events array is an object with these properties:\nhandlerFunction: optional event handler function\nhandlerObject: optional event handler object\nhandlerMethod: optionally specifies object handler method name (defaults to `handleEvent`)\n*/\nexports.addEventListeners = function(domNode,events) {\n\t$tw.utils.each(events,function(eventInfo) {\n\t\tvar handler;\n\t\tif(eventInfo.handlerFunction) {\n\t\t\thandler = eventInfo.handlerFunction;\n\t\t} else if(eventInfo.handlerObject) {\n\t\t\tif(eventInfo.handlerMethod) {\n\t\t\t\thandler = function(event) {\n\t\t\t\t\teventInfo.handlerObject[eventInfo.handlerMethod].call(eventInfo.handlerObject,event);\n\t\t\t\t};\t\n\t\t\t} else {\n\t\t\t\thandler = eventInfo.handlerObject;\n\t\t\t}\n\t\t}\n\t\tdomNode.addEventListener(eventInfo.name,handler,false);\n\t});\n};\n\n/*\nGet the computed styles applied to an element as an array of strings of individual CSS properties\n*/\nexports.getComputedStyles = function(domNode) {\n\tvar textAreaStyles = window.getComputedStyle(domNode,null),\n\t\tstyleDefs = [],\n\t\tname;\n\tfor(var t=0; t<textAreaStyles.length; t++) {\n\t\tname = textAreaStyles[t];\n\t\tstyleDefs.push(name + \": \" + textAreaStyles.getPropertyValue(name) + \";\");\n\t}\n\treturn styleDefs;\n};\n\n/*\nApply a set of styles passed as an array of strings of individual CSS properties\n*/\nexports.setStyles = function(domNode,styleDefs) {\n\tdomNode.style.cssText = styleDefs.join(\"\");\n};\n\n/*\nCopy the computed styles from a source element to a destination element\n*/\nexports.copyStyles = function(srcDomNode,dstDomNode) {\n\t$tw.utils.setStyles(dstDomNode,$tw.utils.getComputedStyles(srcDomNode));\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/http.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/http.js\ntype: application/javascript\nmodule-type: utils\n\nBrowser HTTP support\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nA quick and dirty HTTP function; to be refactored later. Options are:\n\turl: URL to retrieve\n\ttype: GET, PUT, POST etc\n\tcallback: function invoked with (err,data)\n*/\nexports.httpRequest = function(options) {\n\tvar type = options.type || \"GET\",\n\t\theaders = options.headers || {accept: \"application/json\"},\n\t\trequest = new XMLHttpRequest(),\n\t\tdata = \"\",\n\t\tf,results;\n\t// Massage the data hashmap into a string\n\tif(options.data) {\n\t\tif(typeof options.data === \"string\") { // Already a string\n\t\t\tdata = options.data;\n\t\t} else { // A hashmap of strings\n\t\t\tresults = [];\n\t\t\t$tw.utils.each(options.data,function(dataItem,dataItemTitle) {\n\t\t\t\tresults.push(dataItemTitle + \"=\" + encodeURIComponent(dataItem));\n\t\t\t});\n\t\t\tdata = results.join(\"&\");\n\t\t}\n\t}\n\t// Set up the state change handler\n\trequest.onreadystatechange = function() {\n\t\tif(this.readyState === 4) {\n\t\t\tif(this.status === 200 || this.status === 201 || this.status === 204) {\n\t\t\t\t// Success!\n\t\t\t\toptions.callback(null,this.responseText,this);\n\t\t\t\treturn;\n\t\t\t}\n\t\t// Something went wrong\n\t\toptions.callback($tw.language.getString(\"Error/XMLHttpRequest\") + \": \" + this.status);\n\t\t}\n\t};\n\t// Make the request\n\trequest.open(type,options.url,true);\n\tif(headers) {\n\t\t$tw.utils.each(headers,function(header,headerTitle,object) {\n\t\t\trequest.setRequestHeader(headerTitle,header);\n\t\t});\n\t}\n\tif(data && !$tw.utils.hop(headers,\"Content-type\")) {\n\t\trequest.setRequestHeader(\"Content-type\",\"application/x-www-form-urlencoded; charset=UTF-8\");\n\t}\n\ttry {\n\t\trequest.send(data);\n\t} catch(e) {\n\t\toptions.callback(e);\n\t}\n\treturn request;\n};\n\n})();\n",
            "title": "$:/core/modules/utils/dom/http.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/keyboard.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/keyboard.js\ntype: application/javascript\nmodule-type: utils\n\nKeyboard utilities; now deprecated. Instead, use $tw.keyboardManager\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n[\"parseKeyDescriptor\",\"checkKeyDescriptor\"].forEach(function(method) {\n\texports[method] = function() {\n\t\tif($tw.keyboardManager) {\n\t\t\treturn $tw.keyboardManager[method].apply($tw.keyboardManager,Array.prototype.slice.call(arguments,0));\n\t\t} else {\n\t\t\treturn null\n\t\t}\n\t};\n});\n\n})();\n",
            "title": "$:/core/modules/utils/dom/keyboard.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/modal.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/modal.js\ntype: application/javascript\nmodule-type: utils\n\nModal message mechanism\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nvar Modal = function(wiki) {\n\tthis.wiki = wiki;\n\tthis.modalCount = 0;\n};\n\n/*\nDisplay a modal dialogue\n\ttitle: Title of tiddler to display\n\toptions: see below\nOptions include:\n\tdownloadLink: Text of a big download link to include\n*/\nModal.prototype.display = function(title,options) {\n\toptions = options || {};\n\tvar self = this,\n\t\trefreshHandler,\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\ttiddler = this.wiki.getTiddler(title);\n\t// Don't do anything if the tiddler doesn't exist\n\tif(!tiddler) {\n\t\treturn;\n\t}\n\t// Create the variables\n\tvar variables = $tw.utils.extend({currentTiddler: title},options.variables);\n\t// Create the wrapper divs\n\tvar wrapper = document.createElement(\"div\"),\n\t\tmodalBackdrop = document.createElement(\"div\"),\n\t\tmodalWrapper = document.createElement(\"div\"),\n\t\tmodalHeader = document.createElement(\"div\"),\n\t\theaderTitle = document.createElement(\"h3\"),\n\t\tmodalBody = document.createElement(\"div\"),\n\t\tmodalLink = document.createElement(\"a\"),\n\t\tmodalFooter = document.createElement(\"div\"),\n\t\tmodalFooterHelp = document.createElement(\"span\"),\n\t\tmodalFooterButtons = document.createElement(\"span\");\n\t// Up the modal count and adjust the body class\n\tthis.modalCount++;\n\tthis.adjustPageClass();\n\t// Add classes\n\t$tw.utils.addClass(wrapper,\"tc-modal-wrapper\");\n\t$tw.utils.addClass(modalBackdrop,\"tc-modal-backdrop\");\n\t$tw.utils.addClass(modalWrapper,\"tc-modal\");\n\t$tw.utils.addClass(modalHeader,\"tc-modal-header\");\n\t$tw.utils.addClass(modalBody,\"tc-modal-body\");\n\t$tw.utils.addClass(modalFooter,\"tc-modal-footer\");\n\t// Join them together\n\twrapper.appendChild(modalBackdrop);\n\twrapper.appendChild(modalWrapper);\n\tmodalHeader.appendChild(headerTitle);\n\tmodalWrapper.appendChild(modalHeader);\n\tmodalWrapper.appendChild(modalBody);\n\tmodalFooter.appendChild(modalFooterHelp);\n\tmodalFooter.appendChild(modalFooterButtons);\n\tmodalWrapper.appendChild(modalFooter);\n\t// Render the title of the message\n\tvar headerWidgetNode = this.wiki.makeTranscludeWidget(title,{\n\t\tfield: \"subtitle\",\n\t\tmode: \"inline\",\n\t\tchildren: [{\n\t\t\ttype: \"text\",\n\t\t\tattributes: {\n\t\t\t\ttext: {\n\t\t\t\t\ttype: \"string\",\n\t\t\t\t\tvalue: title\n\t\t}}}],\n\t\tparentWidget: $tw.rootWidget,\n\t\tdocument: document,\n\t\tvariables: variables\n\t});\n\theaderWidgetNode.render(headerTitle,null);\n\t// Render the body of the message\n\tvar bodyWidgetNode = this.wiki.makeTranscludeWidget(title,{\n\t\tparentWidget: $tw.rootWidget,\n\t\tdocument: document,\n\t\tvariables: variables\n\t});\n\tbodyWidgetNode.render(modalBody,null);\n\t// Setup the link if present\n\tif(options.downloadLink) {\n\t\tmodalLink.href = options.downloadLink;\n\t\tmodalLink.appendChild(document.createTextNode(\"Right-click to save changes\"));\n\t\tmodalBody.appendChild(modalLink);\n\t}\n\t// Render the footer of the message\n\tif(tiddler && tiddler.fields && tiddler.fields.help) {\n\t\tvar link = document.createElement(\"a\");\n\t\tlink.setAttribute(\"href\",tiddler.fields.help);\n\t\tlink.setAttribute(\"target\",\"_blank\");\n\t\tlink.setAttribute(\"rel\",\"noopener 
noreferrer\");\n\t\tlink.appendChild(document.createTextNode(\"Help\"));\n\t\tmodalFooterHelp.appendChild(link);\n\t\tmodalFooterHelp.style.float = \"left\";\n\t}\n\tvar footerWidgetNode = this.wiki.makeTranscludeWidget(title,{\n\t\tfield: \"footer\",\n\t\tmode: \"inline\",\n\t\tchildren: [{\n\t\t\ttype: \"button\",\n\t\t\tattributes: {\n\t\t\t\tmessage: {\n\t\t\t\t\ttype: \"string\",\n\t\t\t\t\tvalue: \"tm-close-tiddler\"\n\t\t\t\t}\n\t\t\t},\n\t\t\tchildren: [{\n\t\t\t\ttype: \"text\",\n\t\t\t\tattributes: {\n\t\t\t\t\ttext: {\n\t\t\t\t\t\ttype: \"string\",\n\t\t\t\t\t\tvalue: $tw.language.getString(\"Buttons/Close/Caption\")\n\t\t\t}}}\n\t\t]}],\n\t\tparentWidget: $tw.rootWidget,\n\t\tdocument: document,\n\t\tvariables: variables\n\t});\n\tfooterWidgetNode.render(modalFooterButtons,null);\n\t// Set up the refresh handler\n\trefreshHandler = function(changes) {\n\t\theaderWidgetNode.refresh(changes,modalHeader,null);\n\t\tbodyWidgetNode.refresh(changes,modalBody,null);\n\t\tfooterWidgetNode.refresh(changes,modalFooterButtons,null);\n\t};\n\tthis.wiki.addEventListener(\"change\",refreshHandler);\n\t// Add the close event handler\n\tvar closeHandler = function(event) {\n\t\t// Remove our refresh handler\n\t\tself.wiki.removeEventListener(\"change\",refreshHandler);\n\t\t// Decrease the modal count and adjust the body class\n\t\tself.modalCount--;\n\t\tself.adjustPageClass();\n\t\t// Force layout and animate the modal message away\n\t\t$tw.utils.forceLayout(modalBackdrop);\n\t\t$tw.utils.forceLayout(modalWrapper);\n\t\t$tw.utils.setStyle(modalBackdrop,[\n\t\t\t{opacity: \"0\"}\n\t\t]);\n\t\t$tw.utils.setStyle(modalWrapper,[\n\t\t\t{transform: \"translateY(\" + window.innerHeight + \"px)\"}\n\t\t]);\n\t\t// Set up an event for the transition end\n\t\twindow.setTimeout(function() {\n\t\t\tif(wrapper.parentNode) {\n\t\t\t\t// Remove the modal message from the DOM\n\t\t\t\tdocument.body.removeChild(wrapper);\n\t\t\t}\n\t\t},duration);\n\t\t// Don't let anyone else handle the tm-close-tiddler message\n\t\treturn false;\n\t};\n\theaderWidgetNode.addEventListener(\"tm-close-tiddler\",closeHandler,false);\n\tbodyWidgetNode.addEventListener(\"tm-close-tiddler\",closeHandler,false);\n\tfooterWidgetNode.addEventListener(\"tm-close-tiddler\",closeHandler,false);\n\t// Set the initial styles for the message\n\t$tw.utils.setStyle(modalBackdrop,[\n\t\t{opacity: \"0\"}\n\t]);\n\t$tw.utils.setStyle(modalWrapper,[\n\t\t{transformOrigin: \"0% 0%\"},\n\t\t{transform: \"translateY(\" + (-window.innerHeight) + \"px)\"}\n\t]);\n\t// Put the message into the document\n\tdocument.body.appendChild(wrapper);\n\t// Set up animation for the styles\n\t$tw.utils.setStyle(modalBackdrop,[\n\t\t{transition: \"opacity \" + duration + \"ms ease-out\"}\n\t]);\n\t$tw.utils.setStyle(modalWrapper,[\n\t\t{transition: $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out\"}\n\t]);\n\t// Force layout\n\t$tw.utils.forceLayout(modalBackdrop);\n\t$tw.utils.forceLayout(modalWrapper);\n\t// Set final animated styles\n\t$tw.utils.setStyle(modalBackdrop,[\n\t\t{opacity: \"0.7\"}\n\t]);\n\t$tw.utils.setStyle(modalWrapper,[\n\t\t{transform: \"translateY(0px)\"}\n\t]);\n};\n\nModal.prototype.adjustPageClass = function() {\n\tif($tw.pageContainer) {\n\t\t$tw.utils.toggleClass($tw.pageContainer,\"tc-modal-displayed\",this.modalCount > 0);\n\t}\n};\n\nexports.Modal = Modal;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/modal.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/notifier.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/notifier.js\ntype: application/javascript\nmodule-type: utils\n\nNotifier mechanism\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nvar Notifier = function(wiki) {\n\tthis.wiki = wiki;\n};\n\n/*\nDisplay a notification\n\ttitle: Title of tiddler containing the notification text\n\toptions: see below\nOptions include:\n*/\nNotifier.prototype.display = function(title,options) {\n\toptions = options || {};\n\t// Create the wrapper divs\n\tvar self = this,\n\t\tnotification = document.createElement(\"div\"),\n\t\ttiddler = this.wiki.getTiddler(title),\n\t\tduration = $tw.utils.getAnimationDuration(),\n\t\trefreshHandler;\n\t// Don't do anything if the tiddler doesn't exist\n\tif(!tiddler) {\n\t\treturn;\n\t}\n\t// Add classes\n\t$tw.utils.addClass(notification,\"tc-notification\");\n\t// Create the variables\n\tvar variables = $tw.utils.extend({currentTiddler: title},options.variables);\n\t// Render the body of the notification\n\tvar widgetNode = this.wiki.makeTranscludeWidget(title,{parentWidget: $tw.rootWidget, document: document, variables: variables});\n\twidgetNode.render(notification,null);\n\trefreshHandler = function(changes) {\n\t\twidgetNode.refresh(changes,notification,null);\n\t};\n\tthis.wiki.addEventListener(\"change\",refreshHandler);\n\t// Set the initial styles for the notification\n\t$tw.utils.setStyle(notification,[\n\t\t{opacity: \"0\"},\n\t\t{transformOrigin: \"0% 0%\"},\n\t\t{transform: \"translateY(\" + (-window.innerHeight) + \"px)\"},\n\t\t{transition: \"opacity \" + duration + \"ms ease-out, \" + $tw.utils.roundTripPropertyName(\"transform\") + \" \" + duration + \"ms ease-in-out\"}\n\t]);\n\t// Add the notification to the DOM\n\tdocument.body.appendChild(notification);\n\t// Force layout\n\t$tw.utils.forceLayout(notification);\n\t// Set final animated styles\n\t$tw.utils.setStyle(notification,[\n\t\t{opacity: \"1.0\"},\n\t\t{transform: \"translateY(0px)\"}\n\t]);\n\t// Set a timer to remove the notification\n\twindow.setTimeout(function() {\n\t\t// Remove our change event handler\n\t\tself.wiki.removeEventListener(\"change\",refreshHandler);\n\t\t// Force layout and animate the notification away\n\t\t$tw.utils.forceLayout(notification);\n\t\t$tw.utils.setStyle(notification,[\n\t\t\t{opacity: \"0.0\"},\n\t\t\t{transform: \"translateX(\" + (notification.offsetWidth) + \"px)\"}\n\t\t]);\n\t\t// Remove the modal message from the DOM once the transition ends\n\t\tsetTimeout(function() {\n\t\t\tif(notification.parentNode) {\n\t\t\t\tdocument.body.removeChild(notification);\n\t\t\t}\n\t\t},duration);\n\t},$tw.config.preferences.notificationDuration);\n};\n\nexports.Notifier = Notifier;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/notifier.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/popup.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/popup.js\ntype: application/javascript\nmodule-type: utils\n\nModule that creates a $tw.utils.Popup object prototype that manages popups in the browser\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nCreates a Popup object with these options:\n\trootElement: the DOM element to which the popup zapper should be attached\n*/\nvar Popup = function(options) {\n\toptions = options || {};\n\tthis.rootElement = options.rootElement || document.documentElement;\n\tthis.popups = []; // Array of {title:,wiki:,domNode:} objects\n};\n\n/*\nTrigger a popup open or closed. Parameters are in a hashmap:\n\ttitle: title of the tiddler where the popup details are stored\n\tdomNode: dom node to which the popup will be positioned\n\twiki: wiki\n\tforce: if specified, forces the popup state to true or false (instead of toggling it)\n*/\nPopup.prototype.triggerPopup = function(options) {\n\t// Check if this popup is already active\n\tvar index = this.findPopup(options.title);\n\t// Compute the new state\n\tvar state = index === -1;\n\tif(options.force !== undefined) {\n\t\tstate = options.force;\n\t}\n\t// Show or cancel the popup according to the new state\n\tif(state) {\n\t\tthis.show(options);\n\t} else {\n\t\tthis.cancel(index);\n\t}\n};\n\nPopup.prototype.findPopup = function(title) {\n\tvar index = -1;\n\tfor(var t=0; t<this.popups.length; t++) {\n\t\tif(this.popups[t].title === title) {\n\t\t\tindex = t;\n\t\t}\n\t}\n\treturn index;\n};\n\nPopup.prototype.handleEvent = function(event) {\n\tif(event.type === \"click\") {\n\t\t// Find out what was clicked on\n\t\tvar info = this.popupInfo(event.target),\n\t\t\tcancelLevel = info.popupLevel - 1;\n\t\t// Don't remove the level that was clicked on if we clicked on a handle\n\t\tif(info.isHandle) {\n\t\t\tcancelLevel++;\n\t\t}\n\t\t// Cancel\n\t\tthis.cancel(cancelLevel);\n\t}\n};\n\n/*\nFind the popup level containing a DOM node. 
Returns:\npopupLevel: count of the number of nested popups containing the specified element\nisHandle: true if the specified element is within a popup handle\n*/\nPopup.prototype.popupInfo = function(domNode) {\n\tvar isHandle = false,\n\t\tpopupCount = 0,\n\t\tnode = domNode;\n\t// First check ancestors to see if we're within a popup handle\n\twhile(node) {\n\t\tif($tw.utils.hasClass(node,\"tc-popup-handle\")) {\n\t\t\tisHandle = true;\n\t\t\tpopupCount++;\n\t\t}\n\t\tif($tw.utils.hasClass(node,\"tc-popup-keep\")) {\n\t\t\tisHandle = true;\n\t\t}\n\t\tnode = node.parentNode;\n\t}\n\t// Then count the number of ancestor popups\n\tnode = domNode;\n\twhile(node) {\n\t\tif($tw.utils.hasClass(node,\"tc-popup\")) {\n\t\t\tpopupCount++;\n\t\t}\n\t\tnode = node.parentNode;\n\t}\n\tvar info = {\n\t\tpopupLevel: popupCount,\n\t\tisHandle: isHandle\n\t};\n\treturn info;\n};\n\n/*\nDisplay a popup by adding it to the stack\n*/\nPopup.prototype.show = function(options) {\n\t// Find out what was clicked on\n\tvar info = this.popupInfo(options.domNode);\n\t// Cancel any higher level popups\n\tthis.cancel(info.popupLevel);\n\t// Store the popup details if not already there\n\tif(this.findPopup(options.title) === -1) {\n\t\tthis.popups.push({\n\t\t\ttitle: options.title,\n\t\t\twiki: options.wiki,\n\t\t\tdomNode: options.domNode\n\t\t});\n\t}\n\t// Set the state tiddler\n\toptions.wiki.setTextReference(options.title,\n\t\t\t\"(\" + options.domNode.offsetLeft + \",\" + options.domNode.offsetTop + \",\" + \n\t\t\t\toptions.domNode.offsetWidth + \",\" + options.domNode.offsetHeight + \")\");\n\t// Add the click handler if we have any popups\n\tif(this.popups.length > 0) {\n\t\tthis.rootElement.addEventListener(\"click\",this,true);\t\t\n\t}\n};\n\n/*\nCancel all popups at or above a specified level or DOM node\nlevel: popup level to cancel (0 cancels all popups)\n*/\nPopup.prototype.cancel = function(level) {\n\tvar numPopups = this.popups.length;\n\tlevel = Math.max(0,Math.min(level,numPopups));\n\tfor(var t=level; t<numPopups; t++) {\n\t\tvar popup = this.popups.pop();\n\t\tif(popup.title) {\n\t\t\tpopup.wiki.deleteTiddler(popup.title);\n\t\t}\n\t}\n\tif(this.popups.length === 0) {\n\t\tthis.rootElement.removeEventListener(\"click\",this,false);\n\t}\n};\n\n/*\nReturns true if the specified title and text identifies an active popup\n*/\nPopup.prototype.readPopupState = function(text) {\n\tvar popupLocationRegExp = /^\\((-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+)\\)$/;\n\treturn popupLocationRegExp.test(text);\n};\n\nexports.Popup = Popup;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/popup.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/dom/scroller.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/dom/scroller.js\ntype: application/javascript\nmodule-type: utils\n\nModule that creates a $tw.utils.Scroller object prototype that manages scrolling in the browser\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nEvent handler for when the `tm-scroll` event hits the document body\n*/\nvar PageScroller = function() {\n\tthis.idRequestFrame = null;\n\tthis.requestAnimationFrame = window.requestAnimationFrame ||\n\t\twindow.webkitRequestAnimationFrame ||\n\t\twindow.mozRequestAnimationFrame ||\n\t\tfunction(callback) {\n\t\t\treturn window.setTimeout(callback, 1000/60);\n\t\t};\n\tthis.cancelAnimationFrame = window.cancelAnimationFrame ||\n\t\twindow.webkitCancelAnimationFrame ||\n\t\twindow.webkitCancelRequestAnimationFrame ||\n\t\twindow.mozCancelAnimationFrame ||\n\t\twindow.mozCancelRequestAnimationFrame ||\n\t\tfunction(id) {\n\t\t\twindow.clearTimeout(id);\n\t\t};\n};\n\nPageScroller.prototype.cancelScroll = function() {\n\tif(this.idRequestFrame) {\n\t\tthis.cancelAnimationFrame.call(window,this.idRequestFrame);\n\t\tthis.idRequestFrame = null;\n\t}\n};\n\n/*\nHandle an event\n*/\nPageScroller.prototype.handleEvent = function(event) {\n\tif(event.type === \"tm-scroll\") {\n\t\treturn this.scrollIntoView(event.target);\n\t}\n\treturn true;\n};\n\n/*\nHandle a scroll event hitting the page document\n*/\nPageScroller.prototype.scrollIntoView = function(element) {\n\tvar duration = $tw.utils.getAnimationDuration();\n\t// Now get ready to scroll the body\n\tthis.cancelScroll();\n\tthis.startTime = Date.now();\n\tvar scrollPosition = $tw.utils.getScrollPosition();\n\t// Get the client bounds of the element and adjust by the scroll position\n\tvar clientBounds = element.getBoundingClientRect(),\n\t\tbounds = {\n\t\t\tleft: clientBounds.left + scrollPosition.x,\n\t\t\ttop: clientBounds.top + scrollPosition.y,\n\t\t\twidth: clientBounds.width,\n\t\t\theight: clientBounds.height\n\t\t};\n\t// We'll consider the horizontal and vertical scroll directions separately via this function\n\t// targetPos/targetSize - position and size of the target element\n\t// currentPos/currentSize - position and size of the current scroll viewport\n\t// returns: new position of the scroll viewport\n\tvar getEndPos = function(targetPos,targetSize,currentPos,currentSize) {\n\t\t\tvar newPos = currentPos;\n\t\t\t// If the target is above/left of the current view, then scroll to it's top/left\n\t\t\tif(targetPos <= currentPos) {\n\t\t\t\tnewPos = targetPos;\n\t\t\t// If the target is smaller than the window and the scroll position is too far up, then scroll till the target is at the bottom of the window\n\t\t\t} else if(targetSize < currentSize && currentPos < (targetPos + targetSize - currentSize)) {\n\t\t\t\tnewPos = targetPos + targetSize - currentSize;\n\t\t\t// If the target is big, then just scroll to the top\n\t\t\t} else if(currentPos < targetPos) {\n\t\t\t\tnewPos = targetPos;\n\t\t\t// Otherwise, stay where we are\n\t\t\t} else {\n\t\t\t\tnewPos = currentPos;\n\t\t\t}\n\t\t\t// If we are scrolling within 50 pixels of the top/left then snap to zero\n\t\t\tif(newPos < 50) {\n\t\t\t\tnewPos = 0;\n\t\t\t}\n\t\t\treturn newPos;\n\t\t},\n\t\tendX = getEndPos(bounds.left,bounds.width,scrollPosition.x,window.innerWidth),\n\t\tendY = getEndPos(bounds.top,bounds.height,scrollPosition.y,window.innerHeight);\n\t// Only scroll if the position has changed\n\tif(endX !== scrollPosition.x || endY !== 
scrollPosition.y) {\n\t\tvar self = this,\n\t\t\tdrawFrame;\n\t\tdrawFrame = function () {\n\t\t\tvar t;\n\t\t\tif(duration <= 0) {\n\t\t\t\tt = 1;\n\t\t\t} else {\n\t\t\t\tt = ((Date.now()) - self.startTime) / duration;\t\n\t\t\t}\n\t\t\tif(t >= 1) {\n\t\t\t\tself.cancelScroll();\n\t\t\t\tt = 1;\n\t\t\t}\n\t\t\tt = $tw.utils.slowInSlowOut(t);\n\t\t\twindow.scrollTo(scrollPosition.x + (endX - scrollPosition.x) * t,scrollPosition.y + (endY - scrollPosition.y) * t);\n\t\t\tif(t < 1) {\n\t\t\t\tself.idRequestFrame = self.requestAnimationFrame.call(window,drawFrame);\n\t\t\t}\n\t\t};\n\t\tdrawFrame();\n\t}\n};\n\nexports.PageScroller = PageScroller;\n\n})();\n",
            "title": "$:/core/modules/utils/dom/scroller.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/edition-info.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/edition-info.js\ntype: application/javascript\nmodule-type: utils-node\n\nInformation about the available editions\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar fs = require(\"fs\"),\n\tpath = require(\"path\");\n\nvar editionInfo;\n\nexports.getEditionInfo = function() {\n\tif(!editionInfo) {\n\t\t// Enumerate the edition paths\n\t\tvar editionPaths = $tw.getLibraryItemSearchPaths($tw.config.editionsPath,$tw.config.editionsEnvVar);\n\t\teditionInfo = {};\n\t\tfor(var editionIndex=0; editionIndex<editionPaths.length; editionIndex++) {\n\t\t\tvar editionPath = editionPaths[editionIndex];\n\t\t\t// Enumerate the folders\n\t\t\tvar entries = fs.readdirSync(editionPath);\n\t\t\tfor(var entryIndex=0; entryIndex<entries.length; entryIndex++) {\n\t\t\t\tvar entry = entries[entryIndex];\n\t\t\t\t// Check if directories have a valid tiddlywiki.info\n\t\t\t\tif(!editionInfo[entry] && $tw.utils.isDirectory(path.resolve(editionPath,entry))) {\n\t\t\t\t\tvar info;\n\t\t\t\t\ttry {\n\t\t\t\t\t\tinfo = JSON.parse(fs.readFileSync(path.resolve(editionPath,entry,\"tiddlywiki.info\"),\"utf8\"));\n\t\t\t\t\t} catch(ex) {\n\t\t\t\t\t}\n\t\t\t\t\tif(info) {\n\t\t\t\t\t\teditionInfo[entry] = info;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn editionInfo;\n};\n\n})();\n",
            "title": "$:/core/modules/utils/edition-info.js",
            "type": "application/javascript",
            "module-type": "utils-node"
        },
        "$:/core/modules/utils/fakedom.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/fakedom.js\ntype: application/javascript\nmodule-type: global\n\nA barebones implementation of DOM interfaces needed by the rendering mechanism.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Sequence number used to enable us to track objects for testing\nvar sequenceNumber = null;\n\nvar bumpSequenceNumber = function(object) {\n\tif(sequenceNumber !== null) {\n\t\tobject.sequenceNumber = sequenceNumber++;\n\t}\n};\n\nvar TW_TextNode = function(text) {\n\tbumpSequenceNumber(this);\n\tthis.textContent = text;\n};\n\nObject.defineProperty(TW_TextNode.prototype, \"nodeType\", {\n\tget: function() {\n\t\treturn 3;\n\t}\n});\n\nObject.defineProperty(TW_TextNode.prototype, \"formattedTextContent\", {\n\tget: function() {\n\t\treturn this.textContent.replace(/(\\r?\\n)/g,\"\");\n\t}\n});\n\nvar TW_Element = function(tag,namespace) {\n\tbumpSequenceNumber(this);\n\tthis.isTiddlyWikiFakeDom = true;\n\tthis.tag = tag;\n\tthis.attributes = {};\n\tthis.isRaw = false;\n\tthis.children = [];\n\tthis.style = {};\n\tthis.namespaceURI = namespace || \"http://www.w3.org/1999/xhtml\";\n};\n\nObject.defineProperty(TW_Element.prototype, \"nodeType\", {\n\tget: function() {\n\t\treturn 1;\n\t}\n});\n\nTW_Element.prototype.getAttribute = function(name) {\n\tif(this.isRaw) {\n\t\tthrow \"Cannot getAttribute on a raw TW_Element\";\n\t}\n\treturn this.attributes[name];\n};\n\nTW_Element.prototype.setAttribute = function(name,value) {\n\tif(this.isRaw) {\n\t\tthrow \"Cannot setAttribute on a raw TW_Element\";\n\t}\n\tthis.attributes[name] = value;\n};\n\nTW_Element.prototype.setAttributeNS = function(namespace,name,value) {\n\tthis.setAttribute(name,value);\n};\n\nTW_Element.prototype.removeAttribute = function(name) {\n\tif(this.isRaw) {\n\t\tthrow \"Cannot removeAttribute on a raw TW_Element\";\n\t}\n\tif($tw.utils.hop(this.attributes,name)) {\n\t\tdelete this.attributes[name];\n\t}\n};\n\nTW_Element.prototype.appendChild = function(node) {\n\tthis.children.push(node);\n\tnode.parentNode = this;\n};\n\nTW_Element.prototype.insertBefore = function(node,nextSibling) {\n\tif(nextSibling) {\n\t\tvar p = this.children.indexOf(nextSibling);\n\t\tif(p !== -1) {\n\t\t\tthis.children.splice(p,0,node);\n\t\t\tnode.parentNode = this;\n\t\t} else {\n\t\t\tthis.appendChild(node);\n\t\t}\n\t} else {\n\t\tthis.appendChild(node);\n\t}\n};\n\nTW_Element.prototype.removeChild = function(node) {\n\tvar p = this.children.indexOf(node);\n\tif(p !== -1) {\n\t\tthis.children.splice(p,1);\n\t}\n};\n\nTW_Element.prototype.hasChildNodes = function() {\n\treturn !!this.children.length;\n};\n\nObject.defineProperty(TW_Element.prototype, \"childNodes\", {\n\tget: function() {\n\t\treturn this.children;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"firstChild\", {\n\tget: function() {\n\t\treturn this.children[0];\n\t}\n});\n\nTW_Element.prototype.addEventListener = function(type,listener,useCapture) {\n\t// Do nothing\n};\n\nObject.defineProperty(TW_Element.prototype, \"tagName\", {\n\tget: function() {\n\t\treturn this.tag || \"\";\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"className\", {\n\tget: function() {\n\t\treturn this.attributes[\"class\"] || \"\";\n\t},\n\tset: function(value) {\n\t\tthis.attributes[\"class\"] = value;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"value\", {\n\tget: function() {\n\t\treturn this.attributes.value || \"\";\n\t},\n\tset: function(value) 
{\n\t\tthis.attributes.value = value;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"outerHTML\", {\n\tget: function() {\n\t\tvar output = [],attr,a,v;\n\t\toutput.push(\"<\",this.tag);\n\t\tif(this.attributes) {\n\t\t\tattr = [];\n\t\t\tfor(a in this.attributes) {\n\t\t\t\tattr.push(a);\n\t\t\t}\n\t\t\tattr.sort();\n\t\t\tfor(a=0; a<attr.length; a++) {\n\t\t\t\tv = this.attributes[attr[a]];\n\t\t\t\tif(v !== undefined) {\n\t\t\t\t\toutput.push(\" \",attr[a],\"=\\\"\",$tw.utils.htmlEncode(v),\"\\\"\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif(this.style) {\n\t\t\tvar style = [];\n\t\t\tfor(var s in this.style) {\n\t\t\t\tstyle.push(s + \":\" + this.style[s] + \";\");\n\t\t\t}\n\t\t\tif(style.length > 0) {\n\t\t\t\toutput.push(\" style=\\\"\",style.join(\"\"),\"\\\"\")\n\t\t\t}\n\t\t}\n\t\toutput.push(\">\");\n\t\tif($tw.config.htmlVoidElements.indexOf(this.tag) === -1) {\n\t\t\toutput.push(this.innerHTML);\n\t\t\toutput.push(\"</\",this.tag,\">\");\n\t\t}\n\t\treturn output.join(\"\");\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"innerHTML\", {\n\tget: function() {\n\t\tif(this.isRaw) {\n\t\t\treturn this.rawHTML;\n\t\t} else {\n\t\t\tvar b = [];\n\t\t\t$tw.utils.each(this.children,function(node) {\n\t\t\t\tif(node instanceof TW_Element) {\n\t\t\t\t\tb.push(node.outerHTML);\n\t\t\t\t} else if(node instanceof TW_TextNode) {\n\t\t\t\t\tb.push($tw.utils.htmlEncode(node.textContent));\n\t\t\t\t}\n\t\t\t});\n\t\t\treturn b.join(\"\");\n\t\t}\n\t},\n\tset: function(value) {\n\t\tthis.isRaw = true;\n\t\tthis.rawHTML = value;\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"textContent\", {\n\tget: function() {\n\t\tif(this.isRaw) {\n\t\t\tthrow \"Cannot get textContent on a raw TW_Element\";\n\t\t} else {\n\t\t\tvar b = [];\n\t\t\t$tw.utils.each(this.children,function(node) {\n\t\t\t\tb.push(node.textContent);\n\t\t\t});\n\t\t\treturn b.join(\"\");\n\t\t}\n\t},\n\tset: function(value) {\n\t\tthis.children = [new TW_TextNode(value)];\n\t}\n});\n\nObject.defineProperty(TW_Element.prototype, \"formattedTextContent\", {\n\tget: function() {\n\t\tif(this.isRaw) {\n\t\t\tthrow \"Cannot get formattedTextContent on a raw TW_Element\";\n\t\t} else {\n\t\t\tvar b = [],\n\t\t\t\tisBlock = $tw.config.htmlBlockElements.indexOf(this.tag) !== -1;\n\t\t\tif(isBlock) {\n\t\t\t\tb.push(\"\\n\");\n\t\t\t}\n\t\t\tif(this.tag === \"li\") {\n\t\t\t\tb.push(\"* \");\n\t\t\t}\n\t\t\t$tw.utils.each(this.children,function(node) {\n\t\t\t\tb.push(node.formattedTextContent);\n\t\t\t});\n\t\t\tif(isBlock) {\n\t\t\t\tb.push(\"\\n\");\n\t\t\t}\n\t\t\treturn b.join(\"\");\n\t\t}\n\t}\n});\n\nvar document = {\n\tsetSequenceNumber: function(value) {\n\t\tsequenceNumber = value;\n\t},\n\tcreateElementNS: function(namespace,tag) {\n\t\treturn new TW_Element(tag,namespace);\n\t},\n\tcreateElement: function(tag) {\n\t\treturn new TW_Element(tag);\n\t},\n\tcreateTextNode: function(text) {\n\t\treturn new TW_TextNode(text);\n\t},\n\tcompatMode: \"CSS1Compat\", // For KaTeX to know that we're not a browser in quirks mode\n\tisTiddlyWikiFakeDom: true\n};\n\nexports.fakeDocument = document;\n\n})();\n",
            "title": "$:/core/modules/utils/fakedom.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/utils/filesystem.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/filesystem.js\ntype: application/javascript\nmodule-type: utils-node\n\nFile system utilities\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar fs = require(\"fs\"),\n\tpath = require(\"path\");\n\n/*\nRecursively (and synchronously) copy a directory and all its content\n*/\nexports.copyDirectory = function(srcPath,dstPath) {\n\t// Remove any trailing path separators\n\tsrcPath = $tw.utils.removeTrailingSeparator(srcPath);\n\tdstPath = $tw.utils.removeTrailingSeparator(dstPath);\n\t// Create the destination directory\n\tvar err = $tw.utils.createDirectory(dstPath);\n\tif(err) {\n\t\treturn err;\n\t}\n\t// Function to copy a folder full of files\n\tvar copy = function(srcPath,dstPath) {\n\t\tvar srcStats = fs.lstatSync(srcPath),\n\t\t\tdstExists = fs.existsSync(dstPath);\n\t\tif(srcStats.isFile()) {\n\t\t\t$tw.utils.copyFile(srcPath,dstPath);\n\t\t} else if(srcStats.isDirectory()) {\n\t\t\tvar items = fs.readdirSync(srcPath);\n\t\t\tfor(var t=0; t<items.length; t++) {\n\t\t\t\tvar item = items[t],\n\t\t\t\t\terr = copy(srcPath + path.sep + item,dstPath + path.sep + item);\n\t\t\t\tif(err) {\n\t\t\t\t\treturn err;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\tcopy(srcPath,dstPath);\n\treturn null;\n};\n\n/*\nCopy a file\n*/\nvar FILE_BUFFER_LENGTH = 64 * 1024,\n\tfileBuffer;\n\nexports.copyFile = function(srcPath,dstPath) {\n\t// Create buffer if required\n\tif(!fileBuffer) {\n\t\tfileBuffer = new Buffer(FILE_BUFFER_LENGTH);\n\t}\n\t// Create any directories in the destination\n\t$tw.utils.createDirectory(path.dirname(dstPath));\n\t// Copy the file\n\tvar srcFile = fs.openSync(srcPath,\"r\"),\n\t\tdstFile = fs.openSync(dstPath,\"w\"),\n\t\tbytesRead = 1,\n\t\tpos = 0;\n\twhile (bytesRead > 0) {\n\t\tbytesRead = fs.readSync(srcFile,fileBuffer,0,FILE_BUFFER_LENGTH,pos);\n\t\tfs.writeSync(dstFile,fileBuffer,0,bytesRead);\n\t\tpos += bytesRead;\n\t}\n\tfs.closeSync(srcFile);\n\tfs.closeSync(dstFile);\n\treturn null;\n};\n\n/*\nRemove trailing path separator\n*/\nexports.removeTrailingSeparator = function(dirPath) {\n\tvar len = dirPath.length;\n\tif(dirPath.charAt(len-1) === path.sep) {\n\t\tdirPath = dirPath.substr(0,len-1);\n\t}\n\treturn dirPath;\n};\n\n/*\nRecursively create a directory\n*/\nexports.createDirectory = function(dirPath) {\n\tif(dirPath.substr(dirPath.length-1,1) !== path.sep) {\n\t\tdirPath = dirPath + path.sep;\n\t}\n\tvar pos = 1;\n\tpos = dirPath.indexOf(path.sep,pos);\n\twhile(pos !== -1) {\n\t\tvar subDirPath = dirPath.substr(0,pos);\n\t\tif(!$tw.utils.isDirectory(subDirPath)) {\n\t\t\ttry {\n\t\t\t\tfs.mkdirSync(subDirPath);\n\t\t\t} catch(e) {\n\t\t\t\treturn \"Error creating directory '\" + subDirPath + \"'\";\n\t\t\t}\n\t\t}\n\t\tpos = dirPath.indexOf(path.sep,pos + 1);\n\t}\n\treturn null;\n};\n\n/*\nRecursively create directories needed to contain a specified file\n*/\nexports.createFileDirectories = function(filePath) {\n\treturn $tw.utils.createDirectory(path.dirname(filePath));\n};\n\n/*\nRecursively delete a directory\n*/\nexports.deleteDirectory = function(dirPath) {\n\tif(fs.existsSync(dirPath)) {\n\t\tvar entries = fs.readdirSync(dirPath);\n\t\tfor(var entryIndex=0; entryIndex<entries.length; entryIndex++) {\n\t\t\tvar currPath = dirPath + path.sep + entries[entryIndex];\n\t\t\tif(fs.lstatSync(currPath).isDirectory()) {\n\t\t\t\t$tw.utils.deleteDirectory(currPath);\n\t\t\t} else 
{\n\t\t\t\tfs.unlinkSync(currPath);\n\t\t\t}\n\t\t}\n\tfs.rmdirSync(dirPath);\n\t}\n\treturn null;\n};\n\n/*\nCheck if a path identifies a directory\n*/\nexports.isDirectory = function(dirPath) {\n\treturn fs.existsSync(dirPath) && fs.statSync(dirPath).isDirectory();\n};\n\n/*\nCheck if a path identifies a directory that is empty\n*/\nexports.isDirectoryEmpty = function(dirPath) {\n\tif(!$tw.utils.isDirectory(dirPath)) {\n\t\treturn false;\n\t}\n\tvar files = fs.readdirSync(dirPath),\n\t\tempty = true;\n\t$tw.utils.each(files,function(file,index) {\n\t\tif(file.charAt(0) !== \".\") {\n\t\t\tempty = false;\n\t\t}\n\t});\n\treturn empty;\n};\n\n/*\nRecursively delete a tree of empty directories\n*/\nexports.deleteEmptyDirs = function(dirpath,callback) {\n\tvar self = this;\n\tfs.readdir(dirpath,function(err,files) {\n\t\tif(err) {\n\t\t\treturn callback(err);\n\t\t}\n\t\tif(files.length > 0) {\n\t\t\treturn callback(null);\n\t\t}\n\t\tfs.rmdir(dirpath,function(err) {\n\t\t\tif(err) {\n\t\t\t\treturn callback(err);\n\t\t\t}\n\t\t\tself.deleteEmptyDirs(path.dirname(dirpath),callback);\n\t\t});\n\t});\n};\n\n})();\n",
            "title": "$:/core/modules/utils/filesystem.js",
            "type": "application/javascript",
            "module-type": "utils-node"
        },
        "$:/core/modules/utils/logger.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/logger.js\ntype: application/javascript\nmodule-type: utils\n\nA basic logging implementation\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar ALERT_TAG = \"$:/tags/Alert\";\n\n/*\nMake a new logger\n*/\nfunction Logger(componentName) {\n\tthis.componentName = componentName || \"\";\n}\n\n/*\nLog a message\n*/\nLogger.prototype.log = function(/* args */) {\n\tif(console !== undefined && console.log !== undefined) {\n\t\treturn Function.apply.call(console.log, console, [this.componentName + \":\"].concat(Array.prototype.slice.call(arguments,0)));\n\t}\n};\n\n/*\nAlert a message\n*/\nLogger.prototype.alert = function(/* args */) {\n\t// Prepare the text of the alert\n\tvar text = Array.prototype.join.call(arguments,\" \");\n\t// Create alert tiddlers in the browser\n\tif($tw.browser) {\n\t\t// Check if there is an existing alert with the same text and the same component\n\t\tvar existingAlerts = $tw.wiki.getTiddlersWithTag(ALERT_TAG),\n\t\t\talertFields,\n\t\t\texistingCount,\n\t\t\tself = this;\n\t\t$tw.utils.each(existingAlerts,function(title) {\n\t\t\tvar tiddler = $tw.wiki.getTiddler(title);\n\t\t\tif(tiddler.fields.text === text && tiddler.fields.component === self.componentName && tiddler.fields.modified && (!alertFields || tiddler.fields.modified < alertFields.modified)) {\n\t\t\t\t\talertFields = $tw.utils.extend({},tiddler.fields);\n\t\t\t}\n\t\t});\n\t\tif(alertFields) {\n\t\t\texistingCount = alertFields.count || 1;\n\t\t} else {\n\t\t\talertFields = {\n\t\t\t\ttitle: $tw.wiki.generateNewTitle(\"$:/temp/alerts/alert\",{prefix: \"\"}),\n\t\t\t\ttext: text,\n\t\t\t\ttags: [ALERT_TAG],\n\t\t\t\tcomponent: this.componentName\n\t\t\t};\n\t\t\texistingCount = 0;\n\t\t}\n\t\talertFields.modified = new Date();\n\t\tif(++existingCount > 1) {\n\t\t\talertFields.count = existingCount;\n\t\t} else {\n\t\t\talertFields.count = undefined;\n\t\t}\n\t\t$tw.wiki.addTiddler(new $tw.Tiddler(alertFields));\n\t\t// Log the alert as well\n\t\tthis.log.apply(this,Array.prototype.slice.call(arguments,0));\n\t} else {\n\t\t// Print an orange message to the console if not in the browser\n\t\tconsole.error(\"\\x1b[1;33m\" + text + \"\\x1b[0m\");\n\t}\n};\n\nexports.Logger = Logger;\n\n})();\n",
            "title": "$:/core/modules/utils/logger.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/parsetree.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/parsetree.js\ntype: application/javascript\nmodule-type: utils\n\nParse tree utility functions.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nexports.addAttributeToParseTreeNode = function(node,name,value) {\n\tnode.attributes = node.attributes || {};\n\tnode.attributes[name] = {type: \"string\", value: value};\n};\n\nexports.getAttributeValueFromParseTreeNode = function(node,name,defaultValue) {\n\tif(node.attributes && node.attributes[name] && node.attributes[name].value !== undefined) {\n\t\treturn node.attributes[name].value;\n\t}\n\treturn defaultValue;\n};\n\nexports.addClassToParseTreeNode = function(node,classString) {\n\tvar classes = [];\n\tnode.attributes = node.attributes || {};\n\tnode.attributes[\"class\"] = node.attributes[\"class\"] || {type: \"string\", value: \"\"};\n\tif(node.attributes[\"class\"].type === \"string\") {\n\t\tif(node.attributes[\"class\"].value !== \"\") {\n\t\t\tclasses = node.attributes[\"class\"].value.split(\" \");\n\t\t}\n\t\tif(classString !== \"\") {\n\t\t\t$tw.utils.pushTop(classes,classString.split(\" \"));\n\t\t}\n\t\tnode.attributes[\"class\"].value = classes.join(\" \");\n\t}\n};\n\nexports.addStyleToParseTreeNode = function(node,name,value) {\n\t\tnode.attributes = node.attributes || {};\n\t\tnode.attributes.style = node.attributes.style || {type: \"string\", value: \"\"};\n\t\tif(node.attributes.style.type === \"string\") {\n\t\t\tnode.attributes.style.value += name + \":\" + value + \";\";\n\t\t}\n};\n\nexports.findParseTreeNode = function(nodeArray,search) {\n\tfor(var t=0; t<nodeArray.length; t++) {\n\t\tif(nodeArray[t].type === search.type && nodeArray[t].tag === search.tag) {\n\t\t\treturn nodeArray[t];\n\t\t}\n\t}\n\treturn undefined;\n};\n\n/*\nHelper to get the text of a parse tree node or array of nodes\n*/\nexports.getParseTreeText = function getParseTreeText(tree) {\n\tvar output = [];\n\tif($tw.utils.isArray(tree)) {\n\t\t$tw.utils.each(tree,function(node) {\n\t\t\toutput.push(getParseTreeText(node));\n\t\t});\n\t} else {\n\t\tif(tree.type === \"text\") {\n\t\t\toutput.push(tree.text);\n\t\t}\n\t\tif(tree.children) {\n\t\t\treturn getParseTreeText(tree.children);\n\t\t}\n\t}\n\treturn output.join(\"\");\n};\n\n})();\n",
            "title": "$:/core/modules/utils/parsetree.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/performance.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/performance.js\ntype: application/javascript\nmodule-type: global\n\nPerformance measurement.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nfunction Performance(enabled) {\n\tthis.enabled = !!enabled;\n\tthis.measures = {}; // Hashmap of current values of measurements\n\tthis.logger = new $tw.utils.Logger(\"performance\");\n}\n\n/*\nWrap performance reporting around a top level function\n*/\nPerformance.prototype.report = function(name,fn) {\n\tvar self = this;\n\tif(this.enabled) {\n\t\treturn function() {\n\t\t\tself.measures = {};\n\t\t\tvar startTime = $tw.utils.timer(),\n\t\t\t\tresult = fn.apply(this,arguments);\n\t\t\tself.logger.log(name + \": \" + $tw.utils.timer(startTime).toFixed(2) + \"ms\");\n\t\t\tfor(var m in self.measures) {\n\t\t\t\tself.logger.log(\"+\" + m + \": \" + self.measures[m].toFixed(2) + \"ms\");\n\t\t\t}\n\t\t\treturn result;\n\t\t};\n\t} else {\n\t\treturn fn;\n\t}\n};\n\n/*\nWrap performance measurements around a subfunction\n*/\nPerformance.prototype.measure = function(name,fn) {\n\tvar self = this;\n\tif(this.enabled) {\n\t\treturn function() {\n\t\t\tvar startTime = $tw.utils.timer(),\n\t\t\t\tresult = fn.apply(this,arguments),\n\t\t\t\tvalue = self.measures[name] || 0;\n\t\t\tself.measures[name] = value + $tw.utils.timer(startTime);\n\t\t\treturn result;\n\t\t};\n\t} else {\n\t\treturn fn;\n\t}\n};\n\nexports.Performance = Performance;\n\n})();\n",
            "title": "$:/core/modules/utils/performance.js",
            "type": "application/javascript",
            "module-type": "global"
        },
        "$:/core/modules/utils/pluginmaker.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/pluginmaker.js\ntype: application/javascript\nmodule-type: utils\n\nA quick and dirty way to pack up plugins within the browser.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nRepack a plugin, and then delete any non-shadow payload tiddlers\n*/\nexports.repackPlugin = function(title,additionalTiddlers,excludeTiddlers) {\n\tadditionalTiddlers = additionalTiddlers || [];\n\texcludeTiddlers = excludeTiddlers || [];\n\t// Get the plugin tiddler\n\tvar pluginTiddler = $tw.wiki.getTiddler(title);\n\tif(!pluginTiddler) {\n\t\tthrow \"No such tiddler as \" + title;\n\t}\n\t// Extract the JSON\n\tvar jsonPluginTiddler;\n\ttry {\n\t\tjsonPluginTiddler = JSON.parse(pluginTiddler.fields.text);\n\t} catch(e) {\n\t\tthrow \"Cannot parse plugin tiddler \" + title + \"\\n\" + $tw.language.getString(\"Error/Caption\") + \": \" + e;\n\t}\n\t// Get the list of tiddlers\n\tvar tiddlers = Object.keys(jsonPluginTiddler.tiddlers);\n\t// Add the additional tiddlers\n\t$tw.utils.pushTop(tiddlers,additionalTiddlers);\n\t// Remove any excluded tiddlers\n\tfor(var t=tiddlers.length-1; t>=0; t--) {\n\t\tif(excludeTiddlers.indexOf(tiddlers[t]) !== -1) {\n\t\t\ttiddlers.splice(t,1);\n\t\t}\n\t}\n\t// Pack up the tiddlers into a block of JSON\n\tvar plugins = {};\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = $tw.wiki.getTiddler(title),\n\t\t\tfields = {};\n\t\t$tw.utils.each(tiddler.fields,function (value,name) {\n\t\t\tfields[name] = tiddler.getFieldString(name);\n\t\t});\n\t\tplugins[title] = fields;\n\t});\n\t// Retrieve and bump the version number\n\tvar pluginVersion = $tw.utils.parseVersion(pluginTiddler.getFieldString(\"version\") || \"0.0.0\") || {\n\t\t\tmajor: \"0\",\n\t\t\tminor: \"0\",\n\t\t\tpatch: \"0\"\n\t\t};\n\tpluginVersion.patch++;\n\tvar version = pluginVersion.major + \".\" + pluginVersion.minor + \".\" + pluginVersion.patch;\n\tif(pluginVersion.prerelease) {\n\t\tversion += \"-\" + pluginVersion.prerelease;\n\t}\n\tif(pluginVersion.build) {\n\t\tversion += \"+\" + pluginVersion.build;\n\t}\n\t// Save the tiddler\n\t$tw.wiki.addTiddler(new $tw.Tiddler(pluginTiddler,{text: JSON.stringify({tiddlers: plugins},null,4), version: version}));\n\t// Delete any non-shadow constituent tiddlers\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tif($tw.wiki.tiddlerExists(title)) {\n\t\t\t$tw.wiki.deleteTiddler(title);\n\t\t}\n\t});\n\t// Trigger an autosave\n\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n\t// Return a heartwarming confirmation\n\treturn \"Plugin \" + title + \" successfully saved\";\n};\n\n})();\n",
            "title": "$:/core/modules/utils/pluginmaker.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/utils/utils.js": {
            "text": "/*\\\ntitle: $:/core/modules/utils/utils.js\ntype: application/javascript\nmodule-type: utils\n\nVarious static utility functions.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nDisplay a warning, in colour if we're on a terminal\n*/\nexports.warning = function(text) {\n\tconsole.log($tw.node ? \"\\x1b[1;33m\" + text + \"\\x1b[0m\" : text);\n};\n\n/*\nRepeats a string\n*/\nexports.repeat = function(str,count) {\n\tvar result = \"\";\n\tfor(var t=0;t<count;t++) {\n\t\tresult += str;\n\t}\n\treturn result;\n};\n\n/*\nTrim whitespace from the start and end of a string\nThanks to Steven Levithan, http://blog.stevenlevithan.com/archives/faster-trim-javascript\n*/\nexports.trim = function(str) {\n\tif(typeof str === \"string\") {\n\t\treturn str.replace(/^\\s\\s*/, '').replace(/\\s\\s*$/, '');\n\t} else {\n\t\treturn str;\n\t}\n};\n\n/*\nFind the line break preceding a given position in a string\nReturns position immediately after that line break, or the start of the string\n*/\nexports.findPrecedingLineBreak = function(text,pos) {\n\tvar result = text.lastIndexOf(\"\\n\",pos - 1);\n\tif(result === -1) {\n\t\tresult = 0;\n\t} else {\n\t\tresult++;\n\t\tif(text.charAt(result) === \"\\r\") {\n\t\t\tresult++;\n\t\t}\n\t}\n\treturn result;\n};\n\n/*\nFind the line break following a given position in a string\n*/\nexports.findFollowingLineBreak = function(text,pos) {\n\t// Cut to just past the following line break, or to the end of the text\n\tvar result = text.indexOf(\"\\n\",pos);\n\tif(result === -1) {\n\t\tresult = text.length;\n\t} else {\n\t\tif(text.charAt(result) === \"\\r\") {\n\t\t\tresult++;\n\t\t}\n\t}\n\treturn result;\n};\n\n/*\nReturn the number of keys in an object\n*/\nexports.count = function(object) {\n\treturn Object.keys(object || {}).length;\n};\n\n/*\nCheck if an array is equal by value and by reference.\n*/\nexports.isArrayEqual = function(array1,array2) {\n\tif(array1 === array2) {\n\t\treturn true;\n\t}\n\tarray1 = array1 || [];\n\tarray2 = array2 || [];\n\tif(array1.length !== array2.length) {\n\t\treturn false;\n\t}\n\treturn array1.every(function(value,index) {\n\t\treturn value === array2[index];\n\t});\n};\n\n/*\nPush entries onto an array, removing them first if they already exist in the array\n\tarray: array to modify (assumed to be free of duplicates)\n\tvalue: a single value to push or an array of values to push\n*/\nexports.pushTop = function(array,value) {\n\tvar t,p;\n\tif($tw.utils.isArray(value)) {\n\t\t// Remove any array entries that are duplicated in the new values\n\t\tif(value.length !== 0) {\n\t\t\tif(array.length !== 0) {\n\t\t\t\tif(value.length < array.length) {\n\t\t\t\t\tfor(t=0; t<value.length; t++) {\n\t\t\t\t\t\tp = array.indexOf(value[t]);\n\t\t\t\t\t\tif(p !== -1) {\n\t\t\t\t\t\t\tarray.splice(p,1);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfor(t=array.length-1; t>=0; t--) {\n\t\t\t\t\t\tp = value.indexOf(array[t]);\n\t\t\t\t\t\tif(p !== -1) {\n\t\t\t\t\t\t\tarray.splice(t,1);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Push the values on top of the main array\n\t\t\tarray.push.apply(array,value);\n\t\t}\n\t} else {\n\t\tp = array.indexOf(value);\n\t\tif(p !== -1) {\n\t\t\tarray.splice(p,1);\n\t\t}\n\t\tarray.push(value);\n\t}\n\treturn array;\n};\n\n/*\nRemove entries from an array\n\tarray: array to modify\n\tvalue: a single value to remove, or an array of values to remove\n*/\nexports.removeArrayEntries = function(array,value) 
{\n\tvar t,p;\n\tif($tw.utils.isArray(value)) {\n\t\tfor(t=0; t<value.length; t++) {\n\t\t\tp = array.indexOf(value[t]);\n\t\t\tif(p !== -1) {\n\t\t\t\tarray.splice(p,1);\n\t\t\t}\n\t\t}\n\t} else {\n\t\tp = array.indexOf(value);\n\t\tif(p !== -1) {\n\t\t\tarray.splice(p,1);\n\t\t}\n\t}\n};\n\n/*\nCheck whether any members of a hashmap are present in another hashmap\n*/\nexports.checkDependencies = function(dependencies,changes) {\n\tvar hit = false;\n\t$tw.utils.each(changes,function(change,title) {\n\t\tif($tw.utils.hop(dependencies,title)) {\n\t\t\thit = true;\n\t\t}\n\t});\n\treturn hit;\n};\n\nexports.extend = function(object /* [, src] */) {\n\t$tw.utils.each(Array.prototype.slice.call(arguments, 1), function(source) {\n\t\tif(source) {\n\t\t\tfor(var property in source) {\n\t\t\t\tobject[property] = source[property];\n\t\t\t}\n\t\t}\n\t});\n\treturn object;\n};\n\nexports.deepCopy = function(object) {\n\tvar result,t;\n\tif($tw.utils.isArray(object)) {\n\t\t// Copy arrays\n\t\tresult = object.slice(0);\n\t} else if(typeof object === \"object\") {\n\t\tresult = {};\n\t\tfor(t in object) {\n\t\t\tif(object[t] !== undefined) {\n\t\t\t\tresult[t] = $tw.utils.deepCopy(object[t]);\n\t\t\t}\n\t\t}\n\t} else {\n\t\tresult = object;\n\t}\n\treturn result;\n};\n\nexports.extendDeepCopy = function(object,extendedProperties) {\n\tvar result = $tw.utils.deepCopy(object),t;\n\tfor(t in extendedProperties) {\n\t\tif(extendedProperties[t] !== undefined) {\n\t\t\tresult[t] = $tw.utils.deepCopy(extendedProperties[t]);\n\t\t}\n\t}\n\treturn result;\n};\n\nexports.deepFreeze = function deepFreeze(object) {\n\tvar property, key;\n\tObject.freeze(object);\n\tfor(key in object) {\n\t\tproperty = object[key];\n\t\tif($tw.utils.hop(object,key) && (typeof property === \"object\") && !Object.isFrozen(property)) {\n\t\t\tdeepFreeze(property);\n\t\t}\n\t}\n};\n\nexports.slowInSlowOut = function(t) {\n\treturn (1 - ((Math.cos(t * Math.PI) + 1) / 2));\n};\n\nexports.formatDateString = function(date,template) {\n\tvar result = \"\",\n\t\tt = template,\n\t\tmatches = [\n\t\t\t[/^0hh12/, function() {\n\t\t\t\treturn $tw.utils.pad($tw.utils.getHours12(date));\n\t\t\t}],\n\t\t\t[/^wYYYY/, function() {\n\t\t\t\treturn $tw.utils.getYearForWeekNo(date);\n\t\t\t}],\n\t\t\t[/^hh12/, function() {\n\t\t\t\treturn $tw.utils.getHours12(date);\n\t\t\t}],\n\t\t\t[/^DDth/, function() {\n\t\t\t\treturn date.getDate() + $tw.utils.getDaySuffix(date);\n\t\t\t}],\n\t\t\t[/^YYYY/, function() {\n\t\t\t\treturn date.getFullYear();\n\t\t\t}],\n\t\t\t[/^0hh/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getHours());\n\t\t\t}],\n\t\t\t[/^0mm/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getMinutes());\n\t\t\t}],\n\t\t\t[/^0ss/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getSeconds());\n\t\t\t}],\n\t\t\t[/^0DD/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getDate());\n\t\t\t}],\n\t\t\t[/^0MM/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getMonth()+1);\n\t\t\t}],\n\t\t\t[/^0WW/, function() {\n\t\t\t\treturn $tw.utils.pad($tw.utils.getWeek(date));\n\t\t\t}],\n\t\t\t[/^ddd/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Short/Day/\" + date.getDay());\n\t\t\t}],\n\t\t\t[/^mmm/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Short/Month/\" + (date.getMonth() + 1));\n\t\t\t}],\n\t\t\t[/^DDD/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Long/Day/\" + date.getDay());\n\t\t\t}],\n\t\t\t[/^MMM/, function() {\n\t\t\t\treturn $tw.language.getString(\"Date/Long/Month/\" + (date.getMonth() + 
1));\n\t\t\t}],\n\t\t\t[/^TZD/, function() {\n\t\t\t\tvar tz = date.getTimezoneOffset(),\n\t\t\t\tatz = Math.abs(tz);\n\t\t\t\treturn (tz < 0 ? '+' : '-') + $tw.utils.pad(Math.floor(atz / 60)) + ':' + $tw.utils.pad(atz % 60);\n\t\t\t}],\n\t\t\t[/^wYY/, function() {\n\t\t\t\treturn $tw.utils.pad($tw.utils.getYearForWeekNo(date) - 2000);\n\t\t\t}],\n\t\t\t[/^[ap]m/, function() {\n\t\t\t\treturn $tw.utils.getAmPm(date).toLowerCase();\n\t\t\t}],\n\t\t\t[/^hh/, function() {\n\t\t\t\treturn date.getHours();\n\t\t\t}],\n\t\t\t[/^mm/, function() {\n\t\t\t\treturn date.getMinutes();\n\t\t\t}],\n\t\t\t[/^ss/, function() {\n\t\t\t\treturn date.getSeconds();\n\t\t\t}],\n\t\t\t[/^[AP]M/, function() {\n\t\t\t\treturn $tw.utils.getAmPm(date).toUpperCase();\n\t\t\t}],\n\t\t\t[/^DD/, function() {\n\t\t\t\treturn date.getDate();\n\t\t\t}],\n\t\t\t[/^MM/, function() {\n\t\t\t\treturn date.getMonth() + 1;\n\t\t\t}],\n\t\t\t[/^WW/, function() {\n\t\t\t\treturn $tw.utils.getWeek(date);\n\t\t\t}],\n\t\t\t[/^YY/, function() {\n\t\t\t\treturn $tw.utils.pad(date.getFullYear() - 2000);\n\t\t\t}]\n\t\t];\n\twhile(t.length){\n\t\tvar matchString = \"\";\n\t\t$tw.utils.each(matches, function(m) {\n\t\t\tvar match = m[0].exec(t);\n\t\t\tif(match) {\n\t\t\t\tmatchString = m[1].call();\n\t\t\t\tt = t.substr(match[0].length);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t});\n\t\tif(matchString) {\n\t\t\tresult += matchString;\n\t\t} else {\n\t\t\tresult += t.charAt(0);\n\t\t\tt = t.substr(1);\n\t\t}\n\t}\n\tresult = result.replace(/\\\\(.)/g,\"$1\");\n\treturn result;\n};\n\nexports.getAmPm = function(date) {\n\treturn $tw.language.getString(\"Date/Period/\" + (date.getHours() >= 12 ? \"pm\" : \"am\"));\n};\n\nexports.getDaySuffix = function(date) {\n\treturn $tw.language.getString(\"Date/DaySuffix/\" + date.getDate());\n};\n\nexports.getWeek = function(date) {\n\tvar dt = new Date(date.getTime());\n\tvar d = dt.getDay();\n\tif(d === 0) {\n\t\td = 7; // JavaScript Sun=0, ISO Sun=7\n\t}\n\tdt.setTime(dt.getTime() + (4 - d) * 86400000);// shift day to Thurs of same week to calculate weekNo\n\tvar n = Math.floor((dt.getTime()-new Date(dt.getFullYear(),0,1) + 3600000) / 86400000);\n\treturn Math.floor(n / 7) + 1;\n};\n\nexports.getYearForWeekNo = function(date) {\n\tvar dt = new Date(date.getTime());\n\tvar d = dt.getDay();\n\tif(d === 0) {\n\t\td = 7; // JavaScript Sun=0, ISO Sun=7\n\t}\n\tdt.setTime(dt.getTime() + (4 - d) * 86400000);// shift day to Thurs of same week\n\treturn dt.getFullYear();\n};\n\nexports.getHours12 = function(date) {\n\tvar h = date.getHours();\n\treturn h > 12 ? h-12 : ( h > 0 ? 
h : 12 );\n};\n\n/*\nConvert a date delta in milliseconds into a string representation of \"23 seconds ago\", \"27 minutes ago\" etc.\n\tdelta: delta in milliseconds\nReturns an object with these members:\n\tdescription: string describing the delta period\n\tupdatePeriod: time in millisecond until the string will be inaccurate\n*/\nexports.getRelativeDate = function(delta) {\n\tvar futurep = false;\n\tif(delta < 0) {\n\t\tdelta = -1 * delta;\n\t\tfuturep = true;\n\t}\n\tvar units = [\n\t\t{name: \"Years\",   duration:      365 * 24 * 60 * 60 * 1000},\n\t\t{name: \"Months\",  duration: (365/12) * 24 * 60 * 60 * 1000},\n\t\t{name: \"Days\",    duration:            24 * 60 * 60 * 1000},\n\t\t{name: \"Hours\",   duration:                 60 * 60 * 1000},\n\t\t{name: \"Minutes\", duration:                      60 * 1000},\n\t\t{name: \"Seconds\", duration:                           1000}\n\t];\n\tfor(var t=0; t<units.length; t++) {\n\t\tvar result = Math.floor(delta / units[t].duration);\n\t\tif(result >= 2) {\n\t\t\treturn {\n\t\t\t\tdelta: delta,\n\t\t\t\tdescription: $tw.language.getString(\n\t\t\t\t\t\"RelativeDate/\" + (futurep ? \"Future\" : \"Past\") + \"/\" + units[t].name,\n\t\t\t\t\t{variables:\n\t\t\t\t\t\t{period: result.toString()}\n\t\t\t\t\t}\n\t\t\t\t),\n\t\t\t\tupdatePeriod: units[t].duration\n\t\t\t};\n\t\t}\n\t}\n\treturn {\n\t\tdelta: delta,\n\t\tdescription: $tw.language.getString(\n\t\t\t\"RelativeDate/\" + (futurep ? \"Future\" : \"Past\") + \"/Second\",\n\t\t\t{variables:\n\t\t\t\t{period: \"1\"}\n\t\t\t}\n\t\t),\n\t\tupdatePeriod: 1000\n\t};\n};\n\n// Convert & to \"&amp;\", < to \"&lt;\", > to \"&gt;\", \" to \"&quot;\"\nexports.htmlEncode = function(s) {\n\tif(s) {\n\t\treturn s.toString().replace(/&/mg,\"&amp;\").replace(/</mg,\"&lt;\").replace(/>/mg,\"&gt;\").replace(/\\\"/mg,\"&quot;\");\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\n// Converts all HTML entities to their character equivalents\nexports.entityDecode = function(s) {\n\tvar converter = String.fromCodePoint || String.fromCharCode,\n\t\te = s.substr(1,s.length-2); // Strip the & and the ;\n\tif(e.charAt(0) === \"#\") {\n\t\tif(e.charAt(1) === \"x\" || e.charAt(1) === \"X\") {\n\t\t\treturn converter(parseInt(e.substr(2),16));\t\n\t\t} else {\n\t\t\treturn converter(parseInt(e.substr(1),10));\n\t\t}\n\t} else {\n\t\tvar c = $tw.config.htmlEntities[e];\n\t\tif(c) {\n\t\t\treturn converter(c);\n\t\t} else {\n\t\t\treturn s; // Couldn't convert it as an entity, just return it raw\n\t\t}\n\t}\n};\n\nexports.unescapeLineBreaks = function(s) {\n\treturn s.replace(/\\\\n/mg,\"\\n\").replace(/\\\\b/mg,\" \").replace(/\\\\s/mg,\"\\\\\").replace(/\\r/mg,\"\");\n};\n\n/*\n * Returns an escape sequence for given character. 
Uses \\x for characters <=\n * 0xFF to save space, \\u for the rest.\n *\n * The code needs to be in sync with th code template in the compilation\n * function for \"action\" nodes.\n */\n// Copied from peg.js, thanks to David Majda\nexports.escape = function(ch) {\n\tvar charCode = ch.charCodeAt(0);\n\tif(charCode <= 0xFF) {\n\t\treturn '\\\\x' + $tw.utils.pad(charCode.toString(16).toUpperCase());\n\t} else {\n\t\treturn '\\\\u' + $tw.utils.pad(charCode.toString(16).toUpperCase(),4);\n\t}\n};\n\n// Turns a string into a legal JavaScript string\n// Copied from peg.js, thanks to David Majda\nexports.stringify = function(s) {\n\t/*\n\t* ECMA-262, 5th ed., 7.8.4: All characters may appear literally in a string\n\t* literal except for the closing quote character, backslash, carriage return,\n\t* line separator, paragraph separator, and line feed. Any character may\n\t* appear in the form of an escape sequence.\n\t*\n\t* For portability, we also escape all non-ASCII characters.\n\t*/\n\treturn (s || \"\")\n\t\t.replace(/\\\\/g, '\\\\\\\\')            // backslash\n\t\t.replace(/\"/g, '\\\\\"')              // double quote character\n\t\t.replace(/'/g, \"\\\\'\")              // single quote character\n\t\t.replace(/\\r/g, '\\\\r')             // carriage return\n\t\t.replace(/\\n/g, '\\\\n')             // line feed\n\t\t.replace(/[\\x80-\\uFFFF]/g, exports.escape); // non-ASCII characters\n};\n\n/*\nEscape the RegExp special characters with a preceding backslash\n*/\nexports.escapeRegExp = function(s) {\n    return s.replace(/[\\-\\/\\\\\\^\\$\\*\\+\\?\\.\\(\\)\\|\\[\\]\\{\\}]/g, '\\\\$&');\n};\n\n// Checks whether a link target is external, i.e. not a tiddler title\nexports.isLinkExternal = function(to) {\n\tvar externalRegExp = /^(?:file|http|https|mailto|ftp|irc|news|data|skype):[^\\s<>{}\\[\\]`|\"\\\\^]+(?:\\/|\\b)/i;\n\treturn externalRegExp.test(to);\n};\n\nexports.nextTick = function(fn) {\n/*global window: false */\n\tif(typeof process === \"undefined\") {\n\t\t// Apparently it would be faster to use postMessage - http://dbaron.org/log/20100309-faster-timeouts\n\t\twindow.setTimeout(fn,4);\n\t} else {\n\t\tprocess.nextTick(fn);\n\t}\n};\n\n/*\nConvert a hyphenated CSS property name into a camel case one\n*/\nexports.unHyphenateCss = function(propName) {\n\treturn propName.replace(/-([a-z])/gi, function(match0,match1) {\n\t\treturn match1.toUpperCase();\n\t});\n};\n\n/*\nConvert a camelcase CSS property name into a dashed one (\"backgroundColor\" --> \"background-color\")\n*/\nexports.hyphenateCss = function(propName) {\n\treturn propName.replace(/([A-Z])/g, function(match0,match1) {\n\t\treturn \"-\" + match1.toLowerCase();\n\t});\n};\n\n/*\nParse a text reference of one of these forms:\n* title\n* !!field\n* title!!field\n* title##index\n* etc\nReturns an object with the following fields, all optional:\n* title: tiddler title\n* field: tiddler field name\n* index: JSON property index\n*/\nexports.parseTextReference = function(textRef) {\n\t// Separate out the title, field name and/or JSON indices\n\tvar reTextRef = /(?:(.*?)!!(.+))|(?:(.*?)##(.+))|(.*)/mg,\n\t\tmatch = reTextRef.exec(textRef),\n\t\tresult = {};\n\tif(match && reTextRef.lastIndex === textRef.length) {\n\t\t// Return the parts\n\t\tif(match[1]) {\n\t\t\tresult.title = match[1];\n\t\t}\n\t\tif(match[2]) {\n\t\t\tresult.field = match[2];\n\t\t}\n\t\tif(match[3]) {\n\t\t\tresult.title = match[3];\n\t\t}\n\t\tif(match[4]) {\n\t\t\tresult.index = match[4];\n\t\t}\n\t\tif(match[5]) {\n\t\t\tresult.title = 
match[5];\n\t\t}\n\t} else {\n\t\t// If we couldn't parse it\n\t\tresult.title = textRef\n\t}\n\treturn result;\n};\n\n/*\nChecks whether a string is a valid fieldname\n*/\nexports.isValidFieldName = function(name) {\n\tif(!name || typeof name !== \"string\") {\n\t\treturn false;\n\t}\n\tname = name.toLowerCase().trim();\n\tvar fieldValidatorRegEx = /^[a-z0-9\\-\\._]+$/mg;\n\treturn fieldValidatorRegEx.test(name);\n};\n\n/*\nExtract the version number from the meta tag or from the boot file\n*/\n\n// Browser version\nexports.extractVersionInfo = function() {\n\tif($tw.packageInfo) {\n\t\treturn $tw.packageInfo.version;\n\t} else {\n\t\tvar metatags = document.getElementsByTagName(\"meta\");\n\t\tfor(var t=0; t<metatags.length; t++) {\n\t\t\tvar m = metatags[t];\n\t\t\tif(m.name === \"tiddlywiki-version\") {\n\t\t\t\treturn m.content;\n\t\t\t}\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nGet the animation duration in ms\n*/\nexports.getAnimationDuration = function() {\n\treturn parseInt($tw.wiki.getTiddlerText(\"$:/config/AnimationDuration\",\"400\"),10);\n};\n\n/*\nHash a string to a number\nDerived from http://stackoverflow.com/a/15710692\n*/\nexports.hashString = function(str) {\n\treturn str.split(\"\").reduce(function(a,b) {\n\t\ta = ((a << 5) - a) + b.charCodeAt(0);\n\t\treturn a & a;\n\t},0);\n};\n\n/*\nDecode a base64 string\n*/\nexports.base64Decode = function(string64) {\n\tif($tw.browser) {\n\t\t// TODO\n\t\tthrow \"$tw.utils.base64Decode() doesn't work in the browser\";\n\t} else {\n\t\treturn (new Buffer(string64,\"base64\")).toString();\n\t}\n};\n\n/*\nConvert a hashmap into a tiddler dictionary format sequence of name:value pairs\n*/\nexports.makeTiddlerDictionary = function(data) {\n\tvar output = [];\n\tfor(var name in data) {\n\t\toutput.push(name + \": \" + data[name]);\n\t}\n\treturn output.join(\"\\n\");\n};\n\n/*\nHigh resolution microsecond timer for profiling\n*/\nexports.timer = function(base) {\n\tvar m;\n\tif($tw.node) {\n\t\tvar r = process.hrtime();\t\t\n\t\tm =  r[0] * 1e3 + (r[1] / 1e6);\n\t} else if(window.performance) {\n\t\tm = performance.now();\n\t} else {\n\t\tm = Date.now();\n\t}\n\tif(typeof base !== \"undefined\") {\n\t\tm = m - base;\n\t}\n\treturn m;\n};\n\n/*\nConvert text and content type to a data URI\n*/\nexports.makeDataUri = function(text,type) {\n\ttype = type || \"text/vnd.tiddlywiki\";\n\tvar typeInfo = $tw.config.contentTypeInfo[type] || $tw.config.contentTypeInfo[\"text/plain\"],\n\t\tisBase64 = typeInfo.encoding === \"base64\",\n\t\tparts = [];\n\tparts.push(\"data:\");\n\tparts.push(type);\n\tparts.push(isBase64 ? \";base64\" : \"\");\n\tparts.push(\",\");\n\tparts.push(isBase64 ? text : encodeURIComponent(text));\n\treturn parts.join(\"\");\n};\n\n/*\nUseful for finding out the fully escaped CSS selector equivalent to a given tag. For example:\n\n$tw.utils.tagToCssSelector(\"$:/tags/Stylesheet\") --> tc-tagged-\\%24\\%3A\\%2Ftags\\%2FStylesheet\n*/\nexports.tagToCssSelector = function(tagName) {\n\treturn \"tc-tagged-\" + encodeURIComponent(tagName).replace(/[!\"#$%&'()*+,\\-./:;<=>?@[\\\\\\]^`{\\|}~,]/mg,function(c) {\n\t\treturn \"\\\\\" + c;\n\t});\n};\n\n\n/*\nIE does not have sign function\n*/\nexports.sign = Math.sign || function(x) {\n\tx = +x; // convert to a number\n\tif (x === 0 || isNaN(x)) {\n\t\treturn x;\n\t}\n\treturn x > 0 ? 
1 : -1;\n};\n\n/*\nIE does not have an endsWith function\n*/\nexports.strEndsWith = function(str,ending,position) {\n\tif(str.endsWith) {\n\t\treturn str.endsWith(ending,position);\n\t} else {\n\t\tif (typeof position !== 'number' || !isFinite(position) || Math.floor(position) !== position || position > str.length) {\n\t\t\tposition = str.length;\n\t\t}\n\t\tposition -= str.length;\n\t\tvar lastIndex = str.indexOf(ending, position);\n\t\treturn lastIndex !== -1 && lastIndex === position;\n\t}\n};\n\n})();\n",
            "title": "$:/core/modules/utils/utils.js",
            "type": "application/javascript",
            "module-type": "utils"
        },
        "$:/core/modules/widgets/action-deletefield.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-deletefield.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to delete fields of a tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar DeleteFieldWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nDeleteFieldWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nDeleteFieldWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nDeleteFieldWidget.prototype.execute = function() {\n\tthis.actionTiddler = this.getAttribute(\"$tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.actionField = this.getAttribute(\"$field\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nDeleteFieldWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$tiddler\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nDeleteFieldWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar self = this,\n\t\ttiddler = this.wiki.getTiddler(self.actionTiddler),\n\t\tremoveFields = {};\n\tif(this.actionField) {\n\t\tremoveFields[this.actionField] = undefined;\n\t}\n\tif(tiddler) {\n\t\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\t\tif(name.charAt(0) !== \"$\" && name !== \"title\") {\n\t\t\t\tremoveFields[name] = undefined;\n\t\t\t}\n\t\t});\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getModificationFields(),tiddler,removeFields,this.wiki.getCreationFields()));\n\t}\n\treturn true; // Action was invoked\n};\n\nexports[\"action-deletefield\"] = DeleteFieldWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-deletefield.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-deletetiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-deletetiddler.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to delete a tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar DeleteTiddlerWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nDeleteTiddlerWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nDeleteTiddlerWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nDeleteTiddlerWidget.prototype.execute = function() {\n\tthis.actionFilter = this.getAttribute(\"$filter\");\n\tthis.actionTiddler = this.getAttribute(\"$tiddler\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nDeleteTiddlerWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$filter\"] || changedAttributes[\"$tiddler\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nDeleteTiddlerWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar tiddlers = [];\n\tif(this.actionFilter) {\n\t\ttiddlers = this.wiki.filterTiddlers(this.actionFilter,this);\n\t}\n\tif(this.actionTiddler) {\n\t\ttiddlers.push(this.actionTiddler);\n\t}\n\tfor(var t=0; t<tiddlers.length; t++) {\n\t\tthis.wiki.deleteTiddler(tiddlers[t]);\n\t}\n\treturn true; // Action was invoked\n};\n\nexports[\"action-deletetiddler\"] = DeleteTiddlerWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-deletetiddler.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-listops.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-listops.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to apply list operations to any tiddler field (defaults to the 'list' field of the current tiddler)\n\n\\*/\n(function() {\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\nvar ActionListopsWidget = function(parseTreeNode, options) {\n\tthis.initialise(parseTreeNode, options);\n};\n/**\n * Inherit from the base widget class\n */\nActionListopsWidget.prototype = new Widget();\n/**\n * Render this widget into the DOM\n */\nActionListopsWidget.prototype.render = function(parent, nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n/**\n * Compute the internal state of the widget\n */\nActionListopsWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.target = this.getAttribute(\"$tiddler\", this.getVariable(\n\t\t\"currentTiddler\"));\n\tthis.filter = this.getAttribute(\"$filter\");\n\tthis.subfilter = this.getAttribute(\"$subfilter\");\n\tthis.listField = this.getAttribute(\"$field\", \"list\");\n\tthis.listIndex = this.getAttribute(\"$index\");\n\tthis.filtertags = this.getAttribute(\"$tags\");\n};\n/**\n * \tRefresh the widget by ensuring our attributes are up to date\n */\nActionListopsWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.$tiddler || changedAttributes.$filter ||\n\t\tchangedAttributes.$subfilter || changedAttributes.$field ||\n\t\tchangedAttributes.$index || changedAttributes.$tags) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n/**\n * \tInvoke the action associated with this widget\n */\nActionListopsWidget.prototype.invokeAction = function(triggeringWidget,\n\tevent) {\n\t//Apply the specified filters to the lists\n\tvar field = this.listField,\n\t\tindex,\n\t\ttype = \"!!\",\n\t\tlist = this.listField;\n\tif(this.listIndex) {\n\t\tfield = undefined;\n\t\tindex = this.listIndex;\n\t\ttype = \"##\";\n\t\tlist = this.listIndex;\n\t}\n\tif(this.filter) {\n\t\tthis.wiki.setText(this.target, field, index, $tw.utils.stringifyList(\n\t\t\tthis.wiki\n\t\t\t.filterTiddlers(this.filter, this)));\n\t}\n\tif(this.subfilter) {\n\t\tvar subfilter = \"[list[\" + this.target + type + list + \"]] \" + this.subfilter;\n\t\tthis.wiki.setText(this.target, field, index, $tw.utils.stringifyList(\n\t\t\tthis.wiki\n\t\t\t.filterTiddlers(subfilter, this)));\n\t}\n\tif(this.filtertags) {\n\t\tvar tagfilter = \"[list[\" + this.target + \"!!tags]] \" + this.filtertags;\n\t\tthis.wiki.setText(this.target, \"tags\", undefined, $tw.utils.stringifyList(\n\t\t\tthis.wiki.filterTiddlers(tagfilter, this)));\n\t}\n\treturn true; // Action was invoked\n};\n\nexports[\"action-listops\"] = ActionListopsWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-listops.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-navigate.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-navigate.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to navigate to a tiddler\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar NavigateWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nNavigateWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nNavigateWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nNavigateWidget.prototype.execute = function() {\n\tthis.actionTo = this.getAttribute(\"$to\");\n\tthis.actionScroll = this.getAttribute(\"$scroll\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nNavigateWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$to\"] || changedAttributes[\"$scroll\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nNavigateWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar bounds = triggeringWidget && triggeringWidget.getBoundingClientRect && triggeringWidget.getBoundingClientRect(),\n\t\tsuppressNavigation = event.metaKey || event.ctrlKey || (event.button === 1);\n\tif(this.actionScroll === \"yes\") {\n\t\tsuppressNavigation = false;\n\t} else if(this.actionScroll === \"no\") {\n\t\tsuppressNavigation = true;\n\t}\n\tthis.dispatchEvent({\n\t\ttype: \"tm-navigate\",\n\t\tnavigateTo: this.actionTo === undefined ? this.getVariable(\"currentTiddler\") : this.actionTo,\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\"),\n\t\tnavigateFromNode: triggeringWidget,\n\t\tnavigateFromClientRect: bounds && { top: bounds.top, left: bounds.left, width: bounds.width, right: bounds.right, bottom: bounds.bottom, height: bounds.height\n\t\t},\n\t\tnavigateSuppressNavigation: suppressNavigation\n\t});\n\treturn true; // Action was invoked\n};\n\nexports[\"action-navigate\"] = NavigateWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-navigate.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-sendmessage.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-sendmessage.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to send a message\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SendMessageWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSendMessageWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSendMessageWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nSendMessageWidget.prototype.execute = function() {\n\tthis.actionMessage = this.getAttribute(\"$message\");\n\tthis.actionParam = this.getAttribute(\"$param\");\n\tthis.actionName = this.getAttribute(\"$name\");\n\tthis.actionValue = this.getAttribute(\"$value\",\"\");\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nSendMessageWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(Object.keys(changedAttributes).length) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nSendMessageWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\t// Get the string parameter\n\tvar param = this.actionParam;\n\t// Assemble the attributes as a hashmap\n\tvar paramObject = Object.create(null);\n\tvar count = 0;\n\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\tif(name.charAt(0) !== \"$\") {\n\t\t\tparamObject[name] = attribute;\n\t\t\tcount++;\n\t\t}\n\t});\n\t// Add name/value pair if present\n\tif(this.actionName) {\n\t\tparamObject[this.actionName] = this.actionValue;\n\t}\n\t// Dispatch the message\n\tthis.dispatchEvent({\n\t\ttype: this.actionMessage,\n\t\tparam: param,\n\t\tparamObject: paramObject,\n\t\ttiddlerTitle: this.getVariable(\"currentTiddler\"),\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\")\n\t});\n\treturn true; // Action was invoked\n};\n\nexports[\"action-sendmessage\"] = SendMessageWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-sendmessage.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/action-setfield.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/action-setfield.js\ntype: application/javascript\nmodule-type: widget\n\nAction widget to set a single field or index on a tiddler.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SetFieldWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSetFieldWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSetFieldWidget.prototype.render = function(parent,nextSibling) {\n\tthis.computeAttributes();\n\tthis.execute();\n};\n\n/*\nCompute the internal state of the widget\n*/\nSetFieldWidget.prototype.execute = function() {\n\tthis.actionTiddler = this.getAttribute(\"$tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.actionField = this.getAttribute(\"$field\");\n\tthis.actionIndex = this.getAttribute(\"$index\");\n\tthis.actionValue = this.getAttribute(\"$value\");\n\tthis.actionTimestamp = this.getAttribute(\"$timestamp\",\"yes\") === \"yes\";\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nSetFieldWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"$tiddler\"] || changedAttributes[\"$field\"] || changedAttributes[\"$index\"] || changedAttributes[\"$value\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nInvoke the action associated with this widget\n*/\nSetFieldWidget.prototype.invokeAction = function(triggeringWidget,event) {\n\tvar self = this,\n\t\toptions = {};\n\toptions.suppressTimestamp = !this.actionTimestamp;\n\tif((typeof this.actionField == \"string\") || (typeof this.actionIndex == \"string\")  || (typeof this.actionValue == \"string\")) {\n\t\tthis.wiki.setText(this.actionTiddler,this.actionField,this.actionIndex,this.actionValue,options);\n\t}\n\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\tif(name.charAt(0) !== \"$\") {\n\t\t\tself.wiki.setText(self.actionTiddler,name,undefined,attribute,options);\n\t\t}\n\t});\n\treturn true; // Action was invoked\n};\n\nexports[\"action-setfield\"] = SetFieldWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/action-setfield.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/browse.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/browse.js\ntype: application/javascript\nmodule-type: widget\n\nBrowse widget for browsing for files to import\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar BrowseWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nBrowseWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nBrowseWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar domNode = this.document.createElement(\"input\");\n\tdomNode.setAttribute(\"type\",\"file\");\n\tif(this.browseMultiple) {\n\t\tdomNode.setAttribute(\"multiple\",\"multiple\");\n\t}\n\tif(this.tooltip) {\n\t\tdomNode.setAttribute(\"title\",this.tooltip);\n\t}\n\t// Nw.js supports \"nwsaveas\" to force a \"save as\" dialogue that allows a new or existing file to be selected\n\tif(this.nwsaveas) {\n\t\tdomNode.setAttribute(\"nwsaveas\",this.nwsaveas);\n\t}\n\t// Nw.js supports \"webkitdirectory\" to allow a directory to be selected\n\tif(this.webkitdirectory) {\n\t\tdomNode.setAttribute(\"webkitdirectory\",this.webkitdirectory);\n\t}\n\t// Add a click event handler\n\tdomNode.addEventListener(\"change\",function (event) {\n\t\tif(self.message) {\n\t\t\tself.dispatchEvent({type: self.message, param: self.param, files: event.target.files});\n\t\t} else {\n\t\t\tself.wiki.readFiles(event.target.files,function(tiddlerFieldsArray) {\n\t\t\t\tself.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify(tiddlerFieldsArray)});\n\t\t\t});\n\t\t}\n\t\treturn false;\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nBrowseWidget.prototype.execute = function() {\n\tthis.browseMultiple = this.getAttribute(\"multiple\");\n\tthis.message = this.getAttribute(\"message\");\n\tthis.param = this.getAttribute(\"param\");\n\tthis.tooltip = this.getAttribute(\"tooltip\");\n\tthis.nwsaveas = this.getAttribute(\"nwsaveas\");\n\tthis.webkitdirectory = this.getAttribute(\"webkitdirectory\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nBrowseWidget.prototype.refresh = function(changedTiddlers) {\n\treturn false;\n};\n\nexports.browse = BrowseWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/browse.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/button.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/button.js\ntype: application/javascript\nmodule-type: widget\n\nButton widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ButtonWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nButtonWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nButtonWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar tag = \"button\";\n\tif(this.buttonTag && $tw.config.htmlUnsafeElements.indexOf(this.buttonTag) === -1) {\n\t\ttag = this.buttonTag;\n\t}\n\tvar domNode = this.document.createElement(tag);\n\t// Assign classes\n\tvar classes = this[\"class\"].split(\" \") || [],\n\t\tisPoppedUp = this.popup && this.isPoppedUp();\n\tif(this.selectedClass) {\n\t\tif(this.set && this.setTo && this.isSelected()) {\n\t\t\t$tw.utils.pushTop(classes,this.selectedClass.split(\" \"));\n\t\t}\n\t\tif(isPoppedUp) {\n\t\t\t$tw.utils.pushTop(classes,this.selectedClass.split(\" \"));\n\t\t}\n\t}\n\tif(isPoppedUp) {\n\t\t$tw.utils.pushTop(classes,\"tc-popup-handle\");\n\t}\n\tdomNode.className = classes.join(\" \");\n\t// Assign other attributes\n\tif(this.style) {\n\t\tdomNode.setAttribute(\"style\",this.style);\n\t}\n\tif(this.tooltip) {\n\t\tdomNode.setAttribute(\"title\",this.tooltip);\n\t}\n\tif(this[\"aria-label\"]) {\n\t\tdomNode.setAttribute(\"aria-label\",this[\"aria-label\"]);\n\t}\n\t// Add a click event handler\n\tdomNode.addEventListener(\"click\",function (event) {\n\t\tvar handled = false;\n\t\tif(self.invokeActions(this,event)) {\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.to) {\n\t\t\tself.navigateTo(event);\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.message) {\n\t\t\tself.dispatchMessage(event);\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.popup) {\n\t\t\tself.triggerPopup(event);\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.set) {\n\t\t\tself.setTiddler();\n\t\t\thandled = true;\n\t\t}\n\t\tif(self.actions) {\n\t\t\tself.invokeActionString(self.actions,self,event);\n\t\t}\n\t\tif(handled) {\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n\t\t}\n\t\treturn handled;\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nWe don't allow actions to propagate because we trigger actions ourselves\n*/\nButtonWidget.prototype.allowActionPropagation = function() {\n\treturn false;\n};\n\nButtonWidget.prototype.getBoundingClientRect = function() {\n\treturn this.domNodes[0].getBoundingClientRect();\n};\n\nButtonWidget.prototype.isSelected = function() {\n    return this.wiki.getTextReference(this.set,this.defaultSetValue,this.getVariable(\"currentTiddler\")) === this.setTo;\n};\n\nButtonWidget.prototype.isPoppedUp = function() {\n\tvar tiddler = this.wiki.getTiddler(this.popup);\n\tvar result = tiddler && tiddler.fields.text ? 
$tw.popup.readPopupState(tiddler.fields.text) : false;\n\treturn result;\n};\n\nButtonWidget.prototype.navigateTo = function(event) {\n\tvar bounds = this.getBoundingClientRect();\n\tthis.dispatchEvent({\n\t\ttype: \"tm-navigate\",\n\t\tnavigateTo: this.to,\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\"),\n\t\tnavigateFromNode: this,\n\t\tnavigateFromClientRect: { top: bounds.top, left: bounds.left, width: bounds.width, right: bounds.right, bottom: bounds.bottom, height: bounds.height\n\t\t},\n\t\tnavigateSuppressNavigation: event.metaKey || event.ctrlKey || (event.button === 1)\n\t});\n};\n\nButtonWidget.prototype.dispatchMessage = function(event) {\n\tthis.dispatchEvent({type: this.message, param: this.param, tiddlerTitle: this.getVariable(\"currentTiddler\")});\n};\n\nButtonWidget.prototype.triggerPopup = function(event) {\n\t$tw.popup.triggerPopup({\n\t\tdomNode: this.domNodes[0],\n\t\ttitle: this.popup,\n\t\twiki: this.wiki\n\t});\n};\n\nButtonWidget.prototype.setTiddler = function() {\n\tthis.wiki.setTextReference(this.set,this.setTo,this.getVariable(\"currentTiddler\"));\n};\n\n/*\nCompute the internal state of the widget\n*/\nButtonWidget.prototype.execute = function() {\n\t// Get attributes\n\tthis.actions = this.getAttribute(\"actions\");\n\tthis.to = this.getAttribute(\"to\");\n\tthis.message = this.getAttribute(\"message\");\n\tthis.param = this.getAttribute(\"param\");\n\tthis.set = this.getAttribute(\"set\");\n\tthis.setTo = this.getAttribute(\"setTo\");\n\tthis.popup = this.getAttribute(\"popup\");\n\tthis.hover = this.getAttribute(\"hover\");\n\tthis[\"class\"] = this.getAttribute(\"class\",\"\");\n\tthis[\"aria-label\"] = this.getAttribute(\"aria-label\");\n\tthis.tooltip = this.getAttribute(\"tooltip\");\n\tthis.style = this.getAttribute(\"style\");\n\tthis.selectedClass = this.getAttribute(\"selectedClass\");\n\tthis.defaultSetValue = this.getAttribute(\"default\",\"\");\n\tthis.buttonTag = this.getAttribute(\"tag\");\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nButtonWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.to || changedAttributes.message || changedAttributes.param || changedAttributes.set || changedAttributes.setTo || changedAttributes.popup || changedAttributes.hover || changedAttributes[\"class\"] || changedAttributes.selectedClass || changedAttributes.style || (this.set && changedTiddlers[this.set]) || (this.popup && changedTiddlers[this.popup])) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.button = ButtonWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/button.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/checkbox.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/checkbox.js\ntype: application/javascript\nmodule-type: widget\n\nCheckbox widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar CheckboxWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nCheckboxWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nCheckboxWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Create our elements\n\tthis.labelDomNode = this.document.createElement(\"label\");\n\tthis.labelDomNode.setAttribute(\"class\",this.checkboxClass);\n\tthis.inputDomNode = this.document.createElement(\"input\");\n\tthis.inputDomNode.setAttribute(\"type\",\"checkbox\");\n\tif(this.getValue()) {\n\t\tthis.inputDomNode.setAttribute(\"checked\",\"true\");\n\t}\n\tthis.labelDomNode.appendChild(this.inputDomNode);\n\tthis.spanDomNode = this.document.createElement(\"span\");\n\tthis.labelDomNode.appendChild(this.spanDomNode);\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(this.inputDomNode,[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n\t// Insert the label into the DOM and render any children\n\tparent.insertBefore(this.labelDomNode,nextSibling);\n\tthis.renderChildren(this.spanDomNode,null);\n\tthis.domNodes.push(this.labelDomNode);\n};\n\nCheckboxWidget.prototype.getValue = function() {\n\tvar tiddler = this.wiki.getTiddler(this.checkboxTitle);\n\tif(tiddler) {\n\t\tif(this.checkboxTag) {\n\t\t\tif(this.checkboxInvertTag) {\n\t\t\t\treturn !tiddler.hasTag(this.checkboxTag);\n\t\t\t} else {\n\t\t\t\treturn tiddler.hasTag(this.checkboxTag);\n\t\t\t}\n\t\t}\n\t\tif(this.checkboxField) {\n\t\t\tvar value = tiddler.fields[this.checkboxField] || this.checkboxDefault || \"\";\n\t\t\tif(value === this.checkboxChecked) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tif(value === this.checkboxUnchecked) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif(this.checkboxTag) {\n\t\t\treturn false;\n\t\t}\n\t\tif(this.checkboxField) {\n\t\t\tif(this.checkboxDefault === this.checkboxChecked) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tif(this.checkboxDefault === this.checkboxUnchecked) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n};\n\nCheckboxWidget.prototype.handleChangeEvent = function(event) {\n\tvar checked = this.inputDomNode.checked,\n\t\ttiddler = this.wiki.getTiddler(this.checkboxTitle),\n\t\tfallbackFields = {text: \"\"},\n\t\tnewFields = {title: this.checkboxTitle},\n\t\thasChanged = false,\n\t\ttagCheck = false,\n\t\thasTag = tiddler && tiddler.hasTag(this.checkboxTag);\n\tif(this.checkboxTag && this.checkboxInvertTag === \"yes\") {\n\t\ttagCheck = hasTag === checked;\n\t} else {\n\t\ttagCheck = hasTag !== checked;\n\t}\n\t// Set the tag if specified\n\tif(this.checkboxTag && (!tiddler || tagCheck)) {\n\t\tnewFields.tags = tiddler ? 
(tiddler.fields.tags || []).slice(0) : [];\n\t\tvar pos = newFields.tags.indexOf(this.checkboxTag);\n\t\tif(pos !== -1) {\n\t\t\tnewFields.tags.splice(pos,1);\n\t\t}\n\t\tif(this.checkboxInvertTag === \"yes\" && !checked) {\n\t\t\tnewFields.tags.push(this.checkboxTag);\n\t\t} else if(this.checkboxInvertTag !== \"yes\" && checked) {\n\t\t\tnewFields.tags.push(this.checkboxTag);\n\t\t}\n\t\thasChanged = true;\n\t}\n\t// Set the field if specified\n\tif(this.checkboxField) {\n\t\tvar value = checked ? this.checkboxChecked : this.checkboxUnchecked;\n\t\tif(!tiddler || tiddler.fields[this.checkboxField] !== value) {\n\t\t\tnewFields[this.checkboxField] = value;\n\t\t\thasChanged = true;\n\t\t}\n\t}\n\tif(hasChanged) {\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getCreationFields(),fallbackFields,tiddler,newFields,this.wiki.getModificationFields()));\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nCheckboxWidget.prototype.execute = function() {\n\t// Get the parameters from the attributes\n\tthis.checkboxTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.checkboxTag = this.getAttribute(\"tag\");\n\tthis.checkboxField = this.getAttribute(\"field\");\n\tthis.checkboxChecked = this.getAttribute(\"checked\");\n\tthis.checkboxUnchecked = this.getAttribute(\"unchecked\");\n\tthis.checkboxDefault = this.getAttribute(\"default\");\n\tthis.checkboxClass = this.getAttribute(\"class\",\"\");\n\tthis.checkboxInvertTag = this.getAttribute(\"invertTag\",\"\");\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nCheckboxWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.tag || changedAttributes.invertTag || changedAttributes.field || changedAttributes.checked || changedAttributes.unchecked || changedAttributes[\"default\"] || changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\tvar refreshed = false;\n\t\tif(changedTiddlers[this.checkboxTitle]) {\n\t\t\tthis.inputDomNode.checked = this.getValue();\n\t\t\trefreshed = true;\n\t\t}\n\t\treturn this.refreshChildren(changedTiddlers) || refreshed;\n\t}\n};\n\nexports.checkbox = CheckboxWidget;\n\n})();",
            "title": "$:/core/modules/widgets/checkbox.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/codeblock.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/codeblock.js\ntype: application/javascript\nmodule-type: widget\n\nCode block node widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar CodeBlockWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nCodeBlockWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nCodeBlockWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar codeNode = this.document.createElement(\"code\"),\n\t\tdomNode = this.document.createElement(\"pre\");\n\tcodeNode.appendChild(this.document.createTextNode(this.getAttribute(\"code\")));\n\tdomNode.appendChild(codeNode);\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.domNodes.push(domNode);\n\tif(this.postRender) {\n\t\tthis.postRender();\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nCodeBlockWidget.prototype.execute = function() {\n\tthis.language = this.getAttribute(\"language\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nCodeBlockWidget.prototype.refresh = function(changedTiddlers) {\n\treturn false;\n};\n\nexports.codeblock = CodeBlockWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/codeblock.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/count.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/count.js\ntype: application/javascript\nmodule-type: widget\n\nCount widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar CountWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nCountWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nCountWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar textNode = this.document.createTextNode(this.currentCount);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nCountWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.filter = this.getAttribute(\"filter\");\n\t// Execute the filter\n\tif(this.filter) {\n\t\tthis.currentCount = this.wiki.filterTiddlers(this.filter,this).length;\n\t} else {\n\t\tthis.currentCount = undefined;\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nCountWidget.prototype.refresh = function(changedTiddlers) {\n\t// Re-execute the filter to get the count\n\tthis.computeAttributes();\n\tvar oldCount = this.currentCount;\n\tthis.execute();\n\tif(this.currentCount !== oldCount) {\n\t\t// Regenerate and rerender the widget and replace the existing DOM node\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\n\t}\n\n};\n\nexports.count = CountWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/count.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/dropzone.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/dropzone.js\ntype: application/javascript\nmodule-type: widget\n\nDropzone widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar DropZoneWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nDropZoneWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nDropZoneWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar domNode = this.document.createElement(\"div\");\n\tdomNode.className = \"tc-dropzone\";\n\t// Add event handlers\n\t$tw.utils.addEventListeners(domNode,[\n\t\t{name: \"dragenter\", handlerObject: this, handlerMethod: \"handleDragEnterEvent\"},\n\t\t{name: \"dragover\", handlerObject: this, handlerMethod: \"handleDragOverEvent\"},\n\t\t{name: \"dragleave\", handlerObject: this, handlerMethod: \"handleDragLeaveEvent\"},\n\t\t{name: \"drop\", handlerObject: this, handlerMethod: \"handleDropEvent\"},\n\t\t{name: \"paste\", handlerObject: this, handlerMethod: \"handlePasteEvent\"}\n\t]);\n\tdomNode.addEventListener(\"click\",function (event) {\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nDropZoneWidget.prototype.enterDrag = function() {\n\t// Check for this window being the source of the drag\n\tif($tw.dragInProgress) {\n\t\treturn false;\n\t}\n\t// We count enter/leave events\n\tthis.dragEnterCount = (this.dragEnterCount || 0) + 1;\n\t// If we're entering for the first time we need to apply highlighting\n\tif(this.dragEnterCount === 1) {\n\t\t$tw.utils.addClass(this.domNodes[0],\"tc-dragover\");\n\t}\n};\n\nDropZoneWidget.prototype.leaveDrag = function() {\n\t// Reduce the enter count\n\tthis.dragEnterCount = (this.dragEnterCount || 0) - 1;\n\t// Remove highlighting if we're leaving externally\n\tif(this.dragEnterCount <= 0) {\n\t\t$tw.utils.removeClass(this.domNodes[0],\"tc-dragover\");\n\t}\n};\n\nDropZoneWidget.prototype.handleDragEnterEvent  = function(event) {\n\tthis.enterDrag();\n\t// Tell the browser that we're ready to handle the drop\n\tevent.preventDefault();\n\t// Tell the browser not to ripple the drag up to any parent drop handlers\n\tevent.stopPropagation();\n};\n\nDropZoneWidget.prototype.handleDragOverEvent  = function(event) {\n\t// Check for being over a TEXTAREA or INPUT\n\tif([\"TEXTAREA\",\"INPUT\"].indexOf(event.target.tagName) !== -1) {\n\t\treturn false;\n\t}\n\t// Check for this window being the source of the drag\n\tif($tw.dragInProgress) {\n\t\treturn false;\n\t}\n\t// Tell the browser that we're still interested in the drop\n\tevent.preventDefault();\n\tevent.dataTransfer.dropEffect = \"copy\"; // Explicitly show this is a copy\n};\n\nDropZoneWidget.prototype.handleDragLeaveEvent  = function(event) {\n\tthis.leaveDrag();\n};\n\nDropZoneWidget.prototype.handleDropEvent  = function(event) {\n\tthis.leaveDrag();\n\t// Check for being over a TEXTAREA or INPUT\n\tif([\"TEXTAREA\",\"INPUT\"].indexOf(event.target.tagName) !== -1) {\n\t\treturn false;\n\t}\n\t// Check for this window being the source of the drag\n\tif($tw.dragInProgress) {\n\t\treturn false;\n\t}\n\tvar 
self = this,\n\t\tdataTransfer = event.dataTransfer;\n\t// Reset the enter count\n\tthis.dragEnterCount = 0;\n\t// Remove highlighting\n\t$tw.utils.removeClass(this.domNodes[0],\"tc-dragover\");\n\t// Import any files in the drop\n\tvar numFiles = this.wiki.readFiles(dataTransfer.files,function(tiddlerFieldsArray) {\n\t\tself.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify(tiddlerFieldsArray)});\n\t});\n\t// Try to import the various data types we understand\n\tif(numFiles === 0) {\n\t\tthis.importData(dataTransfer);\n\t}\n\t// Tell the browser that we handled the drop\n\tevent.preventDefault();\n\t// Stop the drop ripple up to any parent handlers\n\tevent.stopPropagation();\n};\n\nDropZoneWidget.prototype.importData = function(dataTransfer) {\n\t// Try each provided data type in turn\n\tfor(var t=0; t<this.importDataTypes.length; t++) {\n\t\tif(!$tw.browser.isIE || this.importDataTypes[t].IECompatible) {\n\t\t\t// Get the data\n\t\t\tvar dataType = this.importDataTypes[t];\n\t\t\t\tvar data = dataTransfer.getData(dataType.type);\n\t\t\t// Import the tiddlers in the data\n\t\t\tif(data !== \"\" && data !== null) {\n\t\t\t\tif($tw.log.IMPORT) {\n\t\t\t\t\tconsole.log(\"Importing data type '\" + dataType.type + \"', data: '\" + data + \"'\")\n\t\t\t\t}\n\t\t\t\tvar tiddlerFields = dataType.convertToFields(data);\n\t\t\t\tif(!tiddlerFields.title) {\n\t\t\t\t\ttiddlerFields.title = this.wiki.generateNewTitle(\"Untitled\");\n\t\t\t\t}\n\t\t\t\tthis.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify([tiddlerFields])});\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t}\n};\n\nDropZoneWidget.prototype.importDataTypes = [\n\t{type: \"text/vnd.tiddler\", IECompatible: false, convertToFields: function(data) {\n\t\treturn JSON.parse(data);\n\t}},\n\t{type: \"URL\", IECompatible: true, convertToFields: function(data) {\n\t\t// Check for tiddler data URI\n\t\tvar match = decodeURIComponent(data).match(/^data\\:text\\/vnd\\.tiddler,(.*)/i);\n\t\tif(match) {\n\t\t\treturn JSON.parse(match[1]);\n\t\t} else {\n\t\t\treturn { // As URL string\n\t\t\t\ttext: data\n\t\t\t};\n\t\t}\n\t}},\n\t{type: \"text/x-moz-url\", IECompatible: false, convertToFields: function(data) {\n\t\t// Check for tiddler data URI\n\t\tvar match = decodeURIComponent(data).match(/^data\\:text\\/vnd\\.tiddler,(.*)/i);\n\t\tif(match) {\n\t\t\treturn JSON.parse(match[1]);\n\t\t} else {\n\t\t\treturn { // As URL string\n\t\t\t\ttext: data\n\t\t\t};\n\t\t}\n\t}},\n\t{type: \"text/html\", IECompatible: false, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}},\n\t{type: \"text/plain\", IECompatible: false, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}},\n\t{type: \"Text\", IECompatible: true, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}},\n\t{type: \"text/uri-list\", IECompatible: false, convertToFields: function(data) {\n\t\treturn {\n\t\t\ttext: data\n\t\t};\n\t}}\n];\n\nDropZoneWidget.prototype.handlePasteEvent  = function(event) {\n\t// Let the browser handle it if we're in a textarea or input box\n\tif([\"TEXTAREA\",\"INPUT\"].indexOf(event.target.tagName) == -1) {\n\t\tvar self = this,\n\t\t\titems = event.clipboardData.items;\n\t\t// Enumerate the clipboard items\n\t\tfor(var t = 0; t<items.length; t++) {\n\t\t\tvar item = items[t];\n\t\t\tif(item.kind === \"file\") {\n\t\t\t\t// Import any files\n\t\t\t\tthis.wiki.readFile(item.getAsFile(),function(tiddlerFieldsArray) {\n\t\t\t\t\tself.dispatchEvent({type: 
\"tm-import-tiddlers\", param: JSON.stringify(tiddlerFieldsArray)});\n\t\t\t\t});\n\t\t\t} else if(item.kind === \"string\") {\n\t\t\t\t// Create tiddlers from string items\n\t\t\t\tvar type = item.type;\n\t\t\t\titem.getAsString(function(str) {\n\t\t\t\t\tvar tiddlerFields = {\n\t\t\t\t\t\ttitle: self.wiki.generateNewTitle(\"Untitled\"),\n\t\t\t\t\t\ttext: str,\n\t\t\t\t\t\ttype: type\n\t\t\t\t\t};\n\t\t\t\t\tif($tw.log.IMPORT) {\n\t\t\t\t\t\tconsole.log(\"Importing string '\" + str + \"', type: '\" + type + \"'\");\n\t\t\t\t\t}\n\t\t\t\t\tself.dispatchEvent({type: \"tm-import-tiddlers\", param: JSON.stringify([tiddlerFields])});\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t\t// Tell the browser that we've handled the paste\n\t\tevent.stopPropagation();\n\t\tevent.preventDefault();\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nDropZoneWidget.prototype.execute = function() {\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nDropZoneWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.dropzone = DropZoneWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/dropzone.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-binary.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-binary.js\ntype: application/javascript\nmodule-type: widget\n\nEdit-binary widget; placeholder for editing binary tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar BINARY_WARNING_MESSAGE = \"$:/core/ui/BinaryWarning\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditBinaryWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditBinaryWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditBinaryWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEditBinaryWidget.prototype.execute = function() {\n\t// Construct the child widgets\n\tthis.makeChildWidgets([{\n\t\ttype: \"transclude\",\n\t\tattributes: {\n\t\t\ttiddler: {type: \"string\", value: BINARY_WARNING_MESSAGE}\n\t\t}\n\t}]);\n};\n\n/*\nRefresh by refreshing our child widget\n*/\nEditBinaryWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports[\"edit-binary\"] = EditBinaryWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-binary.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-bitmap.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-bitmap.js\ntype: application/javascript\nmodule-type: widget\n\nEdit-bitmap widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n// Default image sizes\nvar DEFAULT_IMAGE_WIDTH = 600,\n\tDEFAULT_IMAGE_HEIGHT = 370;\n\n// Configuration tiddlers\nvar LINE_WIDTH_TITLE = \"$:/config/BitmapEditor/LineWidth\",\n\tLINE_COLOUR_TITLE = \"$:/config/BitmapEditor/Colour\",\n\tLINE_OPACITY_TITLE = \"$:/config/BitmapEditor/Opacity\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditBitmapWidget = function(parseTreeNode,options) {\n\t// Initialise the editor operations if they've not been done already\n\tif(!this.editorOperations) {\n\t\tEditBitmapWidget.prototype.editorOperations = {};\n\t\t$tw.modules.applyMethods(\"bitmapeditoroperation\",this.editorOperations);\n\t}\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditBitmapWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditBitmapWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Create the wrapper for the toolbar and render its content\n\tthis.toolbarNode = this.document.createElement(\"div\");\n\tthis.toolbarNode.className = \"tc-editor-toolbar\";\n\tparent.insertBefore(this.toolbarNode,nextSibling);\n\tthis.domNodes.push(this.toolbarNode);\n\t// Create the on-screen canvas\n\tthis.canvasDomNode = $tw.utils.domMaker(\"canvas\",{\n\t\tdocument: this.document,\n\t\t\"class\":\"tc-edit-bitmapeditor\",\n\t\teventListeners: [{\n\t\t\tname: \"touchstart\", handlerObject: this, handlerMethod: \"handleTouchStartEvent\"\n\t\t},{\n\t\t\tname: \"touchmove\", handlerObject: this, handlerMethod: \"handleTouchMoveEvent\"\n\t\t},{\n\t\t\tname: \"touchend\", handlerObject: this, handlerMethod: \"handleTouchEndEvent\"\n\t\t},{\n\t\t\tname: \"mousedown\", handlerObject: this, handlerMethod: \"handleMouseDownEvent\"\n\t\t},{\n\t\t\tname: \"mousemove\", handlerObject: this, handlerMethod: \"handleMouseMoveEvent\"\n\t\t},{\n\t\t\tname: \"mouseup\", handlerObject: this, handlerMethod: \"handleMouseUpEvent\"\n\t\t}]\n\t});\n\t// Set the width and height variables\n\tthis.setVariable(\"tv-bitmap-editor-width\",this.canvasDomNode.width + \"px\");\n\tthis.setVariable(\"tv-bitmap-editor-height\",this.canvasDomNode.height + \"px\");\n\t// Render toolbar child widgets\n\tthis.renderChildren(this.toolbarNode,null);\n\t// // Insert the elements into the DOM\n\tparent.insertBefore(this.canvasDomNode,nextSibling);\n\tthis.domNodes.push(this.canvasDomNode);\n\t// Load the image into the canvas\n\tif($tw.browser) {\n\t\tthis.loadCanvas();\n\t}\n\t// Add widget message listeners\n\tthis.addEventListeners([\n\t\t{type: \"tm-edit-bitmap-operation\", handler: \"handleEditBitmapOperationMessage\"}\n\t]);\n};\n\n/*\nHandle an edit bitmap operation message from the toolbar\n*/\nEditBitmapWidget.prototype.handleEditBitmapOperationMessage = function(event) {\n\t// Invoke the handler\n\tvar handler = this.editorOperations[event.param];\n\tif(handler) {\n\t\thandler.call(this,event);\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nEditBitmapWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.editTitle = 
this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nJust refresh the toolbar\n*/\nEditBitmapWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nSet the bitmap size variables and refresh the toolbar\n*/\nEditBitmapWidget.prototype.refreshToolbar = function() {\n\t// Set the width and height variables\n\tthis.setVariable(\"tv-bitmap-editor-width\",this.canvasDomNode.width + \"px\");\n\tthis.setVariable(\"tv-bitmap-editor-height\",this.canvasDomNode.height + \"px\");\n\t// Refresh each of our child widgets\n\t$tw.utils.each(this.children,function(childWidget) {\n\t\tchildWidget.refreshSelf();\n\t});\n};\n\nEditBitmapWidget.prototype.loadCanvas = function() {\n\tvar tiddler = this.wiki.getTiddler(this.editTitle),\n\t\tcurrImage = new Image();\n\t// Set up event handlers for loading the image\n\tvar self = this;\n\tcurrImage.onload = function() {\n\t\t// Copy the image to the on-screen canvas\n\t\tself.initCanvas(self.canvasDomNode,currImage.width,currImage.height,currImage);\n\t\t// And also copy the current bitmap to the off-screen canvas\n\t\tself.currCanvas = self.document.createElement(\"canvas\");\n\t\tself.initCanvas(self.currCanvas,currImage.width,currImage.height,currImage);\n\t\t// Set the width and height input boxes\n\t\tself.refreshToolbar();\n\t};\n\tcurrImage.onerror = function() {\n\t\t// Set the on-screen canvas size and clear it\n\t\tself.initCanvas(self.canvasDomNode,DEFAULT_IMAGE_WIDTH,DEFAULT_IMAGE_HEIGHT);\n\t\t// Set the off-screen canvas size and clear it\n\t\tself.currCanvas = self.document.createElement(\"canvas\");\n\t\tself.initCanvas(self.currCanvas,DEFAULT_IMAGE_WIDTH,DEFAULT_IMAGE_HEIGHT);\n\t\t// Set the width and height input boxes\n\t\tself.refreshToolbar();\n\t};\n\t// Get the current bitmap into an image object\n\tcurrImage.src = \"data:\" + tiddler.fields.type + \";base64,\" + tiddler.fields.text;\n};\n\nEditBitmapWidget.prototype.initCanvas = function(canvas,width,height,image) {\n\tcanvas.width = width;\n\tcanvas.height = height;\n\tvar ctx = canvas.getContext(\"2d\");\n\tif(image) {\n\t\tctx.drawImage(image,0,0);\n\t} else {\n\t\tctx.fillStyle = \"#fff\";\n\t\tctx.fillRect(0,0,canvas.width,canvas.height);\n\t}\n};\n\n/*\n** Change the size of the canvas, preserving the current image\n*/\nEditBitmapWidget.prototype.changeCanvasSize = function(newWidth,newHeight) {\n\t// Create and size a new canvas\n\tvar newCanvas = this.document.createElement(\"canvas\");\n\tthis.initCanvas(newCanvas,newWidth,newHeight);\n\t// Copy the old image\n\tvar ctx = newCanvas.getContext(\"2d\");\n\tctx.drawImage(this.currCanvas,0,0);\n\t// Set the new canvas as the current one\n\tthis.currCanvas = newCanvas;\n\t// Set the size of the onscreen canvas\n\tthis.canvasDomNode.width = newWidth;\n\tthis.canvasDomNode.height = newHeight;\n\t// Paint the onscreen canvas with the offscreen canvas\n\tctx = this.canvasDomNode.getContext(\"2d\");\n\tctx.drawImage(this.currCanvas,0,0);\n};\n\nEditBitmapWidget.prototype.handleTouchStartEvent = function(event) {\n\tthis.brushDown = true;\n\tthis.strokeStart(event.touches[0].clientX,event.touches[0].clientY);\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleTouchMoveEvent = function(event) {\n\tif(this.brushDown) 
{\n\t\tthis.strokeMove(event.touches[0].clientX,event.touches[0].clientY);\n\t}\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleTouchEndEvent = function(event) {\n\tif(this.brushDown) {\n\t\tthis.brushDown = false;\n\t\tthis.strokeEnd();\n\t}\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleMouseDownEvent = function(event) {\n\tthis.strokeStart(event.clientX,event.clientY);\n\tthis.brushDown = true;\n\tevent.preventDefault();\n\tevent.stopPropagation();\n\treturn false;\n};\n\nEditBitmapWidget.prototype.handleMouseMoveEvent = function(event) {\n\tif(this.brushDown) {\n\t\tthis.strokeMove(event.clientX,event.clientY);\n\t\tevent.preventDefault();\n\t\tevent.stopPropagation();\n\t\treturn false;\n\t}\n\treturn true;\n};\n\nEditBitmapWidget.prototype.handleMouseUpEvent = function(event) {\n\tif(this.brushDown) {\n\t\tthis.brushDown = false;\n\t\tthis.strokeEnd();\n\t\tevent.preventDefault();\n\t\tevent.stopPropagation();\n\t\treturn false;\n\t}\n\treturn true;\n};\n\nEditBitmapWidget.prototype.adjustCoordinates = function(x,y) {\n\tvar canvasRect = this.canvasDomNode.getBoundingClientRect(),\n\t\tscale = this.canvasDomNode.width/canvasRect.width;\n\treturn {x: (x - canvasRect.left) * scale, y: (y - canvasRect.top) * scale};\n};\n\nEditBitmapWidget.prototype.strokeStart = function(x,y) {\n\t// Start off a new stroke\n\tthis.stroke = [this.adjustCoordinates(x,y)];\n};\n\nEditBitmapWidget.prototype.strokeMove = function(x,y) {\n\tvar ctx = this.canvasDomNode.getContext(\"2d\"),\n\t\tt;\n\t// Add the new position to the end of the stroke\n\tthis.stroke.push(this.adjustCoordinates(x,y));\n\t// Redraw the previous image\n\tctx.drawImage(this.currCanvas,0,0);\n\t// Render the stroke\n\tctx.globalAlpha = parseFloat(this.wiki.getTiddlerText(LINE_OPACITY_TITLE,\"1.0\"));\n\tctx.strokeStyle = this.wiki.getTiddlerText(LINE_COLOUR_TITLE,\"#ff0\");\n\tctx.lineWidth = parseFloat(this.wiki.getTiddlerText(LINE_WIDTH_TITLE,\"3\"));\n\tctx.lineCap = \"round\";\n\tctx.lineJoin = \"round\";\n\tctx.beginPath();\n\tctx.moveTo(this.stroke[0].x,this.stroke[0].y);\n\tfor(t=1; t<this.stroke.length-1; t++) {\n\t\tvar s1 = this.stroke[t],\n\t\t\ts2 = this.stroke[t-1],\n\t\t\ttx = (s1.x + s2.x)/2,\n\t\t\tty = (s1.y + s2.y)/2;\n\t\tctx.quadraticCurveTo(s2.x,s2.y,tx,ty);\n\t}\n\tctx.stroke();\n};\n\nEditBitmapWidget.prototype.strokeEnd = function() {\n\t// Copy the bitmap to the off-screen canvas\n\tvar ctx = this.currCanvas.getContext(\"2d\");\n\tctx.drawImage(this.canvasDomNode,0,0);\n\t// Save the image into the tiddler\n\tthis.saveChanges();\n};\n\nEditBitmapWidget.prototype.saveChanges = function() {\n\tvar tiddler = this.wiki.getTiddler(this.editTitle);\n\tif(tiddler) {\n\t\t// data URIs look like \"data:<type>;base64,<text>\"\n\t\tvar dataURL = this.canvasDomNode.toDataURL(tiddler.fields.type),\n\t\t\tposColon = dataURL.indexOf(\":\"),\n\t\t\tposSemiColon = dataURL.indexOf(\";\"),\n\t\t\tposComma = dataURL.indexOf(\",\"),\n\t\t\ttype = dataURL.substring(posColon+1,posSemiColon),\n\t\t\ttext = dataURL.substring(posComma+1);\n\t\tvar update = {type: type, text: text};\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getModificationFields(),tiddler,update,this.wiki.getCreationFields()));\n\t}\n};\n\nexports[\"edit-bitmap\"] = EditBitmapWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-bitmap.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-shortcut.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-shortcut.js\ntype: application/javascript\nmodule-type: widget\n\nWidget to display an editable keyboard shortcut\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditShortcutWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditShortcutWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditShortcutWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.inputNode = this.document.createElement(\"input\");\n\t// Assign classes\n\tif(this.shortcutClass) {\n\t\tthis.inputNode.className = this.shortcutClass;\t\t\n\t}\n\t// Assign other attributes\n\tif(this.shortcutStyle) {\n\t\tthis.inputNode.setAttribute(\"style\",this.shortcutStyle);\n\t}\n\tif(this.shortcutTooltip) {\n\t\tthis.inputNode.setAttribute(\"title\",this.shortcutTooltip);\n\t}\n\tif(this.shortcutPlaceholder) {\n\t\tthis.inputNode.setAttribute(\"placeholder\",this.shortcutPlaceholder);\n\t}\n\tif(this.shortcutAriaLabel) {\n\t\tthis.inputNode.setAttribute(\"aria-label\",this.shortcutAriaLabel);\n\t}\n\t// Assign the current shortcut\n\tthis.updateInputNode();\n\t// Add event handlers\n\t$tw.utils.addEventListeners(this.inputNode,[\n\t\t{name: \"keydown\", handlerObject: this, handlerMethod: \"handleKeydownEvent\"}\n\t]);\n\t// Link into the DOM\n\tparent.insertBefore(this.inputNode,nextSibling);\n\tthis.domNodes.push(this.inputNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEditShortcutWidget.prototype.execute = function() {\n\tthis.shortcutTiddler = this.getAttribute(\"tiddler\");\n\tthis.shortcutField = this.getAttribute(\"field\");\n\tthis.shortcutIndex = this.getAttribute(\"index\");\n\tthis.shortcutPlaceholder = this.getAttribute(\"placeholder\");\n\tthis.shortcutDefault = this.getAttribute(\"default\",\"\");\n\tthis.shortcutClass = this.getAttribute(\"class\");\n\tthis.shortcutStyle = this.getAttribute(\"style\");\n\tthis.shortcutTooltip = this.getAttribute(\"tooltip\");\n\tthis.shortcutAriaLabel = this.getAttribute(\"aria-label\");\n};\n\n/*\nUpdate the value of the input node\n*/\nEditShortcutWidget.prototype.updateInputNode = function() {\n\tif(this.shortcutField) {\n\t\tvar tiddler = this.wiki.getTiddler(this.shortcutTiddler);\n\t\tif(tiddler && $tw.utils.hop(tiddler.fields,this.shortcutField)) {\n\t\t\tthis.inputNode.value = tiddler.getFieldString(this.shortcutField);\n\t\t} else {\n\t\t\tthis.inputNode.value = this.shortcutDefault;\n\t\t}\n\t} else if(this.shortcutIndex) {\n\t\tthis.inputNode.value = this.wiki.extractTiddlerDataItem(this.shortcutTiddler,this.shortcutIndex,this.shortcutDefault);\n\t} else {\n\t\tthis.inputNode.value = this.wiki.getTiddlerText(this.shortcutTiddler,this.shortcutDefault);\n\t}\n};\n\n/*\nHandle a dom \"keydown\" event\n*/\nEditShortcutWidget.prototype.handleKeydownEvent = function(event) {\n\t// Ignore shift, ctrl, meta, alt\n\tif(event.keyCode && $tw.keyboardManager.getModifierKeys().indexOf(event.keyCode) === -1) {\n\t\t// Get the shortcut text representation\n\t\tvar value = $tw.keyboardManager.getPrintableShortcuts([{\n\t\t\tctrlKey: event.ctrlKey,\n\t\t\tshiftKey: event.shiftKey,\n\t\t\taltKey: event.altKey,\n\t\t\tmetaKey: event.metaKey,\n\t\t\tkeyCode: 
event.keyCode\n\t\t}]);\n\t\tif(value.length > 0) {\n\t\t\tthis.wiki.setText(this.shortcutTiddler,this.shortcutField,this.shortcutIndex,value[0]);\n\t\t}\n\t\t// Ignore the keydown if it was already handled\n\t\tevent.preventDefault();\n\t\tevent.stopPropagation();\n\t\treturn true;\t\t\n\t} else {\n\t\treturn false;\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget needed re-rendering\n*/\nEditShortcutWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedAttributes.placeholder || changedAttributes[\"default\"] || changedAttributes[\"class\"] || changedAttributes.style || changedAttributes.tooltip || changedAttributes[\"aria-label\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else if(changedTiddlers[this.shortcutTiddler]) {\n\t\tthis.updateInputNode();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports[\"edit-shortcut\"] = EditShortcutWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-shortcut.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit-text.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit-text.js\ntype: application/javascript\nmodule-type: widget\n\nEdit-text widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar editTextWidgetFactory = require(\"$:/core/modules/editor/factory.js\").editTextWidgetFactory,\n\tFramedEngine = require(\"$:/core/modules/editor/engines/framed.js\").FramedEngine,\n\tSimpleEngine = require(\"$:/core/modules/editor/engines/simple.js\").SimpleEngine;\n\nexports[\"edit-text\"] = editTextWidgetFactory(FramedEngine,SimpleEngine);\n\n})();\n",
            "title": "$:/core/modules/widgets/edit-text.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/edit.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/edit.js\ntype: application/javascript\nmodule-type: widget\n\nEdit widget is a meta-widget chooses the appropriate actual editting widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EditWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEditWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEditWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n// Mappings from content type to editor type are stored in tiddlers with this prefix\nvar EDITOR_MAPPING_PREFIX = \"$:/config/EditorTypeMappings/\";\n\n/*\nCompute the internal state of the widget\n*/\nEditWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.editTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.editField = this.getAttribute(\"field\",\"text\");\n\tthis.editIndex = this.getAttribute(\"index\");\n\tthis.editClass = this.getAttribute(\"class\");\n\tthis.editPlaceholder = this.getAttribute(\"placeholder\");\n\t// Choose the appropriate edit widget\n\tthis.editorType = this.getEditorType();\n\t// Make the child widgets\n\tthis.makeChildWidgets([{\n\t\ttype: \"edit-\" + this.editorType,\n\t\tattributes: {\n\t\t\ttiddler: {type: \"string\", value: this.editTitle},\n\t\t\tfield: {type: \"string\", value: this.editField},\n\t\t\tindex: {type: \"string\", value: this.editIndex},\n\t\t\t\"class\": {type: \"string\", value: this.editClass},\n\t\t\t\"placeholder\": {type: \"string\", value: this.editPlaceholder}\n\t\t},\n\t\tchildren: this.parseTreeNode.children\n\t}]);\n};\n\nEditWidget.prototype.getEditorType = function() {\n\t// Get the content type of the thing we're editing\n\tvar type;\n\tif(this.editField === \"text\") {\n\t\tvar tiddler = this.wiki.getTiddler(this.editTitle);\n\t\tif(tiddler) {\n\t\t\ttype = tiddler.fields.type;\n\t\t}\n\t}\n\ttype = type || \"text/vnd.tiddlywiki\";\n\tvar editorType = this.wiki.getTiddlerText(EDITOR_MAPPING_PREFIX + type);\n\tif(!editorType) {\n\t\tvar typeInfo = $tw.config.contentTypeInfo[type];\n\t\tif(typeInfo && typeInfo.encoding === \"base64\") {\n\t\t\teditorType = \"binary\";\n\t\t} else {\n\t\t\teditorType = \"text\";\n\t\t}\n\t}\n\treturn editorType;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nEditWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\t// Refresh if an attribute has changed, or the type associated with the target tiddler has changed\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || (changedTiddlers[this.editTitle] && this.getEditorType() !== this.editorType)) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.edit = EditWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/edit.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/element.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/element.js\ntype: application/javascript\nmodule-type: widget\n\nElement widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ElementWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nElementWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nElementWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Neuter blacklisted elements\n\tvar tag = this.parseTreeNode.tag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"safe-\" + tag;\n\t}\n\tvar domNode = this.document.createElementNS(this.namespace,tag);\n\tthis.assignAttributes(domNode,{excludeEventAttributes: true});\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nElementWidget.prototype.execute = function() {\n\t// Select the namespace for the tag\n\tvar tagNamespaces = {\n\t\t\tsvg: \"http://www.w3.org/2000/svg\",\n\t\t\tmath: \"http://www.w3.org/1998/Math/MathML\",\n\t\t\tbody: \"http://www.w3.org/1999/xhtml\"\n\t\t};\n\tthis.namespace = tagNamespaces[this.parseTreeNode.tag];\n\tif(this.namespace) {\n\t\tthis.setVariable(\"namespace\",this.namespace);\n\t} else {\n\t\tthis.namespace = this.getVariable(\"namespace\",{defaultValue: \"http://www.w3.org/1999/xhtml\"});\n\t}\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nElementWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes(),\n\t\thasChangedAttributes = $tw.utils.count(changedAttributes) > 0;\n\tif(hasChangedAttributes) {\n\t\t// Update our attributes\n\t\tthis.assignAttributes(this.domNodes[0],{excludeEventAttributes: true});\n\t}\n\treturn this.refreshChildren(changedTiddlers) || hasChangedAttributes;\n};\n\nexports.element = ElementWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/element.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/encrypt.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/encrypt.js\ntype: application/javascript\nmodule-type: widget\n\nEncrypt widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EncryptWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEncryptWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEncryptWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar textNode = this.document.createTextNode(this.encryptedText);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEncryptWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.filter = this.getAttribute(\"filter\",\"[!is[system]]\");\n\t// Encrypt the filtered tiddlers\n\tvar tiddlers = this.wiki.filterTiddlers(this.filter),\n\t\tjson = {},\n\t\tself = this;\n\t$tw.utils.each(tiddlers,function(title) {\n\t\tvar tiddler = self.wiki.getTiddler(title),\n\t\t\tjsonTiddler = {};\n\t\tfor(var f in tiddler.fields) {\n\t\t\tjsonTiddler[f] = tiddler.getFieldString(f);\n\t\t}\n\t\tjson[title] = jsonTiddler;\n\t});\n\tthis.encryptedText = $tw.utils.htmlEncode($tw.crypto.encrypt(JSON.stringify(json)));\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nEncryptWidget.prototype.refresh = function(changedTiddlers) {\n\t// We don't need to worry about refreshing because the encrypt widget isn't for interactive use\n\treturn false;\n};\n\nexports.encrypt = EncryptWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/encrypt.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/entity.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/entity.js\ntype: application/javascript\nmodule-type: widget\n\nHTML entity widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar EntityWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nEntityWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nEntityWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.execute();\n\tvar entityString = this.getAttribute(\"entity\",this.parseTreeNode.entity || \"\"),\n\t\ttextNode = this.document.createTextNode($tw.utils.entityDecode(entityString));\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nEntityWidget.prototype.execute = function() {\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nEntityWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.entity) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.entity = EntityWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/entity.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/fieldmangler.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/fieldmangler.js\ntype: application/javascript\nmodule-type: widget\n\nField mangler widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar FieldManglerWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.addEventListeners([\n\t\t{type: \"tm-remove-field\", handler: \"handleRemoveFieldEvent\"},\n\t\t{type: \"tm-add-field\", handler: \"handleAddFieldEvent\"},\n\t\t{type: \"tm-remove-tag\", handler: \"handleRemoveTagEvent\"},\n\t\t{type: \"tm-add-tag\", handler: \"handleAddTagEvent\"}\n\t]);\n};\n\n/*\nInherit from the base widget class\n*/\nFieldManglerWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nFieldManglerWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nFieldManglerWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.mangleTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nFieldManglerWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nFieldManglerWidget.prototype.handleRemoveFieldEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle),\n\t\tdeletion = {};\n\tdeletion[event.param] = undefined;\n\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,deletion));\n\treturn true;\n};\n\nFieldManglerWidget.prototype.handleAddFieldEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle),\n\t\taddition = this.wiki.getModificationFields(),\n\t\thadInvalidFieldName = false,\n\t\taddField = function(name,value) {\n\t\t\tvar trimmedName = name.toLowerCase().trim();\n\t\t\tif(!$tw.utils.isValidFieldName(trimmedName)) {\n\t\t\t\tif(!hadInvalidFieldName) {\n\t\t\t\t\talert($tw.language.getString(\n\t\t\t\t\t\t\"InvalidFieldName\",\n\t\t\t\t\t\t{variables:\n\t\t\t\t\t\t\t{fieldName: trimmedName}\n\t\t\t\t\t\t}\n\t\t\t\t\t));\n\t\t\t\t\thadInvalidFieldName = true;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif(!value && tiddler) {\n\t\t\t\t\tvalue = tiddler.fields[trimmedName];\n\t\t\t\t}\n\t\t\t\taddition[trimmedName] = value || \"\";\n\t\t\t}\n\t\t\treturn;\n\t\t};\n\taddition.title = this.mangleTitle;\n\tif(typeof event.param === \"string\") {\n\t\taddField(event.param,\"\");\n\t}\n\tif(typeof event.paramObject === \"object\") {\n\t\tfor(var name in event.paramObject) {\n\t\t\taddField(name,event.paramObject[name]);\n\t\t}\n\t}\n\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,addition));\n\treturn true;\n};\n\nFieldManglerWidget.prototype.handleRemoveTagEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle);\n\tif(tiddler && tiddler.fields.tags) {\n\t\tvar p = tiddler.fields.tags.indexOf(event.param);\n\t\tif(p !== -1) {\n\t\t\tvar modification = this.wiki.getModificationFields();\n\t\t\tmodification.tags = (tiddler.fields.tags || 
[]).slice(0);\n\t\t\tmodification.tags.splice(p,1);\n\t\t\tif(modification.tags.length === 0) {\n\t\t\t\tmodification.tags = undefined;\n\t\t\t}\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,modification));\n\t\t}\n\t}\n\treturn true;\n};\n\nFieldManglerWidget.prototype.handleAddTagEvent = function(event) {\n\tvar tiddler = this.wiki.getTiddler(this.mangleTitle);\n\tif(tiddler && typeof event.param === \"string\") {\n\t\tvar tag = event.param.trim();\n\t\tif(tag !== \"\") {\n\t\t\tvar modification = this.wiki.getModificationFields();\n\t\t\tmodification.tags = (tiddler.fields.tags || []).slice(0);\n\t\t\t$tw.utils.pushTop(modification.tags,tag);\n\t\t\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,modification));\t\t\t\n\t\t}\n\t} else if(typeof event.param === \"string\" && event.param.trim() !== \"\" && this.mangleTitle.trim() !== \"\") {\n\t\tvar tag = [];\n\t\ttag.push(event.param.trim());\n\t\tthis.wiki.addTiddler({title: this.mangleTitle, tags: tag});\t\t\n\t}\n\treturn true;\n};\n\nexports.fieldmangler = FieldManglerWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/fieldmangler.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/fields.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/fields.js\ntype: application/javascript\nmodule-type: widget\n\nFields widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar FieldsWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nFieldsWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nFieldsWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar textNode = this.document.createTextNode(this.text);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nFieldsWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.tiddlerTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.template = this.getAttribute(\"template\");\n\tthis.exclude = this.getAttribute(\"exclude\");\n\tthis.stripTitlePrefix = this.getAttribute(\"stripTitlePrefix\",\"no\") === \"yes\";\n\t// Get the value to display\n\tvar tiddler = this.wiki.getTiddler(this.tiddlerTitle);\n\t// Get the exclusion list\n\tvar exclude;\n\tif(this.exclude) {\n\t\texclude = this.exclude.split(\" \");\n\t} else {\n\t\texclude = [\"text\"]; \n\t}\n\t// Compose the template\n\tvar text = [];\n\tif(this.template && tiddler) {\n\t\tvar fields = [];\n\t\tfor(var fieldName in tiddler.fields) {\n\t\t\tif(exclude.indexOf(fieldName) === -1) {\n\t\t\t\tfields.push(fieldName);\n\t\t\t}\n\t\t}\n\t\tfields.sort();\n\t\tfor(var f=0; f<fields.length; f++) {\n\t\t\tfieldName = fields[f];\n\t\t\tif(exclude.indexOf(fieldName) === -1) {\n\t\t\t\tvar row = this.template,\n\t\t\t\t\tvalue = tiddler.getFieldString(fieldName);\n\t\t\t\tif(this.stripTitlePrefix && fieldName === \"title\") {\n\t\t\t\t\tvar reStrip = /^\\{[^\\}]+\\}(.+)/mg,\n\t\t\t\t\t\treMatch = reStrip.exec(value);\n\t\t\t\t\tif(reMatch) {\n\t\t\t\t\t\tvalue = reMatch[1];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trow = row.replace(\"$name$\",fieldName);\n\t\t\t\trow = row.replace(\"$value$\",value);\n\t\t\t\trow = row.replace(\"$encoded_value$\",$tw.utils.htmlEncode(value));\n\t\t\t\ttext.push(row);\n\t\t\t}\n\t\t}\n\t}\n\tthis.text = text.join(\"\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nFieldsWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.template || changedAttributes.exclude || changedAttributes.stripTitlePrefix || changedTiddlers[this.tiddlerTitle]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.fields = FieldsWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/fields.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/image.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/image.js\ntype: application/javascript\nmodule-type: widget\n\nThe image widget displays an image referenced with an external URI or with a local tiddler title.\n\n```\n<$image src=\"TiddlerTitle\" width=\"320\" height=\"400\" class=\"classnames\">\n```\n\nThe image source can be the title of an existing tiddler or the URL of an external image.\n\nExternal images always generate an HTML `<img>` tag.\n\nTiddlers that have a _canonical_uri field generate an HTML `<img>` tag with the src attribute containing the URI.\n\nTiddlers that contain image data generate an HTML `<img>` tag with the src attribute containing a base64 representation of the image.\n\nTiddlers that contain wikitext could be rendered to a DIV of the usual size of a tiddler, and then transformed to the size requested.\n\nThe width and height attributes are interpreted as a number of pixels, and do not need to include the \"px\" suffix.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ImageWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nImageWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nImageWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\t// Determine what type of image it is\n\tvar tag = \"img\", src = \"\",\n\t\ttiddler = this.wiki.getTiddler(this.imageSource);\n\tif(!tiddler) {\n\t\t// The source isn't the title of a tiddler, so we'll assume it's a URL\n\t\tsrc = this.getVariable(\"tv-get-export-image-link\",{params: [{name: \"src\",value: this.imageSource}],defaultValue: this.imageSource});\n\t} else {\n\t\t// Check if it is an image tiddler\n\t\tif(this.wiki.isImageTiddler(this.imageSource)) {\n\t\t\tvar type = tiddler.fields.type,\n\t\t\t\ttext = tiddler.fields.text,\n\t\t\t\t_canonical_uri = tiddler.fields._canonical_uri;\n\t\t\t// If the tiddler has body text then it doesn't need to be lazily loaded\n\t\t\tif(text) {\n\t\t\t\t// Render the appropriate element for the image type\n\t\t\t\tswitch(type) {\n\t\t\t\t\tcase \"application/pdf\":\n\t\t\t\t\t\ttag = \"embed\";\n\t\t\t\t\t\tsrc = \"data:application/pdf;base64,\" + text;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"image/svg+xml\":\n\t\t\t\t\t\tsrc = \"data:image/svg+xml,\" + encodeURIComponent(text);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tsrc = \"data:\" + type + \";base64,\" + text;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t} else if(_canonical_uri) {\n\t\t\t\tswitch(type) {\n\t\t\t\t\tcase \"application/pdf\":\n\t\t\t\t\t\ttag = \"embed\";\n\t\t\t\t\t\tsrc = _canonical_uri;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"image/svg+xml\":\n\t\t\t\t\t\tsrc = _canonical_uri;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tsrc = _canonical_uri;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\t\n\t\t\t} else {\n\t\t\t\t// Just trigger loading of the tiddler\n\t\t\t\tthis.wiki.getTiddlerText(this.imageSource);\n\t\t\t}\n\t\t}\n\t}\n\t// Create the element and assign the attributes\n\tvar domNode = this.document.createElement(tag);\n\tdomNode.setAttribute(\"src\",src);\n\tif(this.imageClass) {\n\t\tdomNode.setAttribute(\"class\",this.imageClass);\t\t\n\t}\n\tif(this.imageWidth) {\n\t\tdomNode.setAttribute(\"width\",this.imageWidth);\n\t}\n\tif(this.imageHeight) 
{\n\t\tdomNode.setAttribute(\"height\",this.imageHeight);\n\t}\n\tif(this.imageTooltip) {\n\t\tdomNode.setAttribute(\"title\",this.imageTooltip);\t\t\n\t}\n\tif(this.imageAlt) {\n\t\tdomNode.setAttribute(\"alt\",this.imageAlt);\t\t\n\t}\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.domNodes.push(domNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nImageWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.imageSource = this.getAttribute(\"source\");\n\tthis.imageWidth = this.getAttribute(\"width\");\n\tthis.imageHeight = this.getAttribute(\"height\");\n\tthis.imageClass = this.getAttribute(\"class\");\n\tthis.imageTooltip = this.getAttribute(\"tooltip\");\n\tthis.imageAlt = this.getAttribute(\"alt\");\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nImageWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.source || changedAttributes.width || changedAttributes.height || changedAttributes[\"class\"] || changedAttributes.tooltip || changedTiddlers[this.imageSource]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\t\n\t}\n};\n\nexports.image = ImageWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/image.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/importvariables.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/importvariables.js\ntype: application/javascript\nmodule-type: widget\n\nImport variable definitions from other tiddlers\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ImportVariablesWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nImportVariablesWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nImportVariablesWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nImportVariablesWidget.prototype.execute = function(tiddlerList) {\n\tvar self = this;\n\t// Get our parameters\n\tthis.filter = this.getAttribute(\"filter\");\n\t// Compute the filter\n\tthis.tiddlerList = tiddlerList || this.wiki.filterTiddlers(this.filter,this);\n\t// Accumulate the <$set> widgets from each tiddler\n\tvar widgetStackStart,widgetStackEnd;\n\tfunction addWidgetNode(widgetNode) {\n\t\tif(widgetNode) {\n\t\t\tif(!widgetStackStart && !widgetStackEnd) {\n\t\t\t\twidgetStackStart = widgetNode;\n\t\t\t\twidgetStackEnd = widgetNode;\n\t\t\t} else {\n\t\t\t\twidgetStackEnd.children = [widgetNode];\n\t\t\t\twidgetStackEnd = widgetNode;\n\t\t\t}\n\t\t}\n\t}\n\t$tw.utils.each(this.tiddlerList,function(title) {\n\t\tvar parser = self.wiki.parseTiddler(title);\n\t\tif(parser) {\n\t\t\tvar parseTreeNode = parser.tree[0];\n\t\t\twhile(parseTreeNode && parseTreeNode.type === \"set\") {\n\t\t\t\taddWidgetNode({\n\t\t\t\t\ttype: \"set\",\n\t\t\t\t\tattributes: parseTreeNode.attributes,\n\t\t\t\t\tparams: parseTreeNode.params\n\t\t\t\t});\n\t\t\t\tparseTreeNode = parseTreeNode.children[0];\n\t\t\t}\n\t\t} \n\t});\n\t// Add our own children to the end of the pile\n\tvar parseTreeNodes;\n\tif(widgetStackStart && widgetStackEnd) {\n\t\tparseTreeNodes = [widgetStackStart];\n\t\twidgetStackEnd.children = this.parseTreeNode.children;\n\t} else {\n\t\tparseTreeNodes = this.parseTreeNode.children;\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(parseTreeNodes);\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nImportVariablesWidget.prototype.refresh = function(changedTiddlers) {\n\t// Recompute our attributes and the filter list\n\tvar changedAttributes = this.computeAttributes(),\n\t\ttiddlerList = this.wiki.filterTiddlers(this.getAttribute(\"filter\"),this);\n\t// Refresh if the filter has changed, or the list of tiddlers has changed, or any of the tiddlers in the list has changed\n\tfunction haveListedTiddlersChanged() {\n\t\tvar changed = false;\n\t\ttiddlerList.forEach(function(title) {\n\t\t\tif(changedTiddlers[title]) {\n\t\t\t\tchanged = true;\n\t\t\t}\n\t\t});\n\t\treturn changed;\n\t}\n\tif(changedAttributes.filter || !$tw.utils.isArrayEqual(this.tiddlerList,tiddlerList) || haveListedTiddlersChanged()) {\n\t\t// Compute the filter\n\t\tthis.removeChildDomNodes();\n\t\tthis.execute(tiddlerList);\n\t\tthis.renderChildren(this.parentDomNode,this.findNextSiblingDomNode());\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.importvariables = ImportVariablesWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/importvariables.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/keyboard.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/keyboard.js\ntype: application/javascript\nmodule-type: widget\n\nKeyboard shortcut widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar KeyboardWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nKeyboardWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nKeyboardWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create element\n\tvar domNode = this.document.createElement(\"div\");\n\t// Assign classes\n\tvar classes = (this[\"class\"] || \"\").split(\" \");\n\tclasses.push(\"tc-keyboard\");\n\tdomNode.className = classes.join(\" \");\n\t// Add a keyboard event handler\n\tdomNode.addEventListener(\"keydown\",function (event) {\n\t\tif($tw.keyboardManager.checkKeyDescriptors(event,self.keyInfoArray)) {\n\t\t\tself.invokeActions(self,event);\n\t\t\tif(self.actions) {\n\t\t\t\tself.invokeActionString(self.actions,self,event);\n\t\t\t}\n\t\t\tself.dispatchMessage(event);\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t},false);\n\t// Insert element\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nKeyboardWidget.prototype.dispatchMessage = function(event) {\n\tthis.dispatchEvent({type: this.message, param: this.param, tiddlerTitle: this.getVariable(\"currentTiddler\")});\n};\n\n/*\nCompute the internal state of the widget\n*/\nKeyboardWidget.prototype.execute = function() {\n\t// Get attributes\n\tthis.actions = this.getAttribute(\"actions\");\n\tthis.message = this.getAttribute(\"message\");\n\tthis.param = this.getAttribute(\"param\");\n\tthis.key = this.getAttribute(\"key\");\n\tthis.keyInfoArray = $tw.keyboardManager.parseKeyDescriptors(this.key);\n\tthis[\"class\"] = this.getAttribute(\"class\");\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nKeyboardWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.message || changedAttributes.param || changedAttributes.key || changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.keyboard = KeyboardWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/keyboard.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/link.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/link.js\ntype: application/javascript\nmodule-type: widget\n\nLink widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\nvar MISSING_LINK_CONFIG_TITLE = \"$:/config/MissingLinks\";\n\nvar LinkWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nLinkWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nLinkWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Get the value of the tv-wikilinks configuration macro\n\tvar wikiLinksMacro = this.getVariable(\"tv-wikilinks\"),\n\t\tuseWikiLinks = wikiLinksMacro ? (wikiLinksMacro.trim() !== \"no\") : true,\n\t\tmissingLinksEnabled = !(this.hideMissingLinks && this.isMissing && !this.isShadow);\n\t// Render the link if required\n\tif(useWikiLinks && missingLinksEnabled) {\n\t\tthis.renderLink(parent,nextSibling);\n\t} else {\n\t\t// Just insert the link text\n\t\tvar domNode = this.document.createElement(\"span\");\n\t\tparent.insertBefore(domNode,nextSibling);\n\t\tthis.renderChildren(domNode,null);\n\t\tthis.domNodes.push(domNode);\n\t}\n};\n\n/*\nRender this widget into the DOM\n*/\nLinkWidget.prototype.renderLink = function(parent,nextSibling) {\n\tvar self = this;\n\t// Sanitise the specified tag\n\tvar tag = this.linkTag;\n\tif($tw.config.htmlUnsafeElements.indexOf(tag) !== -1) {\n\t\ttag = \"a\";\n\t}\n\t// Create our element\n\tvar domNode = this.document.createElement(tag);\n\t// Assign classes\n\tvar classes = [];\n\tif(this.linkClasses) {\n\t\tclasses.push(this.linkClasses);\n\t}\n\tclasses.push(\"tc-tiddlylink\");\n\tif(this.isShadow) {\n\t\tclasses.push(\"tc-tiddlylink-shadow\");\n\t}\n\tif(this.isMissing && !this.isShadow) {\n\t\tclasses.push(\"tc-tiddlylink-missing\");\n\t} else {\n\t\tif(!this.isMissing) {\n\t\t\tclasses.push(\"tc-tiddlylink-resolves\");\n\t\t}\n\t}\n\tdomNode.setAttribute(\"class\",classes.join(\" \"));\n\t// Set an href\n\tvar wikiLinkTemplateMacro = this.getVariable(\"tv-wikilink-template\"),\n\t\twikiLinkTemplate = wikiLinkTemplateMacro ? 
wikiLinkTemplateMacro.trim() : \"#$uri_encoded$\",\n\t\twikiLinkText = wikiLinkTemplate.replace(\"$uri_encoded$\",encodeURIComponent(this.to));\n\twikiLinkText = wikiLinkText.replace(\"$uri_doubleencoded$\",encodeURIComponent(encodeURIComponent(this.to)));\n\twikiLinkText = this.getVariable(\"tv-get-export-link\",{params: [{name: \"to\",value: this.to}],defaultValue: wikiLinkText});\n\tif(tag === \"a\") {\n\t\tdomNode.setAttribute(\"href\",wikiLinkText);\n\t}\n\tif(this.tabIndex) {\n\t\tdomNode.setAttribute(\"tabindex\",this.tabIndex);\n\t}\n\t// Set the tooltip\n\t// HACK: Performance issues with re-parsing the tooltip prevent us defaulting the tooltip to \"<$transclude field='tooltip'><$transclude field='title'/></$transclude>\"\n\tvar tooltipWikiText = this.tooltip || this.getVariable(\"tv-wikilink-tooltip\");\n\tif(tooltipWikiText) {\n\t\tvar tooltipText = this.wiki.renderText(\"text/plain\",\"text/vnd.tiddlywiki\",tooltipWikiText,{\n\t\t\t\tparseAsInline: true,\n\t\t\t\tvariables: {\n\t\t\t\t\tcurrentTiddler: this.to\n\t\t\t\t},\n\t\t\t\tparentWidget: this\n\t\t\t});\n\t\tdomNode.setAttribute(\"title\",tooltipText);\n\t}\n\tif(this[\"aria-label\"]) {\n\t\tdomNode.setAttribute(\"aria-label\",this[\"aria-label\"]);\n\t}\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(domNode,[\n\t\t{name: \"click\", handlerObject: this, handlerMethod: \"handleClickEvent\"},\n\t]);\n\tif(this.draggable === \"yes\") {\n\t\t$tw.utils.addEventListeners(domNode,[\n\t\t\t{name: \"dragstart\", handlerObject: this, handlerMethod: \"handleDragStartEvent\"},\n\t\t\t{name: \"dragend\", handlerObject: this, handlerMethod: \"handleDragEndEvent\"}\n\t\t]);\n\t}\n\t// Insert the link into the DOM and render any children\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nLinkWidget.prototype.handleClickEvent = function(event) {\n\t// Send the click on its way as a navigate event\n\tvar bounds = this.domNodes[0].getBoundingClientRect();\n\tthis.dispatchEvent({\n\t\ttype: \"tm-navigate\",\n\t\tnavigateTo: this.to,\n\t\tnavigateFromTitle: this.getVariable(\"storyTiddler\"),\n\t\tnavigateFromNode: this,\n\t\tnavigateFromClientRect: { top: bounds.top, left: bounds.left, width: bounds.width, right: bounds.right, bottom: bounds.bottom, height: bounds.height\n\t\t},\n\t\tnavigateSuppressNavigation: event.metaKey || event.ctrlKey || (event.button === 1)\n\t});\n\tif(this.domNodes[0].hasAttribute(\"href\")) {\n\t\tevent.preventDefault();\n\t}\n\tevent.stopPropagation();\n\treturn false;\n};\n\nLinkWidget.prototype.handleDragStartEvent = function(event) {\n\tif(event.target === this.domNodes[0]) {\n\t\tif(this.to) {\n\t\t\t$tw.dragInProgress = true;\n\t\t\t// Set the dragging class on the element being dragged\n\t\t\t$tw.utils.addClass(event.target,\"tc-tiddlylink-dragging\");\n\t\t\t// Create the drag image elements\n\t\t\tthis.dragImage = this.document.createElement(\"div\");\n\t\t\tthis.dragImage.className = \"tc-tiddler-dragger\";\n\t\t\tvar inner = this.document.createElement(\"div\");\n\t\t\tinner.className = \"tc-tiddler-dragger-inner\";\n\t\t\tinner.appendChild(this.document.createTextNode(this.to));\n\t\t\tthis.dragImage.appendChild(inner);\n\t\t\tthis.document.body.appendChild(this.dragImage);\n\t\t\t// Astoundingly, we need to cover the dragger up: http://www.kryogenix.org/code/browser/custom-drag-image.html\n\t\t\tvar cover = this.document.createElement(\"div\");\n\t\t\tcover.className = 
\"tc-tiddler-dragger-cover\";\n\t\t\tcover.style.left = (inner.offsetLeft - 16) + \"px\";\n\t\t\tcover.style.top = (inner.offsetTop - 16) + \"px\";\n\t\t\tcover.style.width = (inner.offsetWidth + 32) + \"px\";\n\t\t\tcover.style.height = (inner.offsetHeight + 32) + \"px\";\n\t\t\tthis.dragImage.appendChild(cover);\n\t\t\t// Set the data transfer properties\n\t\t\tvar dataTransfer = event.dataTransfer;\n\t\t\t// First the image\n\t\t\tdataTransfer.effectAllowed = \"copy\";\n\t\t\tif(dataTransfer.setDragImage) {\n\t\t\t\tdataTransfer.setDragImage(this.dragImage.firstChild,-16,-16);\n\t\t\t}\n\t\t\t// Then the data\n\t\t\tdataTransfer.clearData();\n\t\t\tvar jsonData = this.wiki.getTiddlerAsJson(this.to),\n\t\t\t\ttextData = this.wiki.getTiddlerText(this.to,\"\"),\n\t\t\t\ttitle = (new RegExp(\"^\" + $tw.config.textPrimitives.wikiLink + \"$\",\"mg\")).exec(this.to) ? this.to : \"[[\" + this.to + \"]]\";\n\t\t\t// IE doesn't like these content types\n\t\t\tif(!$tw.browser.isIE) {\n\t\t\t\tdataTransfer.setData(\"text/vnd.tiddler\",jsonData);\n\t\t\t\tdataTransfer.setData(\"text/plain\",title);\n\t\t\t\tdataTransfer.setData(\"text/x-moz-url\",\"data:text/vnd.tiddler,\" + encodeURIComponent(jsonData));\n\t\t\t}\n\t\t\tdataTransfer.setData(\"URL\",\"data:text/vnd.tiddler,\" + encodeURIComponent(jsonData));\n\t\t\tdataTransfer.setData(\"Text\",title);\n\t\t\tevent.stopPropagation();\n\t\t} else {\n\t\t\tevent.preventDefault();\n\t\t}\n\t}\n};\n\nLinkWidget.prototype.handleDragEndEvent = function(event) {\n\tif(event.target === this.domNodes[0]) {\n\t\t$tw.dragInProgress = false;\n\t\t// Remove the dragging class on the element being dragged\n\t\t$tw.utils.removeClass(event.target,\"tc-tiddlylink-dragging\");\n\t\t// Delete the drag image element\n\t\tif(this.dragImage) {\n\t\t\tthis.dragImage.parentNode.removeChild(this.dragImage);\n\t\t}\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nLinkWidget.prototype.execute = function() {\n\t// Pick up our attributes\n\tthis.to = this.getAttribute(\"to\",this.getVariable(\"currentTiddler\"));\n\tthis.tooltip = this.getAttribute(\"tooltip\");\n\tthis[\"aria-label\"] = this.getAttribute(\"aria-label\");\n\tthis.linkClasses = this.getAttribute(\"class\");\n\tthis.tabIndex = this.getAttribute(\"tabindex\");\n\tthis.draggable = this.getAttribute(\"draggable\",\"yes\");\n\tthis.linkTag = this.getAttribute(\"tag\",\"a\");\n\t// Determine the link characteristics\n\tthis.isMissing = !this.wiki.tiddlerExists(this.to);\n\tthis.isShadow = this.wiki.isShadowTiddler(this.to);\n\tthis.hideMissingLinks = ($tw.wiki.getTiddlerText(MISSING_LINK_CONFIG_TITLE,\"yes\") === \"no\");\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nLinkWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.to || changedTiddlers[this.to] || changedAttributes[\"aria-label\"] || changedAttributes.tooltip || changedTiddlers[MISSING_LINK_CONFIG_TITLE]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.link = LinkWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/link.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/linkcatcher.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/linkcatcher.js\ntype: application/javascript\nmodule-type: widget\n\nLinkcatcher widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar LinkCatcherWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.addEventListeners([\n\t\t{type: \"tm-navigate\", handler: \"handleNavigateEvent\"}\n\t]);\n};\n\n/*\nInherit from the base widget class\n*/\nLinkCatcherWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nLinkCatcherWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nLinkCatcherWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.catchTo = this.getAttribute(\"to\");\n\tthis.catchMessage = this.getAttribute(\"message\");\n\tthis.catchSet = this.getAttribute(\"set\");\n\tthis.catchSetTo = this.getAttribute(\"setTo\");\n\tthis.catchActions = this.getAttribute(\"actions\");\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nLinkCatcherWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.to || changedAttributes.message || changedAttributes.set || changedAttributes.setTo) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\n/*\nHandle a tm-navigate event\n*/\nLinkCatcherWidget.prototype.handleNavigateEvent = function(event) {\n\tif(this.catchTo) {\n\t\tthis.wiki.setTextReference(this.catchTo,event.navigateTo,this.getVariable(\"currentTiddler\"));\n\t}\n\tif(this.catchMessage && this.parentWidget) {\n\t\tthis.parentWidget.dispatchEvent({\n\t\t\ttype: this.catchMessage,\n\t\t\tparam: event.navigateTo,\n\t\t\tnavigateTo: event.navigateTo\n\t\t});\n\t}\n\tif(this.catchSet) {\n\t\tvar tiddler = this.wiki.getTiddler(this.catchSet);\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(tiddler,{title: this.catchSet, text: this.catchSetTo}));\n\t}\n\tif(this.catchActions) {\n\t\tthis.invokeActionString(this.catchActions,this);\n\t}\n\treturn false;\n};\n\nexports.linkcatcher = LinkCatcherWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/linkcatcher.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/list.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/list.js\ntype: application/javascript\nmodule-type: widget\n\nList and list item widgets\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\n/*\nThe list widget creates list element sub-widgets that reach back into the list widget for their configuration\n*/\n\nvar ListWidget = function(parseTreeNode,options) {\n\t// Initialise the storyviews if they've not been done already\n\tif(!this.storyViews) {\n\t\tListWidget.prototype.storyViews = {};\n\t\t$tw.modules.applyMethods(\"storyview\",this.storyViews);\n\t}\n\t// Main initialisation inherited from widget.js\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nListWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nListWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n\t// Construct the storyview\n\tvar StoryView = this.storyViews[this.storyViewName];\n\tif(StoryView && !this.document.isTiddlyWikiFakeDom) {\n\t\tthis.storyview = new StoryView(this);\n\t} else {\n\t\tthis.storyview = null;\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nListWidget.prototype.execute = function() {\n\t// Get our attributes\n\tthis.template = this.getAttribute(\"template\");\n\tthis.editTemplate = this.getAttribute(\"editTemplate\");\n\tthis.variableName = this.getAttribute(\"variable\",\"currentTiddler\");\n\tthis.storyViewName = this.getAttribute(\"storyview\");\n\tthis.historyTitle = this.getAttribute(\"history\");\n\t// Compose the list elements\n\tthis.list = this.getTiddlerList();\n\tvar members = [],\n\t\tself = this;\n\t// Check for an empty list\n\tif(this.list.length === 0) {\n\t\tmembers = this.getEmptyMessage();\n\t} else {\n\t\t$tw.utils.each(this.list,function(title,index) {\n\t\t\tmembers.push(self.makeItemTemplate(title));\n\t\t});\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(members);\n\t// Clear the last history\n\tthis.history = [];\n};\n\nListWidget.prototype.getTiddlerList = function() {\n\tvar defaultFilter = \"[!is[system]sort[title]]\";\n\treturn this.wiki.filterTiddlers(this.getAttribute(\"filter\",defaultFilter),this);\n};\n\nListWidget.prototype.getEmptyMessage = function() {\n\tvar emptyMessage = this.getAttribute(\"emptyMessage\",\"\"),\n\t\tparser = this.wiki.parseText(\"text/vnd.tiddlywiki\",emptyMessage,{parseAsInline: true});\n\tif(parser) {\n\t\treturn parser.tree;\n\t} else {\n\t\treturn [];\n\t}\n};\n\n/*\nCompose the template for a list item\n*/\nListWidget.prototype.makeItemTemplate = function(title) {\n\t// Check if the tiddler is a draft\n\tvar tiddler = this.wiki.getTiddler(title),\n\t\tisDraft = tiddler && tiddler.hasField(\"draft.of\"),\n\t\ttemplate = this.template,\n\t\ttemplateTree;\n\tif(isDraft && this.editTemplate) {\n\t\ttemplate = this.editTemplate;\n\t}\n\t// Compose the transclusion of the template\n\tif(template) {\n\t\ttemplateTree = [{type: \"transclude\", attributes: {tiddler: {type: \"string\", value: template}}}];\n\t} else {\n\t\tif(this.parseTreeNode.children && this.parseTreeNode.children.length > 0) {\n\t\t\ttemplateTree = this.parseTreeNode.children;\n\t\t} else {\n\t\t\t// Default template is a link to the title\n\t\t\ttemplateTree = [{type: \"element\", tag: this.parseTreeNode.isBlock ? 
\"div\" : \"span\", children: [{type: \"link\", attributes: {to: {type: \"string\", value: title}}, children: [\n\t\t\t\t\t{type: \"text\", text: title}\n\t\t\t]}]}];\n\t\t}\n\t}\n\t// Return the list item\n\treturn {type: \"listitem\", itemTitle: title, variableName: this.variableName, children: templateTree};\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nListWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes(),\n\t\tresult;\n\t// Call the storyview\n\tif(this.storyview && this.storyview.refreshStart) {\n\t\tthis.storyview.refreshStart(changedTiddlers,changedAttributes);\n\t}\n\t// Completely refresh if any of our attributes have changed\n\tif(changedAttributes.filter || changedAttributes.template || changedAttributes.editTemplate || changedAttributes.emptyMessage || changedAttributes.storyview || changedAttributes.history) {\n\t\tthis.refreshSelf();\n\t\tresult = true;\n\t} else {\n\t\t// Handle any changes to the list\n\t\tresult = this.handleListChanges(changedTiddlers);\n\t\t// Handle any changes to the history stack\n\t\tif(this.historyTitle && changedTiddlers[this.historyTitle]) {\n\t\t\tthis.handleHistoryChanges();\n\t\t}\n\t}\n\t// Call the storyview\n\tif(this.storyview && this.storyview.refreshEnd) {\n\t\tthis.storyview.refreshEnd(changedTiddlers,changedAttributes);\n\t}\n\treturn result;\n};\n\n/*\nHandle any changes to the history list\n*/\nListWidget.prototype.handleHistoryChanges = function() {\n\t// Get the history data\n\tvar newHistory = this.wiki.getTiddlerDataCached(this.historyTitle,[]);\n\t// Ignore any entries of the history that match the previous history\n\tvar entry = 0;\n\twhile(entry < newHistory.length && entry < this.history.length && newHistory[entry].title === this.history[entry].title) {\n\t\tentry++;\n\t}\n\t// Navigate forwards to each of the new tiddlers\n\twhile(entry < newHistory.length) {\n\t\tif(this.storyview && this.storyview.navigateTo) {\n\t\t\tthis.storyview.navigateTo(newHistory[entry]);\n\t\t}\n\t\tentry++;\n\t}\n\t// Update the history\n\tthis.history = newHistory;\n};\n\n/*\nProcess any changes to the list\n*/\nListWidget.prototype.handleListChanges = function(changedTiddlers) {\n\t// Get the new list\n\tvar prevList = this.list;\n\tthis.list = this.getTiddlerList();\n\t// Check for an empty list\n\tif(this.list.length === 0) {\n\t\t// Check if it was empty before\n\t\tif(prevList.length === 0) {\n\t\t\t// If so, just refresh the empty message\n\t\t\treturn this.refreshChildren(changedTiddlers);\n\t\t} else {\n\t\t\t// Replace the previous content with the empty message\n\t\t\tfor(t=this.children.length-1; t>=0; t--) {\n\t\t\t\tthis.removeListItem(t);\n\t\t\t}\n\t\t\tvar nextSibling = this.findNextSiblingDomNode();\n\t\t\tthis.makeChildWidgets(this.getEmptyMessage());\n\t\t\tthis.renderChildren(this.parentDomNode,nextSibling);\n\t\t\treturn true;\n\t\t}\n\t} else {\n\t\t// If the list was empty then we need to remove the empty message\n\t\tif(prevList.length === 0) {\n\t\t\tthis.removeChildDomNodes();\n\t\t\tthis.children = [];\n\t\t}\n\t\t// Cycle through the list, inserting and removing list items as needed\n\t\tvar hasRefreshed = false;\n\t\tfor(var t=0; t<this.list.length; t++) {\n\t\t\tvar index = this.findListItem(t,this.list[t]);\n\t\t\tif(index === undefined) {\n\t\t\t\t// The list item must be inserted\n\t\t\t\tthis.insertListItem(t,this.list[t]);\n\t\t\t\thasRefreshed = true;\n\t\t\t} else 
{\n\t\t\t\t// There are intervening list items that must be removed\n\t\t\t\tfor(var n=index-1; n>=t; n--) {\n\t\t\t\t\tthis.removeListItem(n);\n\t\t\t\t\thasRefreshed = true;\n\t\t\t\t}\n\t\t\t\t// Refresh the item we're reusing\n\t\t\t\tvar refreshed = this.children[t].refresh(changedTiddlers);\n\t\t\t\thasRefreshed = hasRefreshed || refreshed;\n\t\t\t}\n\t\t}\n\t\t// Remove any left over items\n\t\tfor(t=this.children.length-1; t>=this.list.length; t--) {\n\t\t\tthis.removeListItem(t);\n\t\t\thasRefreshed = true;\n\t\t}\n\t\treturn hasRefreshed;\n\t}\n};\n\n/*\nFind the list item with a given title, starting from a specified position\n*/\nListWidget.prototype.findListItem = function(startIndex,title) {\n\twhile(startIndex < this.children.length) {\n\t\tif(this.children[startIndex].parseTreeNode.itemTitle === title) {\n\t\t\treturn startIndex;\n\t\t}\n\t\tstartIndex++;\n\t}\n\treturn undefined;\n};\n\n/*\nInsert a new list item at the specified index\n*/\nListWidget.prototype.insertListItem = function(index,title) {\n\t// Create, insert and render the new child widgets\n\tvar widget = this.makeChildWidget(this.makeItemTemplate(title));\n\twidget.parentDomNode = this.parentDomNode; // Hack to enable findNextSiblingDomNode() to work\n\tthis.children.splice(index,0,widget);\n\tvar nextSibling = widget.findNextSiblingDomNode();\n\twidget.render(this.parentDomNode,nextSibling);\n\t// Animate the insertion if required\n\tif(this.storyview && this.storyview.insert) {\n\t\tthis.storyview.insert(widget);\n\t}\n\treturn true;\n};\n\n/*\nRemove the specified list item\n*/\nListWidget.prototype.removeListItem = function(index) {\n\tvar widget = this.children[index];\n\t// Animate the removal if required\n\tif(this.storyview && this.storyview.remove) {\n\t\tthis.storyview.remove(widget);\n\t} else {\n\t\twidget.removeChildDomNodes();\n\t}\n\t// Remove the child widget\n\tthis.children.splice(index,1);\n};\n\nexports.list = ListWidget;\n\nvar ListItemWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nListItemWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nListItemWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nListItemWidget.prototype.execute = function() {\n\t// Set the current list item title\n\tthis.setVariable(this.parseTreeNode.variableName,this.parseTreeNode.itemTitle);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nListItemWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.listitem = ListItemWidget;\n\n})();",
            "title": "$:/core/modules/widgets/list.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/macrocall.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/macrocall.js\ntype: application/javascript\nmodule-type: widget\n\nMacrocall widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar MacroCallWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nMacroCallWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nMacroCallWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nMacroCallWidget.prototype.execute = function() {\n\t// Get the parse type if specified\n\tthis.parseType = this.getAttribute(\"$type\",\"text/vnd.tiddlywiki\");\n\tthis.renderOutput = this.getAttribute(\"$output\",\"text/html\");\n\t// Merge together the parameters specified in the parse tree with the specified attributes\n\tvar params = this.parseTreeNode.params ? this.parseTreeNode.params.slice(0) : [];\n\t$tw.utils.each(this.attributes,function(attribute,name) {\n\t\tif(name.charAt(0) !== \"$\") {\n\t\t\tparams.push({name: name, value: attribute});\t\t\t\n\t\t}\n\t});\n\t// Get the macro value\n\tvar text = this.getVariable(this.parseTreeNode.name || this.getAttribute(\"$name\"),{params: params}),\n\t\tparseTreeNodes;\n\t// Are we rendering to HTML?\n\tif(this.renderOutput === \"text/html\") {\n\t\t// If so we'll return the parsed macro\n\t\tvar parser = this.wiki.parseText(this.parseType,text,\n\t\t\t\t\t\t\t{parseAsInline: !this.parseTreeNode.isBlock});\n\t\tparseTreeNodes = parser ? parser.tree : [];\n\t} else {\n\t\t// Otherwise, we'll render the text\n\t\tvar plainText = this.wiki.renderText(\"text/plain\",this.parseType,text,{parentWidget: this});\n\t\tparseTreeNodes = [{type: \"text\", text: plainText}];\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(parseTreeNodes);\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nMacroCallWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif($tw.utils.count(changedAttributes) > 0) {\n\t\t// Rerender ourselves\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.macrocall = MacroCallWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/macrocall.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/navigator.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/navigator.js\ntype: application/javascript\nmodule-type: widget\n\nNavigator widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar IMPORT_TITLE = \"$:/Import\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar NavigatorWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.addEventListeners([\n\t\t{type: \"tm-navigate\", handler: \"handleNavigateEvent\"},\n\t\t{type: \"tm-edit-tiddler\", handler: \"handleEditTiddlerEvent\"},\n\t\t{type: \"tm-delete-tiddler\", handler: \"handleDeleteTiddlerEvent\"},\n\t\t{type: \"tm-save-tiddler\", handler: \"handleSaveTiddlerEvent\"},\n\t\t{type: \"tm-cancel-tiddler\", handler: \"handleCancelTiddlerEvent\"},\n\t\t{type: \"tm-close-tiddler\", handler: \"handleCloseTiddlerEvent\"},\n\t\t{type: \"tm-close-all-tiddlers\", handler: \"handleCloseAllTiddlersEvent\"},\n\t\t{type: \"tm-close-other-tiddlers\", handler: \"handleCloseOtherTiddlersEvent\"},\n\t\t{type: \"tm-new-tiddler\", handler: \"handleNewTiddlerEvent\"},\n\t\t{type: \"tm-import-tiddlers\", handler: \"handleImportTiddlersEvent\"},\n\t\t{type: \"tm-perform-import\", handler: \"handlePerformImportEvent\"},\n\t\t{type: \"tm-fold-tiddler\", handler: \"handleFoldTiddlerEvent\"},\n\t\t{type: \"tm-fold-other-tiddlers\", handler: \"handleFoldOtherTiddlersEvent\"},\n\t\t{type: \"tm-fold-all-tiddlers\", handler: \"handleFoldAllTiddlersEvent\"},\n\t\t{type: \"tm-unfold-all-tiddlers\", handler: \"handleUnfoldAllTiddlersEvent\"},\n\t\t{type: \"tm-rename-tiddler\", handler: \"handleRenameTiddlerEvent\"}\n\t]);\n};\n\n/*\nInherit from the base widget class\n*/\nNavigatorWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nNavigatorWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nNavigatorWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.storyTitle = this.getAttribute(\"story\");\n\tthis.historyTitle = this.getAttribute(\"history\");\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nNavigatorWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.story || changedAttributes.history) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nNavigatorWidget.prototype.getStoryList = function() {\n\treturn this.storyTitle ? 
this.wiki.getTiddlerList(this.storyTitle) : null;\n};\n\nNavigatorWidget.prototype.saveStoryList = function(storyList) {\n\tvar storyTiddler = this.wiki.getTiddler(this.storyTitle);\n\tthis.wiki.addTiddler(new $tw.Tiddler(\n\t\t{title: this.storyTitle},\n\t\tstoryTiddler,\n\t\t{list: storyList}\n\t));\n};\n\nNavigatorWidget.prototype.removeTitleFromStory = function(storyList,title) {\n\tvar p = storyList.indexOf(title);\n\twhile(p !== -1) {\n\t\tstoryList.splice(p,1);\n\t\tp = storyList.indexOf(title);\n\t}\n};\n\nNavigatorWidget.prototype.replaceFirstTitleInStory = function(storyList,oldTitle,newTitle) {\n\tvar pos = storyList.indexOf(oldTitle);\n\tif(pos !== -1) {\n\t\tstoryList[pos] = newTitle;\n\t\tdo {\n\t\t\tpos = storyList.indexOf(oldTitle,pos + 1);\n\t\t\tif(pos !== -1) {\n\t\t\t\tstoryList.splice(pos,1);\n\t\t\t}\n\t\t} while(pos !== -1);\n\t} else {\n\t\tstoryList.splice(0,0,newTitle);\n\t}\n};\n\nNavigatorWidget.prototype.addToStory = function(title,fromTitle) {\n\tvar storyList = this.getStoryList();\n\t// Quit if we cannot get hold of the story list\n\tif(!storyList) {\n\t\treturn;\n\t}\n\t// See if the tiddler is already there\n\tvar slot = storyList.indexOf(title);\n\t// Quit if it already exists in the story river\n\tif(slot >= 0) {\n\t\treturn;\n\t}\n\t// First we try to find the position of the story element we navigated from\n\tvar fromIndex = storyList.indexOf(fromTitle);\n\tif(fromIndex >= 0) {\n\t\t// The tiddler is added from inside the river\n\t\t// Determine where to insert the tiddler; Fallback is \"below\"\n\t\tswitch(this.getAttribute(\"openLinkFromInsideRiver\",\"below\")) {\n\t\t\tcase \"top\":\n\t\t\t\tslot = 0;\n\t\t\t\tbreak;\n\t\t\tcase \"bottom\":\n\t\t\t\tslot = storyList.length;\n\t\t\t\tbreak;\n\t\t\tcase \"above\":\n\t\t\t\tslot = fromIndex;\n\t\t\t\tbreak;\n\t\t\tcase \"below\": // Intentional fall-through\n\t\t\tdefault:\n\t\t\t\tslot = fromIndex + 1;\n\t\t\t\tbreak;\n\t\t}\n\t} else {\n\t\t// The tiddler is opened from outside the river. 
Determine where to insert the tiddler; default is \"top\"\n\t\tif(this.getAttribute(\"openLinkFromOutsideRiver\",\"top\") === \"bottom\") {\n\t\t\t// Insert at bottom\n\t\t\tslot = storyList.length;\n\t\t} else {\n\t\t\t// Insert at top\n\t\t\tslot = 0;\n\t\t}\n\t}\n\t// Add the tiddler\n\tstoryList.splice(slot,0,title);\n\t// Save the story\n\tthis.saveStoryList(storyList);\n};\n\n/*\nAdd a new record to the top of the history stack\ntitle: a title string or an array of title strings\nfromPageRect: page coordinates of the origin of the navigation\n*/\nNavigatorWidget.prototype.addToHistory = function(title,fromPageRect) {\n\tthis.wiki.addToHistory(title,fromPageRect,this.historyTitle);\n};\n\n/*\nHandle a tm-navigate event\n*/\nNavigatorWidget.prototype.handleNavigateEvent = function(event) {\n\tif(event.navigateTo) {\n\t\tthis.addToStory(event.navigateTo,event.navigateFromTitle);\n\t\tif(!event.navigateSuppressNavigation) {\n\t\t\tthis.addToHistory(event.navigateTo,event.navigateFromClientRect);\n\t\t}\n\t}\n\treturn false;\n};\n\n// Close a specified tiddler\nNavigatorWidget.prototype.handleCloseTiddlerEvent = function(event) {\n\tvar title = event.param || event.tiddlerTitle,\n\t\tstoryList = this.getStoryList();\n\t// Look for tiddlers with this title to close\n\tthis.removeTitleFromStory(storyList,title);\n\tthis.saveStoryList(storyList);\n\treturn false;\n};\n\n// Close all tiddlers\nNavigatorWidget.prototype.handleCloseAllTiddlersEvent = function(event) {\n\tthis.saveStoryList([]);\n\treturn false;\n};\n\n// Close other tiddlers\nNavigatorWidget.prototype.handleCloseOtherTiddlersEvent = function(event) {\n\tvar title = event.param || event.tiddlerTitle;\n\tthis.saveStoryList([title]);\n\treturn false;\n};\n\n// Place a tiddler in edit mode\nNavigatorWidget.prototype.handleEditTiddlerEvent = function(event) {\n\tvar self = this;\n\tfunction isUnmodifiedShadow(title) {\n\t\treturn self.wiki.isShadowTiddler(title) && !self.wiki.tiddlerExists(title);\n\t}\n\tfunction confirmEditShadow(title) {\n\t\treturn confirm($tw.language.getString(\n\t\t\t\"ConfirmEditShadowTiddler\",\n\t\t\t{variables:\n\t\t\t\t{title: title}\n\t\t\t}\n\t\t));\n\t}\n\tvar title = event.param || event.tiddlerTitle;\n\tif(isUnmodifiedShadow(title) && !confirmEditShadow(title)) {\n\t\treturn false;\n\t}\n\t// Replace the specified tiddler with a draft in edit mode\n\tvar draftTiddler = this.makeDraftTiddler(title);\n\t// Update the story and history if required\n\tif(!event.paramObject || event.paramObject.suppressNavigation !== \"yes\") {\n\t\tvar draftTitle = draftTiddler.fields.title,\n\t\t\tstoryList = this.getStoryList();\n\t\tthis.removeTitleFromStory(storyList,draftTitle);\n\t\tthis.replaceFirstTitleInStory(storyList,title,draftTitle);\n\t\tthis.addToHistory(draftTitle,event.navigateFromClientRect);\n\t\tthis.saveStoryList(storyList);\n\t\treturn false;\n\t}\n};\n\n// Delete a tiddler\nNavigatorWidget.prototype.handleDeleteTiddlerEvent = function(event) {\n\t// Get the tiddler we're deleting\n\tvar title = event.param || event.tiddlerTitle,\n\t\ttiddler = this.wiki.getTiddler(title),\n\t\tstoryList = this.getStoryList(),\n\t\toriginalTitle = tiddler ? 
tiddler.fields[\"draft.of\"] : \"\",\n\t\tconfirmationTitle;\n\tif(!tiddler) {\n\t\treturn false;\n\t}\n\t// Check if the tiddler we're deleting is in draft mode\n\tif(originalTitle) {\n\t\t// If so, we'll prompt for confirmation referencing the original tiddler\n\t\tconfirmationTitle = originalTitle;\n\t} else {\n\t\t// If not a draft, then prompt for confirmation referencing the specified tiddler\n\t\tconfirmationTitle = title;\n\t}\n\t// Seek confirmation\n\tif((this.wiki.getTiddler(originalTitle) || (tiddler.fields.text || \"\") !== \"\") && !confirm($tw.language.getString(\n\t\t\t\t\"ConfirmDeleteTiddler\",\n\t\t\t\t{variables:\n\t\t\t\t\t{title: confirmationTitle}\n\t\t\t\t}\n\t\t\t))) {\n\t\treturn false;\n\t}\n\t// Delete the original tiddler\n\tif(originalTitle) {\n\t\tthis.wiki.deleteTiddler(originalTitle);\n\t\tthis.removeTitleFromStory(storyList,originalTitle);\n\t}\n\t// Delete this tiddler\n\tthis.wiki.deleteTiddler(title);\n\t// Remove the closed tiddler from the story\n\tthis.removeTitleFromStory(storyList,title);\n\tthis.saveStoryList(storyList);\n\t// Trigger an autosave\n\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n\treturn false;\n};\n\n/*\nCreate/reuse the draft tiddler for a given title\n*/\nNavigatorWidget.prototype.makeDraftTiddler = function(targetTitle) {\n\t// See if there is already a draft tiddler for this tiddler\n\tvar draftTitle = this.wiki.findDraft(targetTitle);\n\tif(draftTitle) {\n\t\treturn this.wiki.getTiddler(draftTitle);\n\t}\n\t// Get the current value of the tiddler we're editing\n\tvar tiddler = this.wiki.getTiddler(targetTitle);\n\t// Save the initial value of the draft tiddler\n\tdraftTitle = this.generateDraftTitle(targetTitle);\n\tvar draftTiddler = new $tw.Tiddler(\n\t\t\ttiddler,\n\t\t\t{\n\t\t\t\ttitle: draftTitle,\n\t\t\t\t\"draft.title\": targetTitle,\n\t\t\t\t\"draft.of\": targetTitle\n\t\t\t},\n\t\t\tthis.wiki.getModificationFields()\n\t\t);\n\tthis.wiki.addTiddler(draftTiddler);\n\treturn draftTiddler;\n};\n\n/*\nGenerate a title for the draft of a given tiddler\n*/\nNavigatorWidget.prototype.generateDraftTitle = function(title) {\n\tvar c = 0,\n\t\tdraftTitle;\n\tdo {\n\t\tdraftTitle = \"Draft \" + (c ? 
(c + 1) + \" \" : \"\") + \"of '\" + title + \"'\";\n\t\tc++;\n\t} while(this.wiki.tiddlerExists(draftTitle));\n\treturn draftTitle;\n};\n\n// Take a tiddler out of edit mode, saving the changes\nNavigatorWidget.prototype.handleSaveTiddlerEvent = function(event) {\n\tvar title = event.param || event.tiddlerTitle,\n\t\ttiddler = this.wiki.getTiddler(title),\n\t\tstoryList = this.getStoryList();\n\t// Replace the original tiddler with the draft\n\tif(tiddler) {\n\t\tvar draftTitle = (tiddler.fields[\"draft.title\"] || \"\").trim(),\n\t\t\tdraftOf = (tiddler.fields[\"draft.of\"] || \"\").trim();\n\t\tif(draftTitle) {\n\t\t\tvar isRename = draftOf !== draftTitle,\n\t\t\t\tisConfirmed = true;\n\t\t\tif(isRename && this.wiki.tiddlerExists(draftTitle)) {\n\t\t\t\tisConfirmed = confirm($tw.language.getString(\n\t\t\t\t\t\"ConfirmOverwriteTiddler\",\n\t\t\t\t\t{variables:\n\t\t\t\t\t\t{title: draftTitle}\n\t\t\t\t\t}\n\t\t\t\t));\n\t\t\t}\n\t\t\tif(isConfirmed) {\n\t\t\t\t// Create the new tiddler and pass it through the th-saving-tiddler hook\n\t\t\t\tvar newTiddler = new $tw.Tiddler(this.wiki.getCreationFields(),tiddler,{\n\t\t\t\t\ttitle: draftTitle,\n\t\t\t\t\t\"draft.title\": undefined,\n\t\t\t\t\t\"draft.of\": undefined\n\t\t\t\t},this.wiki.getModificationFields());\n\t\t\t\tnewTiddler = $tw.hooks.invokeHook(\"th-saving-tiddler\",newTiddler);\n\t\t\t\tthis.wiki.addTiddler(newTiddler);\n\t\t\t\t// Remove the draft tiddler\n\t\t\t\tthis.wiki.deleteTiddler(title);\n\t\t\t\t// Remove the original tiddler if we're renaming it\n\t\t\t\tif(isRename) {\n\t\t\t\t\tthis.wiki.deleteTiddler(draftOf);\n\t\t\t\t}\n\t\t\t\tif(!event.paramObject || event.paramObject.suppressNavigation !== \"yes\") {\n\t\t\t\t\t// Replace the draft in the story with the original\n\t\t\t\t\tthis.replaceFirstTitleInStory(storyList,title,draftTitle);\n\t\t\t\t\tthis.addToHistory(draftTitle,event.navigateFromClientRect);\n\t\t\t\t\tif(draftTitle !== this.storyTitle) {\n\t\t\t\t\t\tthis.saveStoryList(storyList);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Trigger an autosave\n\t\t\t\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n};\n\n// Take a tiddler out of edit mode without saving the changes\nNavigatorWidget.prototype.handleCancelTiddlerEvent = function(event) {\n\t// Flip the specified tiddler from draft back to the original\n\tvar draftTitle = event.param || event.tiddlerTitle,\n\t\tdraftTiddler = this.wiki.getTiddler(draftTitle),\n\t\toriginalTitle = draftTiddler && draftTiddler.fields[\"draft.of\"];\n\tif(draftTiddler && originalTitle) {\n\t\t// Ask for confirmation if the tiddler text has changed\n\t\tvar isConfirmed = true,\n\t\t\toriginalTiddler = this.wiki.getTiddler(originalTitle),\n\t\t\tstoryList = this.getStoryList();\n\t\tif(this.wiki.isDraftModified(draftTitle)) {\n\t\t\tisConfirmed = confirm($tw.language.getString(\n\t\t\t\t\"ConfirmCancelTiddler\",\n\t\t\t\t{variables:\n\t\t\t\t\t{title: draftTitle}\n\t\t\t\t}\n\t\t\t));\n\t\t}\n\t\t// Remove the draft tiddler\n\t\tif(isConfirmed) {\n\t\t\tthis.wiki.deleteTiddler(draftTitle);\n\t\t\tif(!event.paramObject || event.paramObject.suppressNavigation !== \"yes\") {\n\t\t\t\tif(originalTiddler) {\n\t\t\t\t\tthis.replaceFirstTitleInStory(storyList,draftTitle,originalTitle);\n\t\t\t\t\tthis.addToHistory(originalTitle,event.navigateFromClientRect);\n\t\t\t\t} else {\n\t\t\t\t\tthis.removeTitleFromStory(storyList,draftTitle);\n\t\t\t\t}\n\t\t\t\tthis.saveStoryList(storyList);\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n};\n\n// 
Create a new draft tiddler\n// event.param can either be the title of a template tiddler, or a hashmap of fields.\n//\n// The title of the newly created tiddler follows these rules:\n// * If a hashmap was used and a title field was specified, use that title\n// * If a hashmap was used without a title field, use a default title, if necessary making it unique with a numeric suffix\n// * If a template tiddler was used, use the title of the template, if necessary making it unique with a numeric suffix\n//\n// If a draft of the target tiddler already exists then it is reused\nNavigatorWidget.prototype.handleNewTiddlerEvent = function(event) {\n\t// Get the story details\n\tvar storyList = this.getStoryList(),\n\t\ttemplateTiddler, additionalFields, title, draftTitle, existingTiddler;\n\t// Get the template tiddler (if any)\n\tif(typeof event.param === \"string\") {\n\t\t// Get the template tiddler\n\t\ttemplateTiddler = this.wiki.getTiddler(event.param);\n\t\t// Generate a new title\n\t\ttitle = this.wiki.generateNewTitle(event.param || $tw.language.getString(\"DefaultNewTiddlerTitle\"));\n\t}\n\t// Get the specified additional fields\n\tif(typeof event.paramObject === \"object\") {\n\t\tadditionalFields = event.paramObject;\n\t}\n\tif(typeof event.param === \"object\") { // Backwards compatibility with 5.1.3\n\t\tadditionalFields = event.param;\n\t}\n\tif(additionalFields && additionalFields.title) {\n\t\ttitle = additionalFields.title;\n\t}\n\t// Generate a title if we don't have one\n\ttitle = title || this.wiki.generateNewTitle($tw.language.getString(\"DefaultNewTiddlerTitle\"));\n\t// Find any existing draft for this tiddler\n\tdraftTitle = this.wiki.findDraft(title);\n\t// Pull in any existing tiddler\n\tif(draftTitle) {\n\t\texistingTiddler = this.wiki.getTiddler(draftTitle);\n\t} else {\n\t\tdraftTitle = this.generateDraftTitle(title);\n\t\texistingTiddler = this.wiki.getTiddler(title);\n\t}\n\t// Merge the tags\n\tvar mergedTags = [];\n\tif(existingTiddler && existingTiddler.fields.tags) {\n\t\t$tw.utils.pushTop(mergedTags,existingTiddler.fields.tags)\n\t}\n\tif(additionalFields && additionalFields.tags) {\n\t\t// Merge tags\n\t\tmergedTags = $tw.utils.pushTop(mergedTags,$tw.utils.parseStringArray(additionalFields.tags));\n\t}\n\tif(templateTiddler && templateTiddler.fields.tags) {\n\t\t// Merge tags\n\t\tmergedTags = $tw.utils.pushTop(mergedTags,templateTiddler.fields.tags);\n\t}\n\t// Save the draft tiddler\n\tvar draftTiddler = new $tw.Tiddler({\n\t\t\ttext: \"\",\n\t\t\t\"draft.title\": title\n\t\t},\n\t\ttemplateTiddler,\n\t\texistingTiddler,\n\t\tadditionalFields,\n\t\tthis.wiki.getCreationFields(),\n\t\t{\n\t\t\ttitle: draftTitle,\n\t\t\t\"draft.of\": title,\n\t\t\ttags: mergedTags\n\t\t},this.wiki.getModificationFields());\n\tthis.wiki.addTiddler(draftTiddler);\n\t// Update the story to insert the new draft at the top and remove any existing tiddler\n\tif(storyList.indexOf(draftTitle) === -1) {\n\t\tvar slot = storyList.indexOf(event.navigateFromTitle);\n\t\tstoryList.splice(slot + 1,0,draftTitle);\n\t}\n\tif(storyList.indexOf(title) !== -1) {\n\t\tstoryList.splice(storyList.indexOf(title),1);\t\t\n\t}\n\tthis.saveStoryList(storyList);\n\t// Add a new record to the top of the history stack\n\tthis.addToHistory(draftTitle);\n\treturn false;\n};\n\n// Import JSON tiddlers into a pending import tiddler\nNavigatorWidget.prototype.handleImportTiddlersEvent = function(event) {\n\tvar self = this;\n\t// Get the tiddlers\n\tvar tiddlers = [];\n\ttry {\n\t\ttiddlers = 
JSON.parse(event.param);\t\n\t} catch(e) {\n\t}\n\t// Get the current $:/Import tiddler\n\tvar importTiddler = this.wiki.getTiddler(IMPORT_TITLE),\n\t\timportData = this.wiki.getTiddlerData(IMPORT_TITLE,{}),\n\t\tnewFields = new Object({\n\t\t\ttitle: IMPORT_TITLE,\n\t\t\ttype: \"application/json\",\n\t\t\t\"plugin-type\": \"import\",\n\t\t\t\"status\": \"pending\"\n\t\t}),\n\t\tincomingTiddlers = [];\n\t// Process each tiddler\n\timportData.tiddlers = importData.tiddlers || {};\n\t$tw.utils.each(tiddlers,function(tiddlerFields) {\n\t\tvar title = tiddlerFields.title;\n\t\tif(title) {\n\t\t\tincomingTiddlers.push(title);\n\t\t\timportData.tiddlers[title] = tiddlerFields;\n\t\t}\n\t});\n\t// Give the active upgrader modules a chance to process the incoming tiddlers\n\tvar messages = this.wiki.invokeUpgraders(incomingTiddlers,importData.tiddlers);\n\t$tw.utils.each(messages,function(message,title) {\n\t\tnewFields[\"message-\" + title] = message;\n\t});\n\t// Deselect any suppressed tiddlers\n\t$tw.utils.each(importData.tiddlers,function(tiddler,title) {\n\t\tif($tw.utils.count(tiddler) === 0) {\n\t\t\tnewFields[\"selection-\" + title] = \"unchecked\";\n\t\t}\n\t});\n\t// Save the $:/Import tiddler\n\tnewFields.text = JSON.stringify(importData,null,$tw.config.preferences.jsonSpaces);\n\tthis.wiki.addTiddler(new $tw.Tiddler(importTiddler,newFields));\n\t// Update the story and history details\n\tif(this.getVariable(\"tv-auto-open-on-import\") !== \"no\") {\n\t\tvar storyList = this.getStoryList(),\n\t\t\thistory = [];\n\t\t// Add it to the story\n\t\tif(storyList.indexOf(IMPORT_TITLE) === -1) {\n\t\t\tstoryList.unshift(IMPORT_TITLE);\n\t\t}\n\t\t// And to history\n\t\thistory.push(IMPORT_TITLE);\n\t\t// Save the updated story and history\n\t\tthis.saveStoryList(storyList);\n\t\tthis.addToHistory(history);\t\t\n\t}\n\treturn false;\n};\n\n// \nNavigatorWidget.prototype.handlePerformImportEvent = function(event) {\n\tvar self = this,\n\t\timportTiddler = this.wiki.getTiddler(event.param),\n\t\timportData = this.wiki.getTiddlerDataCached(event.param,{tiddlers: {}}),\n\t\timportReport = [];\n\t// Add the tiddlers to the store\n\timportReport.push($tw.language.getString(\"Import/Imported/Hint\") + \"\\n\");\n\t$tw.utils.each(importData.tiddlers,function(tiddlerFields) {\n\t\tvar title = tiddlerFields.title;\n\t\tif(title && importTiddler && importTiddler.fields[\"selection-\" + title] !== \"unchecked\") {\n\t\t\tself.wiki.addTiddler(new $tw.Tiddler(tiddlerFields));\n\t\t\timportReport.push(\"# [[\" + tiddlerFields.title + \"]]\");\n\t\t}\n\t});\n\t// Replace the $:/Import tiddler with an import report\n\tthis.wiki.addTiddler(new $tw.Tiddler({\n\t\ttitle: event.param,\n\t\ttext: importReport.join(\"\\n\"),\n\t\t\"status\": \"complete\"\n\t}));\n\t// Navigate to the $:/Import tiddler\n\tthis.addToHistory([event.param]);\n\t// Trigger an autosave\n\t$tw.rootWidget.dispatchEvent({type: \"tm-auto-save-wiki\"});\n};\n\nNavigatorWidget.prototype.handleFoldTiddlerEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {};\n\tif(paramObject.foldedState) {\n\t\tvar foldedState = this.wiki.getTiddlerText(paramObject.foldedState,\"show\") === \"show\" ? 
\"hide\" : \"show\";\n\t\tthis.wiki.setText(paramObject.foldedState,\"text\",null,foldedState);\n\t}\n};\n\nNavigatorWidget.prototype.handleFoldOtherTiddlersEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tprefix = paramObject.foldedStatePrefix;\n\t$tw.utils.each(this.getStoryList(),function(title) {\n\t\tself.wiki.setText(prefix + title,\"text\",null,event.param === title ? \"show\" : \"hide\");\n\t});\n};\n\nNavigatorWidget.prototype.handleFoldAllTiddlersEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tprefix = paramObject.foldedStatePrefix;\n\t$tw.utils.each(this.getStoryList(),function(title) {\n\t\tself.wiki.setText(prefix + title,\"text\",null,\"hide\");\n\t});\n};\n\nNavigatorWidget.prototype.handleUnfoldAllTiddlersEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tprefix = paramObject.foldedStatePrefix;\n\t$tw.utils.each(this.getStoryList(),function(title) {\n\t\tself.wiki.setText(prefix + title,\"text\",null,\"show\");\n\t});\n};\n\nNavigatorWidget.prototype.handleRenameTiddlerEvent = function(event) {\n\tvar self = this,\n\t\tparamObject = event.paramObject || {},\n\t\tfrom = paramObject.from || event.tiddlerTitle,\n\t\tto = paramObject.to;\n\t$tw.wiki.renameTiddler(from,to);\n};\n\nexports.navigator = NavigatorWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/navigator.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/password.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/password.js\ntype: application/javascript\nmodule-type: widget\n\nPassword widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar PasswordWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nPasswordWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nPasswordWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Get the current password\n\tvar password = $tw.browser ? $tw.utils.getPassword(this.passwordName) || \"\" : \"\";\n\t// Create our element\n\tvar domNode = this.document.createElement(\"input\");\n\tdomNode.setAttribute(\"type\",\"password\");\n\tdomNode.setAttribute(\"value\",password);\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(domNode,[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n\t// Insert the label into the DOM and render any children\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tthis.domNodes.push(domNode);\n};\n\nPasswordWidget.prototype.handleChangeEvent = function(event) {\n\tvar password = this.domNodes[0].value;\n\treturn $tw.utils.savePassword(this.passwordName,password);\n};\n\n/*\nCompute the internal state of the widget\n*/\nPasswordWidget.prototype.execute = function() {\n\t// Get the parameters from the attributes\n\tthis.passwordName = this.getAttribute(\"name\",\"\");\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nPasswordWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.name) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.password = PasswordWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/password.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/radio.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/radio.js\ntype: application/javascript\nmodule-type: widget\n\nRadio widget\n\nWill set a field to the selected value:\n\n```\n\t<$radio field=\"myfield\" value=\"check 1\">one</$radio>\n\t<$radio field=\"myfield\" value=\"check 2\">two</$radio>\n\t<$radio field=\"myfield\" value=\"check 3\">three</$radio>\n```\n\n|Parameter |Description |h\n|tiddler |Name of the tiddler in which the field should be set. Defaults to current tiddler |\n|field |The name of the field to be set |\n|value |The value to set |\n|class |Optional class name(s) |\n\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar RadioWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nRadioWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nRadioWidget.prototype.render = function(parent,nextSibling) {\n\t// Save the parent dom node\n\tthis.parentDomNode = parent;\n\t// Compute our attributes\n\tthis.computeAttributes();\n\t// Execute our logic\n\tthis.execute();\n\t// Create our elements\n\tthis.labelDomNode = this.document.createElement(\"label\");\n\tthis.labelDomNode.setAttribute(\"class\",this.radioClass);\n\tthis.inputDomNode = this.document.createElement(\"input\");\n\tthis.inputDomNode.setAttribute(\"type\",\"radio\");\n\tif(this.getValue() == this.radioValue) {\n\t\tthis.inputDomNode.setAttribute(\"checked\",\"true\");\n\t}\n\tthis.labelDomNode.appendChild(this.inputDomNode);\n\tthis.spanDomNode = this.document.createElement(\"span\");\n\tthis.labelDomNode.appendChild(this.spanDomNode);\n\t// Add a click event handler\n\t$tw.utils.addEventListeners(this.inputDomNode,[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n\t// Insert the label into the DOM and render any children\n\tparent.insertBefore(this.labelDomNode,nextSibling);\n\tthis.renderChildren(this.spanDomNode,null);\n\tthis.domNodes.push(this.labelDomNode);\n};\n\nRadioWidget.prototype.getValue = function() {\n\tvar tiddler = this.wiki.getTiddler(this.radioTitle);\n\treturn tiddler && tiddler.getFieldString(this.radioField);\n};\n\nRadioWidget.prototype.setValue = function() {\n\tif(this.radioField) {\n\t\tvar tiddler = this.wiki.getTiddler(this.radioTitle),\n\t\t\taddition = {};\n\t\taddition[this.radioField] = this.radioValue;\n\t\tthis.wiki.addTiddler(new $tw.Tiddler(this.wiki.getCreationFields(),{title: this.radioTitle},tiddler,addition,this.wiki.getModificationFields()));\n\t}\n};\n\nRadioWidget.prototype.handleChangeEvent = function(event) {\n\tif(this.inputDomNode.checked) {\n\t\tthis.setValue();\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nRadioWidget.prototype.execute = function() {\n\t// Get the parameters from the attributes\n\tthis.radioTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.radioField = this.getAttribute(\"field\",\"text\");\n\tthis.radioValue = this.getAttribute(\"value\");\n\tthis.radioClass = this.getAttribute(\"class\",\"\");\n\tif(this.radioClass !== \"\") {\n\t\tthis.radioClass += \" \";\n\t}\n\tthis.radioClass += \"tc-radio\";\n\t// Make the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. 
Returns true if the widget or any of its children needed re-rendering\n*/\nRadioWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.value || changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\tvar refreshed = false;\n\t\tif(changedTiddlers[this.radioTitle]) {\n\t\t\tthis.inputDomNode.checked = this.getValue() === this.radioValue;\n\t\t\trefreshed = true;\n\t\t}\n\t\treturn this.refreshChildren(changedTiddlers) || refreshed;\n\t}\n};\n\nexports.radio = RadioWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/radio.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/raw.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/raw.js\ntype: application/javascript\nmodule-type: widget\n\nRaw widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar RawWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nRawWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nRawWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.execute();\n\tvar div = this.document.createElement(\"div\");\n\tdiv.innerHTML=this.parseTreeNode.html;\n\tparent.insertBefore(div,nextSibling);\n\tthis.domNodes.push(div);\t\n};\n\n/*\nCompute the internal state of the widget\n*/\nRawWidget.prototype.execute = function() {\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nRawWidget.prototype.refresh = function(changedTiddlers) {\n\treturn false;\n};\n\nexports.raw = RawWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/raw.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/reveal.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/reveal.js\ntype: application/javascript\nmodule-type: widget\n\nReveal widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar RevealWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nRevealWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nRevealWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar tag = this.parseTreeNode.isBlock ? \"div\" : \"span\";\n\tif(this.revealTag && $tw.config.htmlUnsafeElements.indexOf(this.revealTag) === -1) {\n\t\ttag = this.revealTag;\n\t}\n\tvar domNode = this.document.createElement(tag);\n\tvar classes = this[\"class\"].split(\" \") || [];\n\tclasses.push(\"tc-reveal\");\n\tdomNode.className = classes.join(\" \");\n\tif(this.style) {\n\t\tdomNode.setAttribute(\"style\",this.style);\n\t}\n\tparent.insertBefore(domNode,nextSibling);\n\tthis.renderChildren(domNode,null);\n\tif(!domNode.isTiddlyWikiFakeDom && this.type === \"popup\" && this.isOpen) {\n\t\tthis.positionPopup(domNode);\n\t\t$tw.utils.addClass(domNode,\"tc-popup\"); // Make sure that clicks don't dismiss popups within the revealed content\n\t}\n\tif(!this.isOpen) {\n\t\tdomNode.setAttribute(\"hidden\",\"true\");\n\t}\n\tthis.domNodes.push(domNode);\n};\n\nRevealWidget.prototype.positionPopup = function(domNode) {\n\tdomNode.style.position = \"absolute\";\n\tdomNode.style.zIndex = \"1000\";\n\tswitch(this.position) {\n\t\tcase \"left\":\n\t\t\tdomNode.style.left = (this.popup.left - domNode.offsetWidth) + \"px\";\n\t\t\tdomNode.style.top = this.popup.top + \"px\";\n\t\t\tbreak;\n\t\tcase \"above\":\n\t\t\tdomNode.style.left = this.popup.left + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top - domNode.offsetHeight) + \"px\";\n\t\t\tbreak;\n\t\tcase \"aboveright\":\n\t\t\tdomNode.style.left = (this.popup.left + this.popup.width) + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top + this.popup.height - domNode.offsetHeight) + \"px\";\n\t\t\tbreak;\n\t\tcase \"right\":\n\t\t\tdomNode.style.left = (this.popup.left + this.popup.width) + \"px\";\n\t\t\tdomNode.style.top = this.popup.top + \"px\";\n\t\t\tbreak;\n\t\tcase \"belowleft\":\n\t\t\tdomNode.style.left = (this.popup.left + this.popup.width - domNode.offsetWidth) + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top + this.popup.height) + \"px\";\n\t\t\tbreak;\n\t\tdefault: // Below\n\t\t\tdomNode.style.left = this.popup.left + \"px\";\n\t\t\tdomNode.style.top = (this.popup.top + this.popup.height) + \"px\";\n\t\t\tbreak;\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nRevealWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.state = this.getAttribute(\"state\");\n\tthis.revealTag = this.getAttribute(\"tag\");\n\tthis.type = this.getAttribute(\"type\");\n\tthis.text = this.getAttribute(\"text\");\n\tthis.position = this.getAttribute(\"position\");\n\tthis[\"class\"] = this.getAttribute(\"class\",\"\");\n\tthis.style = this.getAttribute(\"style\",\"\");\n\tthis[\"default\"] = this.getAttribute(\"default\",\"\");\n\tthis.animate = this.getAttribute(\"animate\",\"no\");\n\tthis.retain = this.getAttribute(\"retain\",\"no\");\n\tthis.openAnimation = this.animate === \"no\" ? 
undefined : \"open\";\n\tthis.closeAnimation = this.animate === \"no\" ? undefined : \"close\";\n\t// Compute the title of the state tiddler and read it\n\tthis.stateTitle = this.state;\n\tthis.readState();\n\t// Construct the child widgets\n\tvar childNodes = this.isOpen ? this.parseTreeNode.children : [];\n\tthis.hasChildNodes = this.isOpen;\n\tthis.makeChildWidgets(childNodes);\n};\n\n/*\nRead the state tiddler\n*/\nRevealWidget.prototype.readState = function() {\n\t// Read the information from the state tiddler\n\tvar state = this.stateTitle ? this.wiki.getTextReference(this.stateTitle,this[\"default\"],this.getVariable(\"currentTiddler\")) : this[\"default\"];\n\tswitch(this.type) {\n\t\tcase \"popup\":\n\t\t\tthis.readPopupState(state);\n\t\t\tbreak;\n\t\tcase \"match\":\n\t\t\tthis.readMatchState(state);\n\t\t\tbreak;\n\t\tcase \"nomatch\":\n\t\t\tthis.readMatchState(state);\n\t\t\tthis.isOpen = !this.isOpen;\n\t\t\tbreak;\n\t}\n};\n\nRevealWidget.prototype.readMatchState = function(state) {\n\tthis.isOpen = state === this.text;\n};\n\nRevealWidget.prototype.readPopupState = function(state) {\n\tvar popupLocationRegExp = /^\\((-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+),(-?[0-9\\.E]+)\\)$/,\n\t\tmatch = popupLocationRegExp.exec(state);\n\t// Check if the state matches the location regexp\n\tif(match) {\n\t\t// If so, we're open\n\t\tthis.isOpen = true;\n\t\t// Get the location\n\t\tthis.popup = {\n\t\t\tleft: parseFloat(match[1]),\n\t\t\ttop: parseFloat(match[2]),\n\t\t\twidth: parseFloat(match[3]),\n\t\t\theight: parseFloat(match[4])\n\t\t};\n\t} else {\n\t\t// If not, we're closed\n\t\tthis.isOpen = false;\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nRevealWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.state || changedAttributes.type || changedAttributes.text || changedAttributes.position || changedAttributes[\"default\"] || changedAttributes.animate) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\tvar refreshed = false,\n\t\t\tcurrentlyOpen = this.isOpen;\n\t\tthis.readState();\n\t\tif(this.isOpen !== currentlyOpen) {\n\t\t\tif(this.retain === \"yes\") {\n\t\t\t\tthis.updateState();\n\t\t\t} else {\n\t\t\t\tthis.refreshSelf();\n\t\t\t\trefreshed = true;\n\t\t\t}\n\t\t}\n\t\treturn this.refreshChildren(changedTiddlers) || refreshed;\n\t}\n};\n\n/*\nCalled by refresh() to dynamically show or hide the content\n*/\nRevealWidget.prototype.updateState = function() {\n\t// Read the current state\n\tthis.readState();\n\t// Construct the child nodes if needed\n\tvar domNode = this.domNodes[0];\n\tif(this.isOpen && !this.hasChildNodes) {\n\t\tthis.hasChildNodes = true;\n\t\tthis.makeChildWidgets(this.parseTreeNode.children);\n\t\tthis.renderChildren(domNode,null);\n\t}\n\t// Animate our DOM node\n\tif(!domNode.isTiddlyWikiFakeDom && this.type === \"popup\" && this.isOpen) {\n\t\tthis.positionPopup(domNode);\n\t\t$tw.utils.addClass(domNode,\"tc-popup\"); // Make sure that clicks don't dismiss popups within the revealed content\n\n\t}\n\tif(this.isOpen) {\n\t\tdomNode.removeAttribute(\"hidden\");\n        $tw.anim.perform(this.openAnimation,domNode);\n\t} else {\n\t\t$tw.anim.perform(this.closeAnimation,domNode,{callback: function() {\n\t\t\tdomNode.setAttribute(\"hidden\",\"true\");\n        }});\n\t}\n};\n\nexports.reveal = RevealWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/reveal.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/scrollable.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/scrollable.js\ntype: application/javascript\nmodule-type: widget\n\nScrollable widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ScrollableWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n\tthis.scaleFactor = 1;\n\tthis.addEventListeners([\n\t\t{type: \"tm-scroll\", handler: \"handleScrollEvent\"}\n\t]);\n\tif($tw.browser) {\n\t\tthis.requestAnimationFrame = window.requestAnimationFrame ||\n\t\t\twindow.webkitRequestAnimationFrame ||\n\t\t\twindow.mozRequestAnimationFrame ||\n\t\t\tfunction(callback) {\n\t\t\t\treturn window.setTimeout(callback, 1000/60);\n\t\t\t};\n\t\tthis.cancelAnimationFrame = window.cancelAnimationFrame ||\n\t\t\twindow.webkitCancelAnimationFrame ||\n\t\t\twindow.webkitCancelRequestAnimationFrame ||\n\t\t\twindow.mozCancelAnimationFrame ||\n\t\t\twindow.mozCancelRequestAnimationFrame ||\n\t\t\tfunction(id) {\n\t\t\t\twindow.clearTimeout(id);\n\t\t\t};\n\t}\n};\n\n/*\nInherit from the base widget class\n*/\nScrollableWidget.prototype = new Widget();\n\nScrollableWidget.prototype.cancelScroll = function() {\n\tif(this.idRequestFrame) {\n\t\tthis.cancelAnimationFrame.call(window,this.idRequestFrame);\n\t\tthis.idRequestFrame = null;\n\t}\n};\n\n/*\nHandle a scroll event\n*/\nScrollableWidget.prototype.handleScrollEvent = function(event) {\n\t// Pass the scroll event through if our offsetsize is larger than our scrollsize\n\tif(this.outerDomNode.scrollWidth <= this.outerDomNode.offsetWidth && this.outerDomNode.scrollHeight <= this.outerDomNode.offsetHeight && this.fallthrough === \"yes\") {\n\t\treturn true;\n\t}\n\tthis.scrollIntoView(event.target);\n\treturn false; // Handled event\n};\n\n/*\nScroll an element into view\n*/\nScrollableWidget.prototype.scrollIntoView = function(element) {\n\tvar duration = $tw.utils.getAnimationDuration();\n\tthis.cancelScroll();\n\tthis.startTime = Date.now();\n\tvar scrollPosition = {\n\t\tx: this.outerDomNode.scrollLeft,\n\t\ty: this.outerDomNode.scrollTop\n\t};\n\t// Get the client bounds of the element and adjust by the scroll position\n\tvar scrollableBounds = this.outerDomNode.getBoundingClientRect(),\n\t\tclientTargetBounds = element.getBoundingClientRect(),\n\t\tbounds = {\n\t\t\tleft: clientTargetBounds.left + scrollPosition.x - scrollableBounds.left,\n\t\t\ttop: clientTargetBounds.top + scrollPosition.y - scrollableBounds.top,\n\t\t\twidth: clientTargetBounds.width,\n\t\t\theight: clientTargetBounds.height\n\t\t};\n\t// We'll consider the horizontal and vertical scroll directions separately via this function\n\tvar getEndPos = function(targetPos,targetSize,currentPos,currentSize) {\n\t\t\t// If the target is already visible then stay where we are\n\t\t\tif(targetPos >= currentPos && (targetPos + targetSize) <= (currentPos + currentSize)) {\n\t\t\t\treturn currentPos;\n\t\t\t// If the target is above/left of the current view, then scroll to its top/left\n\t\t\t} else if(targetPos <= currentPos) {\n\t\t\t\treturn targetPos;\n\t\t\t// If the target is smaller than the window and the scroll position is too far up, then scroll till the target is at the bottom of the window\n\t\t\t} else if(targetSize < currentSize && currentPos < (targetPos + targetSize - currentSize)) {\n\t\t\t\treturn targetPos + targetSize - currentSize;\n\t\t\t// If the target is big, then just scroll to the top\n\t\t\t} else 
if(currentPos < targetPos) {\n\t\t\t\treturn targetPos;\n\t\t\t// Otherwise, stay where we are\n\t\t\t} else {\n\t\t\t\treturn currentPos;\n\t\t\t}\n\t\t},\n\t\tendX = getEndPos(bounds.left,bounds.width,scrollPosition.x,this.outerDomNode.offsetWidth),\n\t\tendY = getEndPos(bounds.top,bounds.height,scrollPosition.y,this.outerDomNode.offsetHeight);\n\t// Only scroll if necessary\n\tif(endX !== scrollPosition.x || endY !== scrollPosition.y) {\n\t\tvar self = this,\n\t\t\tdrawFrame;\n\t\tdrawFrame = function () {\n\t\t\tvar t;\n\t\t\tif(duration <= 0) {\n\t\t\t\tt = 1;\n\t\t\t} else {\n\t\t\t\tt = ((Date.now()) - self.startTime) / duration;\t\n\t\t\t}\n\t\t\tif(t >= 1) {\n\t\t\t\tself.cancelScroll();\n\t\t\t\tt = 1;\n\t\t\t}\n\t\t\tt = $tw.utils.slowInSlowOut(t);\n\t\t\tself.outerDomNode.scrollLeft = scrollPosition.x + (endX - scrollPosition.x) * t;\n\t\t\tself.outerDomNode.scrollTop = scrollPosition.y + (endY - scrollPosition.y) * t;\n\t\t\tif(t < 1) {\n\t\t\t\tself.idRequestFrame = self.requestAnimationFrame.call(window,drawFrame);\n\t\t\t}\n\t\t};\n\t\tdrawFrame();\n\t}\n};\n\n/*\nRender this widget into the DOM\n*/\nScrollableWidget.prototype.render = function(parent,nextSibling) {\n\tvar self = this;\n\t// Remember parent\n\tthis.parentDomNode = parent;\n\t// Compute attributes and execute state\n\tthis.computeAttributes();\n\tthis.execute();\n\t// Create elements\n\tthis.outerDomNode = this.document.createElement(\"div\");\n\t$tw.utils.setStyle(this.outerDomNode,[\n\t\t{overflowY: \"auto\"},\n\t\t{overflowX: \"auto\"},\n\t\t{webkitOverflowScrolling: \"touch\"}\n\t]);\n\tthis.innerDomNode = this.document.createElement(\"div\");\n\tthis.outerDomNode.appendChild(this.innerDomNode);\n\t// Assign classes\n\tthis.outerDomNode.className = this[\"class\"] || \"\";\n\t// Insert element\n\tparent.insertBefore(this.outerDomNode,nextSibling);\n\tthis.renderChildren(this.innerDomNode,null);\n\tthis.domNodes.push(this.outerDomNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nScrollableWidget.prototype.execute = function() {\n\t// Get attributes\n\tthis.fallthrough = this.getAttribute(\"fallthrough\",\"yes\");\n\tthis[\"class\"] = this.getAttribute(\"class\");\n\t// Make child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nScrollableWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes[\"class\"]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports.scrollable = ScrollableWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/scrollable.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/select.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/select.js\ntype: application/javascript\nmodule-type: widget\n\nSelect widget:\n\n```\n<$select tiddler=\"MyTiddler\" field=\"text\">\n<$list filter=\"[tag[chapter]]\">\n<option value=<<currentTiddler>>>\n<$view field=\"description\"/>\n</option>\n</$list>\n</$select>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SelectWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSelectWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSelectWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n\tthis.setSelectValue();\n\t$tw.utils.addEventListeners(this.getSelectDomNode(),[\n\t\t{name: \"change\", handlerObject: this, handlerMethod: \"handleChangeEvent\"}\n\t]);\n};\n\n/*\nHandle a change event\n*/\nSelectWidget.prototype.handleChangeEvent = function(event) {\n\t// Get the new value and assign it to the tiddler\n\tif(this.selectMultiple == false) {\n\t\tvar value = this.getSelectDomNode().value;\n\t} else {\n\t\tvar value = this.getSelectValues()\n\t\t\t\tvalue = $tw.utils.stringifyList(value);\n\t}\n\tthis.wiki.setText(this.selectTitle,this.selectField,this.selectIndex,value);\n\t// Trigger actions\n\tif(this.selectActions) {\n\t\tthis.invokeActionString(this.selectActions,this,event);\n\t}\n};\n\n/*\nIf necessary, set the value of the select element to the current value\n*/\nSelectWidget.prototype.setSelectValue = function() {\n\tvar value = this.selectDefault;\n\t// Get the value\n\tif(this.selectIndex) {\n\t\tvalue = this.wiki.extractTiddlerDataItem(this.selectTitle,this.selectIndex);\n\t} else {\n\t\tvar tiddler = this.wiki.getTiddler(this.selectTitle);\n\t\tif(tiddler) {\n\t\t\tif(this.selectField === \"text\") {\n\t\t\t\t// Calling getTiddlerText() triggers lazy loading of skinny tiddlers\n\t\t\t\tvalue = this.wiki.getTiddlerText(this.selectTitle);\n\t\t\t} else {\n\t\t\t\tif($tw.utils.hop(tiddler.fields,this.selectField)) {\n\t\t\t\t\tvalue = tiddler.getFieldString(this.selectField);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif(this.selectField === \"title\") {\n\t\t\t\tvalue = this.selectTitle;\n\t\t\t}\n\t\t}\n\t}\n\t// Assign it to the select element if it's different than the current value\n\tif (this.selectMultiple) {\n\t\tvalue = value === undefined ? \"\" : value;\n\t\tvar select = this.getSelectDomNode();\n\t\tvar values = Array.isArray(value) ? 
value : $tw.utils.parseStringArray(value);\n\t\tfor(var i=0; i < select.children.length; i++){\n\t\t\tif(values.indexOf(select.children[i].value) != -1) {\n\t\t\t\tselect.children[i].selected = true;\n\t\t\t}\n\t\t}\n\t\t\n\t} else {\n\t\tvar domNode = this.getSelectDomNode();\n\t\tif(domNode.value !== value) {\n\t\t\tdomNode.value = value;\n\t\t}\n\t}\n};\n\n/*\nGet the DOM node of the select element\n*/\nSelectWidget.prototype.getSelectDomNode = function() {\n\treturn this.children[0].domNodes[0];\n};\n\n// Return an array of the selected opion values\n// select is an HTML select element\nSelectWidget.prototype.getSelectValues = function() {\n\tvar select, result, options, opt;\n\tselect = this.getSelectDomNode();\n\tresult = [];\n\toptions = select && select.options;\n\tfor (var i=0; i<options.length; i++) {\n\t\topt = options[i];\n\t\tif (opt.selected) {\n\t\t\tresult.push(opt.value || opt.text);\n\t\t}\n\t}\n\treturn result;\n}\n\n/*\nCompute the internal state of the widget\n*/\nSelectWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.selectActions = this.getAttribute(\"actions\");\n\tthis.selectTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.selectField = this.getAttribute(\"field\",\"text\");\n\tthis.selectIndex = this.getAttribute(\"index\");\n\tthis.selectClass = this.getAttribute(\"class\");\n\tthis.selectDefault = this.getAttribute(\"default\");\n\tthis.selectMultiple = this.getAttribute(\"multiple\", false);\n\tthis.selectSize = this.getAttribute(\"size\");\n\t// Make the child widgets\n\tvar selectNode = {\n\t\ttype: \"element\",\n\t\ttag: \"select\",\n\t\tchildren: this.parseTreeNode.children\n\t};\n\tif(this.selectClass) {\n\t\t$tw.utils.addAttributeToParseTreeNode(selectNode,\"class\",this.selectClass);\n\t}\n\tif(this.selectMultiple) {\n\t\t$tw.utils.addAttributeToParseTreeNode(selectNode,\"multiple\",\"multiple\");\n\t}\n\tif(this.selectSize) {\n\t\t$tw.utils.addAttributeToParseTreeNode(selectNode,\"size\",this.selectSize);\n\t}\n\tthis.makeChildWidgets([selectNode]);\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nSelectWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\t// If we're using a different tiddler/field/index then completely refresh ourselves\n\tif(changedAttributes.selectTitle || changedAttributes.selectField || changedAttributes.selectIndex) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t// If the target tiddler value has changed, just update setting and refresh the children\n\t} else {\n\t\tvar childrenRefreshed = this.refreshChildren(changedTiddlers);\n\t\tif(changedTiddlers[this.selectTitle] || childrenRefreshed) {\n\t\t\tthis.setSelectValue();\n\t\t} \n\t\treturn childrenRefreshed;\n\t}\n};\n\nexports.select = SelectWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/select.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/set.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/set.js\ntype: application/javascript\nmodule-type: widget\n\nSet variable widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar SetWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nSetWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nSetWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nSetWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.setName = this.getAttribute(\"name\",\"currentTiddler\");\n\tthis.setFilter = this.getAttribute(\"filter\");\n\tthis.setValue = this.getAttribute(\"value\");\n\tthis.setEmptyValue = this.getAttribute(\"emptyValue\");\n\t// Set context variable\n\tthis.setVariable(this.setName,this.getValue(),this.parseTreeNode.params);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nGet the value to be assigned\n*/\nSetWidget.prototype.getValue = function() {\n\tvar value = this.setValue;\n\tif(this.setFilter) {\n\t\tvar results = this.wiki.filterTiddlers(this.setFilter,this);\n\t\tif(!this.setValue) {\n\t\t\tvalue = $tw.utils.stringifyList(results);\n\t\t}\n\t\tif(results.length === 0 && this.setEmptyValue !== undefined) {\n\t\t\tvalue = this.setEmptyValue;\n\t\t}\n\t} else if(!value && this.setEmptyValue) {\n\t\tvalue = this.setEmptyValue;\n\t}\n\treturn value;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nSetWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.name || changedAttributes.filter || changedAttributes.value || changedAttributes.emptyValue ||\n\t   (this.setFilter && this.getValue() != this.variables[this.setName].value)) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.setvariable = SetWidget;\nexports.set = SetWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/set.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/text.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/text.js\ntype: application/javascript\nmodule-type: widget\n\nText node widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar TextNodeWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nTextNodeWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nTextNodeWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tvar text = this.getAttribute(\"text\",this.parseTreeNode.text || \"\");\n\ttext = text.replace(/\\r/mg,\"\");\n\tvar textNode = this.document.createTextNode(text);\n\tparent.insertBefore(textNode,nextSibling);\n\tthis.domNodes.push(textNode);\n};\n\n/*\nCompute the internal state of the widget\n*/\nTextNodeWidget.prototype.execute = function() {\n\t// Nothing to do for a text node\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nTextNodeWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.text) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.text = TextNodeWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/text.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/tiddler.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/tiddler.js\ntype: application/javascript\nmodule-type: widget\n\nTiddler widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar TiddlerWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nTiddlerWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nTiddlerWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nTiddlerWidget.prototype.execute = function() {\n\tthis.tiddlerState = this.computeTiddlerState();\n\tthis.setVariable(\"currentTiddler\",this.tiddlerState.currentTiddler);\n\tthis.setVariable(\"missingTiddlerClass\",this.tiddlerState.missingTiddlerClass);\n\tthis.setVariable(\"shadowTiddlerClass\",this.tiddlerState.shadowTiddlerClass);\n\tthis.setVariable(\"systemTiddlerClass\",this.tiddlerState.systemTiddlerClass);\n\tthis.setVariable(\"tiddlerTagClasses\",this.tiddlerState.tiddlerTagClasses);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nCompute the tiddler state flags\n*/\nTiddlerWidget.prototype.computeTiddlerState = function() {\n\t// Get our parameters\n\tthis.tiddlerTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\t// Compute the state\n\tvar state = {\n\t\tcurrentTiddler: this.tiddlerTitle || \"\",\n\t\tmissingTiddlerClass: (this.wiki.tiddlerExists(this.tiddlerTitle) || this.wiki.isShadowTiddler(this.tiddlerTitle)) ? \"tc-tiddler-exists\" : \"tc-tiddler-missing\",\n\t\tshadowTiddlerClass: this.wiki.isShadowTiddler(this.tiddlerTitle) ? \"tc-tiddler-shadow\" : \"\",\n\t\tsystemTiddlerClass: this.wiki.isSystemTiddler(this.tiddlerTitle) ? \"tc-tiddler-system\" : \"\",\n\t\ttiddlerTagClasses: this.getTagClasses()\n\t};\n\t// Compute a simple hash to make it easier to detect changes\n\tstate.hash = state.currentTiddler + state.missingTiddlerClass + state.shadowTiddlerClass + state.systemTiddlerClass + state.tiddlerTagClasses;\n\treturn state;\n};\n\n/*\nCreate a string of CSS classes derived from the tags of the current tiddler\n*/\nTiddlerWidget.prototype.getTagClasses = function() {\n\tvar tiddler = this.wiki.getTiddler(this.tiddlerTitle);\n\tif(tiddler) {\n\t\tvar tags = [];\n\t\t$tw.utils.each(tiddler.fields.tags,function(tag) {\n\t\t\ttags.push(\"tc-tagged-\" + encodeURIComponent(tag));\n\t\t});\n\t\treturn tags.join(\" \");\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nTiddlerWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes(),\n\t\tnewTiddlerState = this.computeTiddlerState();\n\tif(changedAttributes.tiddler || newTiddlerState.hash !== this.tiddlerState.hash) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.tiddler = TiddlerWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/tiddler.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/transclude.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/transclude.js\ntype: application/javascript\nmodule-type: widget\n\nTransclude widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar TranscludeWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nTranscludeWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nTranscludeWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nTranscludeWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.transcludeTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.transcludeSubTiddler = this.getAttribute(\"subtiddler\");\n\tthis.transcludeField = this.getAttribute(\"field\");\n\tthis.transcludeIndex = this.getAttribute(\"index\");\n\tthis.transcludeMode = this.getAttribute(\"mode\");\n\t// Parse the text reference\n\tvar parseAsInline = !this.parseTreeNode.isBlock;\n\tif(this.transcludeMode === \"inline\") {\n\t\tparseAsInline = true;\n\t} else if(this.transcludeMode === \"block\") {\n\t\tparseAsInline = false;\n\t}\n\tvar parser = this.wiki.parseTextReference(\n\t\t\t\t\t\tthis.transcludeTitle,\n\t\t\t\t\t\tthis.transcludeField,\n\t\t\t\t\t\tthis.transcludeIndex,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tparseAsInline: parseAsInline,\n\t\t\t\t\t\t\tsubTiddler: this.transcludeSubTiddler\n\t\t\t\t\t\t}),\n\t\tparseTreeNodes = parser ? parser.tree : this.parseTreeNode.children;\n\t// Set context variables for recursion detection\n\tvar recursionMarker = this.makeRecursionMarker();\n\tthis.setVariable(\"transclusion\",recursionMarker);\n\t// Check for recursion\n\tif(parser) {\n\t\tif(this.parentWidget && this.parentWidget.hasVariable(\"transclusion\",recursionMarker)) {\n\t\t\tparseTreeNodes = [{type: \"element\", tag: \"span\", attributes: {\n\t\t\t\t\"class\": {type: \"string\", value: \"tc-error\"}\n\t\t\t}, children: [\n\t\t\t\t{type: \"text\", text: $tw.language.getString(\"Error/RecursiveTransclusion\")}\n\t\t\t]}];\n\t\t}\n\t}\n\t// Construct the child widgets\n\tthis.makeChildWidgets(parseTreeNodes);\n};\n\n/*\nCompose a string comprising the title, field and/or index to identify this transclusion for recursion detection\n*/\nTranscludeWidget.prototype.makeRecursionMarker = function() {\n\tvar output = [];\n\toutput.push(\"{\");\n\toutput.push(this.getVariable(\"currentTiddler\",{defaultValue: \"\"}));\n\toutput.push(\"|\");\n\toutput.push(this.transcludeTitle || \"\");\n\toutput.push(\"|\");\n\toutput.push(this.transcludeField || \"\");\n\toutput.push(\"|\");\n\toutput.push(this.transcludeIndex || \"\");\n\toutput.push(\"|\");\n\toutput.push(this.transcludeSubTiddler || \"\");\n\toutput.push(\"}\");\n\treturn output.join(\"\");\n};\n\n/*\nSelectively refreshes the widget if needed. 
Returns true if the widget or any of its children needed re-rendering\n*/\nTranscludeWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedTiddlers[this.transcludeTitle]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn this.refreshChildren(changedTiddlers);\t\t\n\t}\n};\n\nexports.transclude = TranscludeWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/transclude.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/vars.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/vars.js\ntype: application/javascript\nmodule-type: widget\n\nThis widget allows multiple variables to be set in one go:\n\n```\n\\define helloworld() Hello world!\n<$vars greeting=\"Hi\" me={{!!title}} sentence=<<helloworld>>>\n  <<greeting>>! I am <<me>> and I say: <<sentence>>\n</$vars>\n```\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar VarsWidget = function(parseTreeNode,options) {\n\t// Call the constructor\n\tWidget.call(this);\n\t// Initialise\t\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nVarsWidget.prototype = Object.create(Widget.prototype);\n\n/*\nRender this widget into the DOM\n*/\nVarsWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nVarsWidget.prototype.execute = function() {\n\t// Parse variables\n\tvar self = this;\n\t$tw.utils.each(this.attributes,function(val,key) {\n\t\tif(key.charAt(0) !== \"$\") {\n\t\t\tself.setVariable(key,val);\n\t\t}\n\t});\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nRefresh the widget by ensuring our attributes are up to date\n*/\nVarsWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(Object.keys(changedAttributes).length) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t}\n\treturn this.refreshChildren(changedTiddlers);\n};\n\nexports[\"vars\"] = VarsWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/vars.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/view.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/view.js\ntype: application/javascript\nmodule-type: widget\n\nView widget\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar ViewWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nViewWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nViewWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tif(this.text) {\n\t\tvar textNode = this.document.createTextNode(this.text);\n\t\tparent.insertBefore(textNode,nextSibling);\n\t\tthis.domNodes.push(textNode);\n\t} else {\n\t\tthis.makeChildWidgets();\n\t\tthis.renderChildren(parent,nextSibling);\n\t}\n};\n\n/*\nCompute the internal state of the widget\n*/\nViewWidget.prototype.execute = function() {\n\t// Get parameters from our attributes\n\tthis.viewTitle = this.getAttribute(\"tiddler\",this.getVariable(\"currentTiddler\"));\n\tthis.viewSubtiddler = this.getAttribute(\"subtiddler\");\n\tthis.viewField = this.getAttribute(\"field\",\"text\");\n\tthis.viewIndex = this.getAttribute(\"index\");\n\tthis.viewFormat = this.getAttribute(\"format\",\"text\");\n\tthis.viewTemplate = this.getAttribute(\"template\",\"\");\n\tswitch(this.viewFormat) {\n\t\tcase \"htmlwikified\":\n\t\t\tthis.text = this.getValueAsHtmlWikified();\n\t\t\tbreak;\n\t\tcase \"plainwikified\":\n\t\t\tthis.text = this.getValueAsPlainWikified();\n\t\t\tbreak;\n\t\tcase \"htmlencodedplainwikified\":\n\t\t\tthis.text = this.getValueAsHtmlEncodedPlainWikified();\n\t\t\tbreak;\n\t\tcase \"htmlencoded\":\n\t\t\tthis.text = this.getValueAsHtmlEncoded();\n\t\t\tbreak;\n\t\tcase \"urlencoded\":\n\t\t\tthis.text = this.getValueAsUrlEncoded();\n\t\t\tbreak;\n\t\tcase \"doubleurlencoded\":\n\t\t\tthis.text = this.getValueAsDoubleUrlEncoded();\n\t\t\tbreak;\n\t\tcase \"date\":\n\t\t\tthis.text = this.getValueAsDate(this.viewTemplate);\n\t\t\tbreak;\n\t\tcase \"relativedate\":\n\t\t\tthis.text = this.getValueAsRelativeDate();\n\t\t\tbreak;\n\t\tcase \"stripcomments\":\n\t\t\tthis.text = this.getValueAsStrippedComments();\n\t\t\tbreak;\n\t\tcase \"jsencoded\":\n\t\t\tthis.text = this.getValueAsJsEncoded();\n\t\t\tbreak;\n\t\tdefault: // \"text\"\n\t\t\tthis.text = this.getValueAsText();\n\t\t\tbreak;\n\t}\n};\n\n/*\nThe various formatter functions are baked into this widget for the moment. Eventually they will be replaced by macro functions\n*/\n\n/*\nRetrieve the value of the widget. Options are:\nasString: Optionally return the value as a string\n*/\nViewWidget.prototype.getValue = function(options) {\n\toptions = options || {};\n\tvar value = options.asString ? 
\"\" : undefined;\n\tif(this.viewIndex) {\n\t\tvalue = this.wiki.extractTiddlerDataItem(this.viewTitle,this.viewIndex);\n\t} else {\n\t\tvar tiddler;\n\t\tif(this.viewSubtiddler) {\n\t\t\ttiddler = this.wiki.getSubTiddler(this.viewTitle,this.viewSubtiddler);\t\n\t\t} else {\n\t\t\ttiddler = this.wiki.getTiddler(this.viewTitle);\n\t\t}\n\t\tif(tiddler) {\n\t\t\tif(this.viewField === \"text\" && !this.viewSubtiddler) {\n\t\t\t\t// Calling getTiddlerText() triggers lazy loading of skinny tiddlers\n\t\t\t\tvalue = this.wiki.getTiddlerText(this.viewTitle);\n\t\t\t} else {\n\t\t\t\tif($tw.utils.hop(tiddler.fields,this.viewField)) {\n\t\t\t\t\tif(options.asString) {\n\t\t\t\t\t\tvalue = tiddler.getFieldString(this.viewField);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvalue = tiddler.fields[this.viewField];\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif(this.viewField === \"title\") {\n\t\t\t\tvalue = this.viewTitle;\n\t\t\t}\n\t\t}\n\t}\n\treturn value;\n};\n\nViewWidget.prototype.getValueAsText = function() {\n\treturn this.getValue({asString: true});\n};\n\nViewWidget.prototype.getValueAsHtmlWikified = function() {\n\treturn this.wiki.renderText(\"text/html\",\"text/vnd.tiddlywiki\",this.getValueAsText(),{parentWidget: this});\n};\n\nViewWidget.prototype.getValueAsPlainWikified = function() {\n\treturn this.wiki.renderText(\"text/plain\",\"text/vnd.tiddlywiki\",this.getValueAsText(),{parentWidget: this});\n};\n\nViewWidget.prototype.getValueAsHtmlEncodedPlainWikified = function() {\n\treturn $tw.utils.htmlEncode(this.wiki.renderText(\"text/plain\",\"text/vnd.tiddlywiki\",this.getValueAsText(),{parentWidget: this}));\n};\n\nViewWidget.prototype.getValueAsHtmlEncoded = function() {\n\treturn $tw.utils.htmlEncode(this.getValueAsText());\n};\n\nViewWidget.prototype.getValueAsUrlEncoded = function() {\n\treturn encodeURIComponent(this.getValueAsText());\n};\n\nViewWidget.prototype.getValueAsDoubleUrlEncoded = function() {\n\treturn encodeURIComponent(encodeURIComponent(this.getValueAsText()));\n};\n\nViewWidget.prototype.getValueAsDate = function(format) {\n\tformat = format || \"YYYY MM DD 0hh:0mm\";\n\tvar value = $tw.utils.parseDate(this.getValue());\n\tif(value && $tw.utils.isDate(value) && value.toString() !== \"Invalid Date\") {\n\t\treturn $tw.utils.formatDateString(value,format);\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\nViewWidget.prototype.getValueAsRelativeDate = function(format) {\n\tvar value = $tw.utils.parseDate(this.getValue());\n\tif(value && $tw.utils.isDate(value) && value.toString() !== \"Invalid Date\") {\n\t\treturn $tw.utils.getRelativeDate((new Date()) - (new Date(value))).description;\n\t} else {\n\t\treturn \"\";\n\t}\n};\n\nViewWidget.prototype.getValueAsStrippedComments = function() {\n\tvar lines = this.getValueAsText().split(\"\\n\"),\n\t\tout = [];\n\tfor(var line=0; line<lines.length; line++) {\n\t\tvar text = lines[line];\n\t\tif(!/^\\s*\\/\\/#/.test(text)) {\n\t\t\tout.push(text);\n\t\t}\n\t}\n\treturn out.join(\"\\n\");\n};\n\nViewWidget.prototype.getValueAsJsEncoded = function() {\n\treturn $tw.utils.stringify(this.getValueAsText());\n};\n\n/*\nSelectively refreshes the widget if needed. 
Returns true if the widget or any of its children needed re-rendering\n*/\nViewWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\tif(changedAttributes.tiddler || changedAttributes.field || changedAttributes.index || changedAttributes.template || changedAttributes.format || changedTiddlers[this.viewTitle]) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\treturn false;\t\n\t}\n};\n\nexports.view = ViewWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/view.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/widget.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/widget.js\ntype: application/javascript\nmodule-type: widget\n\nWidget base class\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nCreate a widget object for a parse tree node\n\tparseTreeNode: reference to the parse tree node to be rendered\n\toptions: see below\nOptions include:\n\twiki: mandatory reference to wiki associated with this render tree\n\tparentWidget: optional reference to a parent renderer node for the context chain\n\tdocument: optional document object to use instead of global document\n*/\nvar Widget = function(parseTreeNode,options) {\n\tif(arguments.length > 0) {\n\t\tthis.initialise(parseTreeNode,options);\n\t}\n};\n\n/*\nInitialise widget properties. These steps are pulled out of the constructor so that we can reuse them in subclasses\n*/\nWidget.prototype.initialise = function(parseTreeNode,options) {\n\toptions = options || {};\n\t// Save widget info\n\tthis.parseTreeNode = parseTreeNode;\n\tthis.wiki = options.wiki;\n\tthis.parentWidget = options.parentWidget;\n\tthis.variablesConstructor = function() {};\n\tthis.variablesConstructor.prototype = this.parentWidget ? this.parentWidget.variables : {};\n\tthis.variables = new this.variablesConstructor();\n\tthis.document = options.document;\n\tthis.attributes = {};\n\tthis.children = [];\n\tthis.domNodes = [];\n\tthis.eventListeners = {};\n\t// Hashmap of the widget classes\n\tif(!this.widgetClasses) {\n\t\tWidget.prototype.widgetClasses = $tw.modules.applyMethods(\"widget\");\n\t}\n};\n\n/*\nRender this widget into the DOM\n*/\nWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nWidget.prototype.execute = function() {\n\tthis.makeChildWidgets();\n};\n\n/*\nSet the value of a context variable\nname: name of the variable\nvalue: value of the variable\nparams: array of {name:, default:} for each parameter\n*/\nWidget.prototype.setVariable = function(name,value,params) {\n\tthis.variables[name] = {value: value, params: params};\n};\n\n/*\nGet the prevailing value of a context variable\nname: name of variable\noptions: see below\nOptions include\nparams: array of {name:, value:} for each parameter\ndefaultValue: default value if the variable is not defined\n*/\nWidget.prototype.getVariable = function(name,options) {\n\toptions = options || {};\n\tvar actualParams = options.params || [],\n\t\tparentWidget = this.parentWidget;\n\t// Check for the variable defined in the parent widget (or an ancestor in the prototype chain)\n\tif(parentWidget && name in parentWidget.variables) {\n\t\tvar variable = parentWidget.variables[name],\n\t\t\tvalue = variable.value;\n\t\t// Substitute any parameters specified in the definition\n\t\tvalue = this.substituteVariableParameters(value,variable.params,actualParams);\n\t\tvalue = this.substituteVariableReferences(value);\n\t\treturn value;\n\t}\n\t// If the variable doesn't exist in the parent widget then look for a macro module\n\treturn this.evaluateMacroModule(name,actualParams,options.defaultValue);\n};\n\nWidget.prototype.substituteVariableParameters = function(text,formalParams,actualParams) {\n\tif(formalParams) {\n\t\tvar nextAnonParameter = 0, // Next candidate anonymous parameter in macro call\n\t\t\tparamInfo, paramValue;\n\t\t// Step through each of the parameters in the macro definition\n\t\tfor(var p=0; 
p<formalParams.length; p++) {\n\t\t\t// Check if we've got a macro call parameter with the same name\n\t\t\tparamInfo = formalParams[p];\n\t\t\tparamValue = undefined;\n\t\t\tfor(var m=0; m<actualParams.length; m++) {\n\t\t\t\tif(actualParams[m].name === paramInfo.name) {\n\t\t\t\t\tparamValue = actualParams[m].value;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If not, use the next available anonymous macro call parameter\n\t\t\twhile(nextAnonParameter < actualParams.length && actualParams[nextAnonParameter].name) {\n\t\t\t\tnextAnonParameter++;\n\t\t\t}\n\t\t\tif(paramValue === undefined && nextAnonParameter < actualParams.length) {\n\t\t\t\tparamValue = actualParams[nextAnonParameter++].value;\n\t\t\t}\n\t\t\t// If we've still not got a value, use the default, if any\n\t\t\tparamValue = paramValue || paramInfo[\"default\"] || \"\";\n\t\t\t// Replace any instances of this parameter\n\t\t\ttext = text.replace(new RegExp(\"\\\\$\" + $tw.utils.escapeRegExp(paramInfo.name) + \"\\\\$\",\"mg\"),paramValue);\n\t\t}\n\t}\n\treturn text;\n};\n\nWidget.prototype.substituteVariableReferences = function(text) {\n\tvar self = this;\n\treturn (text || \"\").replace(/\\$\\(([^\\)\\$]+)\\)\\$/g,function(match,p1,offset,string) {\n\t\treturn self.getVariable(p1,{defaultValue: \"\"});\n\t});\n};\n\nWidget.prototype.evaluateMacroModule = function(name,actualParams,defaultValue) {\n\tif($tw.utils.hop($tw.macros,name)) {\n\t\tvar macro = $tw.macros[name],\n\t\t\targs = [];\n\t\tif(macro.params.length > 0) {\n\t\t\tvar nextAnonParameter = 0, // Next candidate anonymous parameter in macro call\n\t\t\t\tparamInfo, paramValue;\n\t\t\t// Step through each of the parameters in the macro definition\n\t\t\tfor(var p=0; p<macro.params.length; p++) {\n\t\t\t\t// Check if we've got a macro call parameter with the same name\n\t\t\t\tparamInfo = macro.params[p];\n\t\t\t\tparamValue = undefined;\n\t\t\t\tfor(var m=0; m<actualParams.length; m++) {\n\t\t\t\t\tif(actualParams[m].name === paramInfo.name) {\n\t\t\t\t\t\tparamValue = actualParams[m].value;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// If not, use the next available anonymous macro call parameter\n\t\t\t\twhile(nextAnonParameter < actualParams.length && actualParams[nextAnonParameter].name) {\n\t\t\t\t\tnextAnonParameter++;\n\t\t\t\t}\n\t\t\t\tif(paramValue === undefined && nextAnonParameter < actualParams.length) {\n\t\t\t\t\tparamValue = actualParams[nextAnonParameter++].value;\n\t\t\t\t}\n\t\t\t\t// If we've still not got a value, use the default, if any\n\t\t\t\tparamValue = paramValue || paramInfo[\"default\"] || \"\";\n\t\t\t\t// Save the parameter\n\t\t\t\targs.push(paramValue);\n\t\t\t}\n\t\t}\n\t\telse for(var i=0; i<actualParams.length; ++i) {\n\t\t\targs.push(actualParams[i].value);\n\t\t}\n\t\treturn (macro.run.apply(this,args) || \"\").toString();\n\t} else {\n\t\treturn defaultValue;\n\t}\n};\n\n/*\nCheck whether a given context variable value exists in the parent chain\n*/\nWidget.prototype.hasVariable = function(name,value) {\n\tvar node = this;\n\twhile(node) {\n\t\tif($tw.utils.hop(node.variables,name) && node.variables[name].value === value) {\n\t\t\treturn true;\n\t\t}\n\t\tnode = node.parentWidget;\n\t}\n\treturn false;\n};\n\n/*\nConstruct a qualifying string based on a hash of concatenating the values of a given variable in the parent chain\n*/\nWidget.prototype.getStateQualifier = function(name) {\n\tthis.qualifiers = this.qualifiers || Object.create(null);\n\tname = name || \"transclusion\";\n\tif(this.qualifiers[name]) {\n\t\treturn this.qualifiers[name];\n\t} 
else {\n\t\tvar output = [],\n\t\t\tnode = this;\n\t\twhile(node && node.parentWidget) {\n\t\t\tif($tw.utils.hop(node.parentWidget.variables,name)) {\n\t\t\t\toutput.push(node.getVariable(name));\n\t\t\t}\n\t\t\tnode = node.parentWidget;\n\t\t}\n\t\tvar value = $tw.utils.hashString(output.join(\"\"));\n\t\tthis.qualifiers[name] = value;\n\t\treturn value;\n\t}\n};\n\n/*\nCompute the current values of the attributes of the widget. Returns a hashmap of the names of the attributes that have changed\n*/\nWidget.prototype.computeAttributes = function() {\n\tvar changedAttributes = {},\n\t\tself = this,\n\t\tvalue;\n\t$tw.utils.each(this.parseTreeNode.attributes,function(attribute,name) {\n\t\tif(attribute.type === \"indirect\") {\n\t\t\tvalue = self.wiki.getTextReference(attribute.textReference,\"\",self.getVariable(\"currentTiddler\"));\n\t\t} else if(attribute.type === \"macro\") {\n\t\t\tvalue = self.getVariable(attribute.value.name,{params: attribute.value.params});\n\t\t} else { // String attribute\n\t\t\tvalue = attribute.value;\n\t\t}\n\t\t// Check whether the attribute has changed\n\t\tif(self.attributes[name] !== value) {\n\t\t\tself.attributes[name] = value;\n\t\t\tchangedAttributes[name] = true;\n\t\t}\n\t});\n\treturn changedAttributes;\n};\n\n/*\nCheck for the presence of an attribute\n*/\nWidget.prototype.hasAttribute = function(name) {\n\treturn $tw.utils.hop(this.attributes,name);\n};\n\n/*\nGet the value of an attribute\n*/\nWidget.prototype.getAttribute = function(name,defaultText) {\n\tif($tw.utils.hop(this.attributes,name)) {\n\t\treturn this.attributes[name];\n\t} else {\n\t\treturn defaultText;\n\t}\n};\n\n/*\nAssign the computed attributes of the widget to a domNode\noptions include:\nexcludeEventAttributes: ignores attributes whose name begins with \"on\"\n*/\nWidget.prototype.assignAttributes = function(domNode,options) {\n\toptions = options || {};\n\tvar self = this;\n\t$tw.utils.each(this.attributes,function(v,a) {\n\t\t// Check exclusions\n\t\tif(options.excludeEventAttributes && a.substr(0,2) === \"on\") {\n\t\t\tv = undefined;\n\t\t}\n\t\tif(v !== undefined) {\n\t\t\tvar b = a.split(\":\");\n\t\t\t// Setting certain attributes can cause a DOM error (eg xmlns on the svg element)\n\t\t\ttry {\n\t\t\t\tif (b.length == 2 && b[0] == \"xlink\"){\n\t\t\t\t\tdomNode.setAttributeNS(\"http://www.w3.org/1999/xlink\",b[1],v);\n\t\t\t\t} else {\n\t\t\t\t\tdomNode.setAttributeNS(null,a,v);\n\t\t\t\t}\n\t\t\t} catch(e) {\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\nMake child widgets correspondng to specified parseTreeNodes\n*/\nWidget.prototype.makeChildWidgets = function(parseTreeNodes) {\n\tthis.children = [];\n\tvar self = this;\n\t$tw.utils.each(parseTreeNodes || (this.parseTreeNode && this.parseTreeNode.children),function(childNode) {\n\t\tself.children.push(self.makeChildWidget(childNode));\n\t});\n};\n\n/*\nConstruct the widget object for a parse tree node\n*/\nWidget.prototype.makeChildWidget = function(parseTreeNode) {\n\tvar WidgetClass = this.widgetClasses[parseTreeNode.type];\n\tif(!WidgetClass) {\n\t\tWidgetClass = this.widgetClasses.text;\n\t\tparseTreeNode = {type: \"text\", text: \"Undefined widget '\" + parseTreeNode.type + \"'\"};\n\t}\n\treturn new WidgetClass(parseTreeNode,{\n\t\twiki: this.wiki,\n\t\tvariables: {},\n\t\tparentWidget: this,\n\t\tdocument: this.document\n\t});\n};\n\n/*\nGet the next sibling of this widget\n*/\nWidget.prototype.nextSibling = function() {\n\tif(this.parentWidget) {\n\t\tvar index = this.parentWidget.children.indexOf(this);\n\t\tif(index 
!== -1 && index < this.parentWidget.children.length-1) {\n\t\t\treturn this.parentWidget.children[index+1];\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nGet the previous sibling of this widget\n*/\nWidget.prototype.previousSibling = function() {\n\tif(this.parentWidget) {\n\t\tvar index = this.parentWidget.children.indexOf(this);\n\t\tif(index !== -1 && index > 0) {\n\t\t\treturn this.parentWidget.children[index-1];\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nRender the children of this widget into the DOM\n*/\nWidget.prototype.renderChildren = function(parent,nextSibling) {\n\t$tw.utils.each(this.children,function(childWidget) {\n\t\tchildWidget.render(parent,nextSibling);\n\t});\n};\n\n/*\nAdd a list of event listeners from an array [{type:,handler:},...]\n*/\nWidget.prototype.addEventListeners = function(listeners) {\n\tvar self = this;\n\t$tw.utils.each(listeners,function(listenerInfo) {\n\t\tself.addEventListener(listenerInfo.type,listenerInfo.handler);\n\t});\n};\n\n/*\nAdd an event listener\n*/\nWidget.prototype.addEventListener = function(type,handler) {\n\tvar self = this;\n\tif(typeof handler === \"string\") { // The handler is a method name on this widget\n\t\tthis.eventListeners[type] = function(event) {\n\t\t\treturn self[handler].call(self,event);\n\t\t};\n\t} else { // The handler is a function\n\t\tthis.eventListeners[type] = function(event) {\n\t\t\treturn handler.call(self,event);\n\t\t};\n\t}\n};\n\n/*\nDispatch an event to a widget. If the widget doesn't handle the event then it is also dispatched to the parent widget\n*/\nWidget.prototype.dispatchEvent = function(event) {\n\t// Dispatch the event if this widget handles it\n\tvar listener = this.eventListeners[event.type];\n\tif(listener) {\n\t\t// Don't propagate the event if the listener returned false\n\t\tif(!listener(event)) {\n\t\t\treturn false;\n\t\t}\n\t}\n\t// Dispatch the event to the parent widget\n\tif(this.parentWidget) {\n\t\treturn this.parentWidget.dispatchEvent(event);\n\t}\n\treturn true;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nWidget.prototype.refresh = function(changedTiddlers) {\n\treturn this.refreshChildren(changedTiddlers);\n};\n\n/*\nRebuild a previously rendered widget\n*/\nWidget.prototype.refreshSelf = function() {\n\tvar nextSibling = this.findNextSiblingDomNode();\n\tthis.removeChildDomNodes();\n\tthis.render(this.parentDomNode,nextSibling);\n};\n\n/*\nRefresh all the children of a widget\n*/\nWidget.prototype.refreshChildren = function(changedTiddlers) {\n\tvar self = this,\n\t\trefreshed = false;\n\t$tw.utils.each(this.children,function(childWidget) {\n\t\trefreshed = childWidget.refresh(changedTiddlers) || refreshed;\n\t});\n\treturn refreshed;\n};\n\n/*\nFind the next sibling in the DOM to this widget. This is done by scanning the widget tree through all next siblings and their descendents that share the same parent DOM node\n*/\nWidget.prototype.findNextSiblingDomNode = function(startIndex) {\n\t// Refer to this widget by its index within its parents children\n\tvar parent = this.parentWidget,\n\t\tindex = startIndex !== undefined ? 
startIndex : parent.children.indexOf(this);\nif(index === -1) {\n\tthrow \"node not found in parents children\";\n}\n\t// Look for a DOM node in the later siblings\n\twhile(++index < parent.children.length) {\n\t\tvar domNode = parent.children[index].findFirstDomNode();\n\t\tif(domNode) {\n\t\t\treturn domNode;\n\t\t}\n\t}\n\t// Go back and look for later siblings of our parent if it has the same parent dom node\n\tvar grandParent = parent.parentWidget;\n\tif(grandParent && parent.parentDomNode === this.parentDomNode) {\n\t\tindex = grandParent.children.indexOf(parent);\n\t\tif(index !== -1) {\n\t\t\treturn parent.findNextSiblingDomNode(index);\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nFind the first DOM node generated by a widget or its children\n*/\nWidget.prototype.findFirstDomNode = function() {\n\t// Return the first dom node of this widget, if we've got one\n\tif(this.domNodes.length > 0) {\n\t\treturn this.domNodes[0];\n\t}\n\t// Otherwise, recursively call our children\n\tfor(var t=0; t<this.children.length; t++) {\n\t\tvar domNode = this.children[t].findFirstDomNode();\n\t\tif(domNode) {\n\t\t\treturn domNode;\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nRemove any DOM nodes created by this widget or its children\n*/\nWidget.prototype.removeChildDomNodes = function() {\n\t// If this widget has directly created DOM nodes, delete them and exit. This assumes that any child widgets are contained within the created DOM nodes, which would normally be the case\n\tif(this.domNodes.length > 0) {\n\t\t$tw.utils.each(this.domNodes,function(domNode) {\n\t\t\tdomNode.parentNode.removeChild(domNode);\n\t\t});\n\t\tthis.domNodes = [];\n\t} else {\n\t\t// Otherwise, ask the child widgets to delete their DOM nodes\n\t\t$tw.utils.each(this.children,function(childWidget) {\n\t\t\tchildWidget.removeChildDomNodes();\n\t\t});\n\t}\n};\n\n/*\nInvoke the action widgets that are descendents of the current widget.\n*/\nWidget.prototype.invokeActions = function(triggeringWidget,event) {\n\tvar handled = false;\n\t// For each child widget\n\tfor(var t=0; t<this.children.length; t++) {\n\t\tvar child = this.children[t];\n\t\t// Invoke the child if it is an action widget\n\t\tif(child.invokeAction && child.invokeAction(triggeringWidget,event)) {\n\t\t\thandled = true;\n\t\t}\n\t\t// Propagate through through the child if it permits it\n\t\tif(child.allowActionPropagation() && child.invokeActions(triggeringWidget,event)) {\n\t\t\thandled = true;\n\t\t}\n\t}\n\treturn handled;\n};\n\n/*\nInvoke the action widgets defined in a string\n*/\nWidget.prototype.invokeActionString = function(actions,triggeringWidget,event) {\n\tactions = actions || \"\";\n\tvar parser = this.wiki.parseText(\"text/vnd.tiddlywiki\",actions,{\n\t\t\tparentWidget: this,\n\t\t\tdocument: this.document\n\t\t}),\n\t\twidgetNode = this.wiki.makeWidget(parser,{\n\t\t\tparentWidget: this,\n\t\t\tdocument: this.document\n\t\t});\n\tvar container = this.document.createElement(\"div\");\n\twidgetNode.render(container,null);\n\treturn widgetNode.invokeActions(this,event);\n};\n\nWidget.prototype.allowActionPropagation = function() {\n\treturn true;\n};\n\nexports.widget = Widget;\n\n})();\n",
            "title": "$:/core/modules/widgets/widget.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/widgets/wikify.js": {
            "text": "/*\\\ntitle: $:/core/modules/widgets/wikify.js\ntype: application/javascript\nmodule-type: widget\n\nWidget to wikify text into a variable\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar Widget = require(\"$:/core/modules/widgets/widget.js\").widget;\n\nvar WikifyWidget = function(parseTreeNode,options) {\n\tthis.initialise(parseTreeNode,options);\n};\n\n/*\nInherit from the base widget class\n*/\nWikifyWidget.prototype = new Widget();\n\n/*\nRender this widget into the DOM\n*/\nWikifyWidget.prototype.render = function(parent,nextSibling) {\n\tthis.parentDomNode = parent;\n\tthis.computeAttributes();\n\tthis.execute();\n\tthis.renderChildren(parent,nextSibling);\n};\n\n/*\nCompute the internal state of the widget\n*/\nWikifyWidget.prototype.execute = function() {\n\t// Get our parameters\n\tthis.wikifyName = this.getAttribute(\"name\");\n\tthis.wikifyText = this.getAttribute(\"text\");\n\tthis.wikifyType = this.getAttribute(\"type\");\n\tthis.wikifyMode = this.getAttribute(\"mode\",\"block\");\n\tthis.wikifyOutput = this.getAttribute(\"output\",\"text\");\n\t// Create the parse tree\n\tthis.wikifyParser = this.wiki.parseText(this.wikifyType,this.wikifyText,{\n\t\t\tparseAsInline: this.wikifyMode === \"inline\"\n\t\t});\n\t// Create the widget tree \n\tthis.wikifyWidgetNode = this.wiki.makeWidget(this.wikifyParser,{\n\t\t\tdocument: $tw.fakeDocument,\n\t\t\tparentWidget: this\n\t\t});\n\t// Render the widget tree to the container\n\tthis.wikifyContainer = $tw.fakeDocument.createElement(\"div\");\n\tthis.wikifyWidgetNode.render(this.wikifyContainer,null);\n\tthis.wikifyResult = this.getResult();\n\t// Set context variable\n\tthis.setVariable(this.wikifyName,this.wikifyResult);\n\t// Construct the child widgets\n\tthis.makeChildWidgets();\n};\n\n/*\nReturn the result string\n*/\nWikifyWidget.prototype.getResult = function() {\n\tvar result;\n\tswitch(this.wikifyOutput) {\n\t\tcase \"text\":\n\t\t\tresult = this.wikifyContainer.textContent;\n\t\t\tbreak;\n\t\tcase \"html\":\n\t\t\tresult = this.wikifyContainer.innerHTML;\n\t\t\tbreak;\n\t\tcase \"parsetree\":\n\t\t\tresult = JSON.stringify(this.wikifyParser.tree,0,$tw.config.preferences.jsonSpaces);\n\t\t\tbreak;\n\t\tcase \"widgettree\":\n\t\t\tresult = JSON.stringify(this.getWidgetTree(),0,$tw.config.preferences.jsonSpaces);\n\t\t\tbreak;\n\t}\n\treturn result;\n};\n\n/*\nReturn a string of the widget tree\n*/\nWikifyWidget.prototype.getWidgetTree = function() {\n\tvar copyNode = function(widgetNode,resultNode) {\n\t\t\tvar type = widgetNode.parseTreeNode.type;\n\t\t\tresultNode.type = type;\n\t\t\tswitch(type) {\n\t\t\t\tcase \"element\":\n\t\t\t\t\tresultNode.tag = widgetNode.parseTreeNode.tag;\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"text\":\n\t\t\t\t\tresultNode.text = widgetNode.parseTreeNode.text;\n\t\t\t\t\tbreak;\t\n\t\t\t}\n\t\t\tif(Object.keys(widgetNode.attributes || {}).length > 0) {\n\t\t\t\tresultNode.attributes = {};\n\t\t\t\t$tw.utils.each(widgetNode.attributes,function(attr,attrName) {\n\t\t\t\t\tresultNode.attributes[attrName] = widgetNode.getAttribute(attrName);\n\t\t\t\t});\n\t\t\t}\n\t\t\tif(Object.keys(widgetNode.children || {}).length > 0) {\n\t\t\t\tresultNode.children = [];\n\t\t\t\t$tw.utils.each(widgetNode.children,function(widgetChildNode) {\n\t\t\t\t\tvar node = {};\n\t\t\t\t\tresultNode.children.push(node);\n\t\t\t\t\tcopyNode(widgetChildNode,node);\n\t\t\t\t});\n\t\t\t}\n\t\t},\n\t\tresults = 
{};\n\tcopyNode(this.wikifyWidgetNode,results);\n\treturn results;\n};\n\n/*\nSelectively refreshes the widget if needed. Returns true if the widget or any of its children needed re-rendering\n*/\nWikifyWidget.prototype.refresh = function(changedTiddlers) {\n\tvar changedAttributes = this.computeAttributes();\n\t// Refresh ourselves entirely if any of our attributes have changed\n\tif(changedAttributes.name || changedAttributes.text || changedAttributes.type || changedAttributes.mode || changedAttributes.output) {\n\t\tthis.refreshSelf();\n\t\treturn true;\n\t} else {\n\t\t// Refresh the widget tree\n\t\tif(this.wikifyWidgetNode.refresh(changedTiddlers)) {\n\t\t\t// Check if there was any change\n\t\t\tvar result = this.getResult();\n\t\t\tif(result !== this.wikifyResult) {\n\t\t\t\t// If so, save the change\n\t\t\t\tthis.wikifyResult = result;\n\t\t\t\tthis.setVariable(this.wikifyName,this.wikifyResult);\n\t\t\t\t// Refresh each of our child widgets\n\t\t\t\t$tw.utils.each(this.children,function(childWidget) {\n\t\t\t\t\tchildWidget.refreshSelf();\n\t\t\t\t});\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t\t// Just refresh the children\n\t\treturn this.refreshChildren(changedTiddlers);\n\t}\n};\n\nexports.wikify = WikifyWidget;\n\n})();\n",
            "title": "$:/core/modules/widgets/wikify.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/core/modules/wiki-bulkops.js": {
            "text": "/*\\\ntitle: $:/core/modules/wiki-bulkops.js\ntype: application/javascript\nmodule-type: wikimethod\n\nBulk tiddler operations such as rename.\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\n/*\nRename a tiddler, and relink any tags or lists that reference it.\n*/\nexports.renameTiddler = function(fromTitle,toTitle) {\n\tvar self = this;\n\tfromTitle = (fromTitle || \"\").trim();\n\ttoTitle = (toTitle || \"\").trim();\n\tif(fromTitle && toTitle && fromTitle !== toTitle) {\n\t\t// Rename the tiddler itself\n\t\tvar tiddler = this.getTiddler(fromTitle);\n\t\tthis.addTiddler(new $tw.Tiddler(tiddler,{title: toTitle},this.getModificationFields()));\n\t\tthis.deleteTiddler(fromTitle);\n\t\t// Rename any tags or lists that reference it\n\t\tthis.each(function(tiddler,title) {\n\t\t\tvar tags = (tiddler.fields.tags || []).slice(0),\n\t\t\t\tlist = (tiddler.fields.list || []).slice(0),\n\t\t\t\tisModified = false;\n\t\t\t// Rename tags\n\t\t\t$tw.utils.each(tags,function (title,index) {\n\t\t\t\tif(title === fromTitle) {\n\t\t\t\t\ttags[index] = toTitle;\n\t\t\t\t\tisModified = true;\n\t\t\t\t}\n\t\t\t});\n\t\t\t// Rename lists\n\t\t\t$tw.utils.each(list,function (title,index) {\n\t\t\t\tif(title === fromTitle) {\n\t\t\t\t\tlist[index] = toTitle;\n\t\t\t\t\tisModified = true;\n\t\t\t\t}\n\t\t\t});\n\t\t\tif(isModified) {\n\t\t\t\tself.addTiddler(new $tw.Tiddler(tiddler,{tags: tags, list: list},self.getModificationFields()));\n\t\t\t}\n\t\t});\n\t}\n}\n\n})();\n",
            "title": "$:/core/modules/wiki-bulkops.js",
            "type": "application/javascript",
            "module-type": "wikimethod"
        },
        "$:/core/modules/wiki.js": {
            "text": "/*\\\ntitle: $:/core/modules/wiki.js\ntype: application/javascript\nmodule-type: wikimethod\n\nExtension methods for the $tw.Wiki object\n\nAdds the following properties to the wiki object:\n\n* `eventListeners` is a hashmap by type of arrays of listener functions\n* `changedTiddlers` is a hashmap describing changes to named tiddlers since wiki change events were last dispatched. Each entry is a hashmap containing two fields:\n\tmodified: true/false\n\tdeleted: true/false\n* `changeCount` is a hashmap by tiddler title containing a numerical index that starts at zero and is incremented each time a tiddler is created changed or deleted\n* `caches` is a hashmap by tiddler title containing a further hashmap of named cache objects. Caches are automatically cleared when a tiddler is modified or deleted\n* `globalCache` is a hashmap by cache name of cache objects that are cleared whenever any tiddler change occurs\n\n\\*/\n(function(){\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar widget = require(\"$:/core/modules/widgets/widget.js\");\n\nvar USER_NAME_TITLE = \"$:/status/UserName\";\n\n/*\nGet the value of a text reference. Text references can have any of these forms:\n\t<tiddlertitle>\n\t<tiddlertitle>!!<fieldname>\n\t!!<fieldname> - specifies a field of the current tiddlers\n\t<tiddlertitle>##<index>\n*/\nexports.getTextReference = function(textRef,defaultText,currTiddlerTitle) {\n\tvar tr = $tw.utils.parseTextReference(textRef),\n\t\ttitle = tr.title || currTiddlerTitle;\n\tif(tr.field) {\n\t\tvar tiddler = this.getTiddler(title);\n\t\tif(tr.field === \"title\") { // Special case so we can return the title of a non-existent tiddler\n\t\t\treturn title;\n\t\t} else if(tiddler && $tw.utils.hop(tiddler.fields,tr.field)) {\n\t\t\treturn tiddler.getFieldString(tr.field);\n\t\t} else {\n\t\t\treturn defaultText;\n\t\t}\n\t} else if(tr.index) {\n\t\treturn this.extractTiddlerDataItem(title,tr.index,defaultText);\n\t} else {\n\t\treturn this.getTiddlerText(title,defaultText);\n\t}\n};\n\nexports.setTextReference = function(textRef,value,currTiddlerTitle) {\n\tvar tr = $tw.utils.parseTextReference(textRef),\n\t\ttitle = tr.title || currTiddlerTitle;\n\tthis.setText(title,tr.field,tr.index,value);\n};\n\nexports.setText = function(title,field,index,value,options) {\n\toptions = options || {};\n\tvar creationFields = options.suppressTimestamp ? {} : this.getCreationFields(),\n\t\tmodificationFields = options.suppressTimestamp ? 
{} : this.getModificationFields();\n\t// Check if it is a reference to a tiddler field\n\tif(index) {\n\t\tvar data = this.getTiddlerData(title,Object.create(null));\n\t\tif(value !== undefined) {\n\t\t\tdata[index] = value;\n\t\t} else {\n\t\t\tdelete data[index];\n\t\t}\n\t\tthis.setTiddlerData(title,data,modificationFields);\n\t} else {\n\t\tvar tiddler = this.getTiddler(title),\n\t\t\tfields = {title: title};\n\t\tfields[field || \"text\"] = value;\n\t\tthis.addTiddler(new $tw.Tiddler(creationFields,tiddler,fields,modificationFields));\n\t}\n};\n\nexports.deleteTextReference = function(textRef,currTiddlerTitle) {\n\tvar tr = $tw.utils.parseTextReference(textRef),\n\t\ttitle,tiddler,fields;\n\t// Check if it is a reference to a tiddler\n\tif(tr.title && !tr.field) {\n\t\tthis.deleteTiddler(tr.title);\n\t// Else check for a field reference\n\t} else if(tr.field) {\n\t\ttitle = tr.title || currTiddlerTitle;\n\t\ttiddler = this.getTiddler(title);\n\t\tif(tiddler && $tw.utils.hop(tiddler.fields,tr.field)) {\n\t\t\tfields = Object.create(null);\n\t\t\tfields[tr.field] = undefined;\n\t\t\tthis.addTiddler(new $tw.Tiddler(tiddler,fields,this.getModificationFields()));\n\t\t}\n\t}\n};\n\nexports.addEventListener = function(type,listener) {\n\tthis.eventListeners = this.eventListeners || {};\n\tthis.eventListeners[type] = this.eventListeners[type]  || [];\n\tthis.eventListeners[type].push(listener);\t\n};\n\nexports.removeEventListener = function(type,listener) {\n\tvar listeners = this.eventListeners[type];\n\tif(listeners) {\n\t\tvar p = listeners.indexOf(listener);\n\t\tif(p !== -1) {\n\t\t\tlisteners.splice(p,1);\n\t\t}\n\t}\n};\n\nexports.dispatchEvent = function(type /*, args */) {\n\tvar args = Array.prototype.slice.call(arguments,1),\n\t\tlisteners = this.eventListeners[type];\n\tif(listeners) {\n\t\tfor(var p=0; p<listeners.length; p++) {\n\t\t\tvar listener = listeners[p];\n\t\t\tlistener.apply(listener,args);\n\t\t}\n\t}\n};\n\n/*\nCauses a tiddler to be marked as changed, incrementing the change count, and triggers event handlers.\nThis method should be called after the changes it describes have been made to the wiki.tiddlers[] array.\n\ttitle: Title of tiddler\n\tisDeleted: defaults to false (meaning the tiddler has been created or modified),\n\t\ttrue if the tiddler has been deleted\n*/\nexports.enqueueTiddlerEvent = function(title,isDeleted) {\n\t// Record the touch in the list of changed tiddlers\n\tthis.changedTiddlers = this.changedTiddlers || Object.create(null);\n\tthis.changedTiddlers[title] = this.changedTiddlers[title] || Object.create(null);\n\tthis.changedTiddlers[title][isDeleted ? 
\"deleted\" : \"modified\"] = true;\n\t// Increment the change count\n\tthis.changeCount = this.changeCount || Object.create(null);\n\tif($tw.utils.hop(this.changeCount,title)) {\n\t\tthis.changeCount[title]++;\n\t} else {\n\t\tthis.changeCount[title] = 1;\n\t}\n\t// Trigger events\n\tthis.eventListeners = this.eventListeners || {};\n\tif(!this.eventsTriggered) {\n\t\tvar self = this;\n\t\t$tw.utils.nextTick(function() {\n\t\t\tvar changes = self.changedTiddlers;\n\t\t\tself.changedTiddlers = Object.create(null);\n\t\t\tself.eventsTriggered = false;\n\t\t\tif($tw.utils.count(changes) > 0) {\n\t\t\t\tself.dispatchEvent(\"change\",changes);\n\t\t\t}\n\t\t});\n\t\tthis.eventsTriggered = true;\n\t}\n};\n\nexports.getSizeOfTiddlerEventQueue = function() {\n\treturn $tw.utils.count(this.changedTiddlers);\n};\n\nexports.clearTiddlerEventQueue = function() {\n\tthis.changedTiddlers = Object.create(null);\n\tthis.changeCount = Object.create(null);\n};\n\nexports.getChangeCount = function(title) {\n\tthis.changeCount = this.changeCount || Object.create(null);\n\tif($tw.utils.hop(this.changeCount,title)) {\n\t\treturn this.changeCount[title];\n\t} else {\n\t\treturn 0;\n\t}\n};\n\n/*\nGenerate an unused title from the specified base\n*/\nexports.generateNewTitle = function(baseTitle,options) {\n\toptions = options || {};\n\tvar c = 0,\n\t\ttitle = baseTitle;\n\twhile(this.tiddlerExists(title) || this.isShadowTiddler(title) || this.findDraft(title)) {\n\t\ttitle = baseTitle + \n\t\t\t(options.prefix || \" \") + \n\t\t\t(++c);\n\t}\n\treturn title;\n};\n\nexports.isSystemTiddler = function(title) {\n\treturn title && title.indexOf(\"$:/\") === 0;\n};\n\nexports.isTemporaryTiddler = function(title) {\n\treturn title && title.indexOf(\"$:/temp/\") === 0;\n};\n\nexports.isImageTiddler = function(title) {\n\tvar tiddler = this.getTiddler(title);\n\tif(tiddler) {\t\t\n\t\tvar contentTypeInfo = $tw.config.contentTypeInfo[tiddler.fields.type || \"text/vnd.tiddlywiki\"];\n\t\treturn !!contentTypeInfo && contentTypeInfo.flags.indexOf(\"image\") !== -1;\n\t} else {\n\t\treturn null;\n\t}\n};\n\n/*\nLike addTiddler() except it will silently reject any plugin tiddlers that are older than the currently loaded version. 
Returns true if the tiddler was imported\n*/\nexports.importTiddler = function(tiddler) {\n\tvar existingTiddler = this.getTiddler(tiddler.fields.title);\n\t// Check if we're dealing with a plugin\n\tif(tiddler && tiddler.hasField(\"plugin-type\") && tiddler.hasField(\"version\") && existingTiddler && existingTiddler.hasField(\"plugin-type\") && existingTiddler.hasField(\"version\")) {\n\t\t// Reject the incoming plugin if it is older\n\t\tif(!$tw.utils.checkVersions(tiddler.fields.version,existingTiddler.fields.version)) {\n\t\t\treturn false;\n\t\t}\n\t}\n\t// Fall through to adding the tiddler\n\tthis.addTiddler(tiddler);\n\treturn true;\n};\n\n/*\nReturn a hashmap of the fields that should be set when a tiddler is created\n*/\nexports.getCreationFields = function() {\n\tvar fields = {\n\t\t\tcreated: new Date()\n\t\t},\n\t\tcreator = this.getTiddlerText(USER_NAME_TITLE);\n\tif(creator) {\n\t\tfields.creator = creator;\n\t}\n\treturn fields;\n};\n\n/*\nReturn a hashmap of the fields that should be set when a tiddler is modified\n*/\nexports.getModificationFields = function() {\n\tvar fields = Object.create(null),\n\t\tmodifier = this.getTiddlerText(USER_NAME_TITLE);\n\tfields.modified = new Date();\n\tif(modifier) {\n\t\tfields.modifier = modifier;\n\t}\n\treturn fields;\n};\n\n/*\nReturn a sorted array of tiddler titles.  Options include:\nsortField: field to sort by\nexcludeTag: tag to exclude\nincludeSystem: whether to include system tiddlers (defaults to false)\n*/\nexports.getTiddlers = function(options) {\n\toptions = options || Object.create(null);\n\tvar self = this,\n\t\tsortField = options.sortField || \"title\",\n\t\ttiddlers = [], t, titles = [];\n\tthis.each(function(tiddler,title) {\n\t\tif(options.includeSystem || !self.isSystemTiddler(title)) {\n\t\t\tif(!options.excludeTag || !tiddler.hasTag(options.excludeTag)) {\n\t\t\t\ttiddlers.push(tiddler);\n\t\t\t}\n\t\t}\n\t});\n\ttiddlers.sort(function(a,b) {\n\t\tvar aa = a.fields[sortField].toLowerCase() || \"\",\n\t\t\tbb = b.fields[sortField].toLowerCase() || \"\";\n\t\tif(aa < bb) {\n\t\t\treturn -1;\n\t\t} else {\n\t\t\tif(aa > bb) {\n\t\t\t\treturn 1;\n\t\t\t} else {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t});\n\tfor(t=0; t<tiddlers.length; t++) {\n\t\ttitles.push(tiddlers[t].fields.title);\n\t}\n\treturn titles;\n};\n\nexports.countTiddlers = function(excludeTag) {\n\tvar tiddlers = this.getTiddlers({excludeTag: excludeTag});\n\treturn $tw.utils.count(tiddlers);\n};\n\n/*\nReturns a function iterator(callback) that iterates through the specified titles, and invokes the callback with callback(tiddler,title)\n*/\nexports.makeTiddlerIterator = function(titles) {\n\tvar self = this;\n\tif(!$tw.utils.isArray(titles)) {\n\t\ttitles = Object.keys(titles);\n\t} else {\n\t\ttitles = titles.slice(0);\n\t}\n\treturn function(callback) {\n\t\ttitles.forEach(function(title) {\n\t\t\tcallback(self.getTiddler(title),title);\n\t\t});\n\t};\n};\n\n/*\nSort an array of tiddler titles by a specified field\n\ttitles: array of titles (sorted in place)\n\tsortField: name of field to sort by\n\tisDescending: true if the sort should be descending\n\tisCaseSensitive: true if the sort should consider upper and lower case letters to be different\n*/\nexports.sortTiddlers = function(titles,sortField,isDescending,isCaseSensitive,isNumeric) {\n\tvar self = this;\n\ttitles.sort(function(a,b) {\n\t\tvar x,y,\n\t\t\tcompareNumbers = function(x,y) {\n\t\t\t\tvar result = \n\t\t\t\t\tisNaN(x) && !isNaN(y) ? (isDescending ? 
-1 : 1) :\n\t\t\t\t\t!isNaN(x) && isNaN(y) ? (isDescending ? 1 : -1) :\n\t\t\t\t\t                        (isDescending ? y - x :  x - y);\n\t\t\t\treturn result;\n\t\t\t};\n\t\tif(sortField !== \"title\") {\n\t\t\tvar tiddlerA = self.getTiddler(a),\n\t\t\t\ttiddlerB = self.getTiddler(b);\n\t\t\tif(tiddlerA) {\n\t\t\t\ta = tiddlerA.fields[sortField] || \"\";\n\t\t\t} else {\n\t\t\t\ta = \"\";\n\t\t\t}\n\t\t\tif(tiddlerB) {\n\t\t\t\tb = tiddlerB.fields[sortField] || \"\";\n\t\t\t} else {\n\t\t\t\tb = \"\";\n\t\t\t}\n\t\t}\n\t\tx = Number(a);\n\t\ty = Number(b);\n\t\tif(isNumeric && (!isNaN(x) || !isNaN(y))) {\n\t\t\treturn compareNumbers(x,y);\n\t\t} else if($tw.utils.isDate(a) && $tw.utils.isDate(b)) {\n\t\t\treturn isDescending ? b - a : a - b;\n\t\t} else {\n\t\t\ta = String(a);\n\t\t\tb = String(b);\n\t\t\tif(!isCaseSensitive) {\n\t\t\t\ta = a.toLowerCase();\n\t\t\t\tb = b.toLowerCase();\n\t\t\t}\n\t\t\treturn isDescending ? b.localeCompare(a) : a.localeCompare(b);\n\t\t}\n\t});\n};\n\n/*\nFor every tiddler invoke a callback(title,tiddler) with `this` set to the wiki object. Options include:\nsortField: field to sort by\nexcludeTag: tag to exclude\nincludeSystem: whether to include system tiddlers (defaults to false)\n*/\nexports.forEachTiddler = function(/* [options,]callback */) {\n\tvar arg = 0,\n\t\toptions = arguments.length >= 2 ? arguments[arg++] : {},\n\t\tcallback = arguments[arg++],\n\t\ttitles = this.getTiddlers(options),\n\t\tt, tiddler;\n\tfor(t=0; t<titles.length; t++) {\n\t\ttiddler = this.getTiddler(titles[t]);\n\t\tif(tiddler) {\n\t\t\tcallback.call(this,tiddler.fields.title,tiddler);\n\t\t}\n\t}\n};\n\n/*\nReturn an array of tiddler titles that are directly linked from the specified tiddler\n*/\nexports.getTiddlerLinks = function(title) {\n\tvar self = this;\n\t// We'll cache the links so they only get computed if the tiddler changes\n\treturn this.getCacheForTiddler(title,\"links\",function() {\n\t\t// Parse the tiddler\n\t\tvar parser = self.parseTiddler(title);\n\t\t// Count up the links\n\t\tvar links = [],\n\t\t\tcheckParseTree = function(parseTree) {\n\t\t\t\tfor(var t=0; t<parseTree.length; t++) {\n\t\t\t\t\tvar parseTreeNode = parseTree[t];\n\t\t\t\t\tif(parseTreeNode.type === \"link\" && parseTreeNode.attributes.to && parseTreeNode.attributes.to.type === \"string\") {\n\t\t\t\t\t\tvar value = parseTreeNode.attributes.to.value;\n\t\t\t\t\t\tif(links.indexOf(value) === -1) {\n\t\t\t\t\t\t\tlinks.push(value);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif(parseTreeNode.children) {\n\t\t\t\t\t\tcheckParseTree(parseTreeNode.children);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\tif(parser) {\n\t\t\tcheckParseTree(parser.tree);\n\t\t}\n\t\treturn links;\n\t});\n};\n\n/*\nReturn an array of tiddler titles that link to the specified tiddler\n*/\nexports.getTiddlerBacklinks = function(targetTitle) {\n\tvar self = this,\n\t\tbacklinks = [];\n\tthis.forEachTiddler(function(title,tiddler) {\n\t\tvar links = self.getTiddlerLinks(title);\n\t\tif(links.indexOf(targetTitle) !== -1) {\n\t\t\tbacklinks.push(title);\n\t\t}\n\t});\n\treturn backlinks;\n};\n\n/*\nReturn a hashmap of tiddler titles that are referenced but not defined. 
Each value is the number of times the missing tiddler is referenced\n*/\nexports.getMissingTitles = function() {\n\tvar self = this,\n\t\tmissing = [];\n// We should cache the missing tiddler list, even if we recreate it every time any tiddler is modified\n\tthis.forEachTiddler(function(title,tiddler) {\n\t\tvar links = self.getTiddlerLinks(title);\n\t\t$tw.utils.each(links,function(link) {\n\t\t\tif((!self.tiddlerExists(link) && !self.isShadowTiddler(link)) && missing.indexOf(link) === -1) {\n\t\t\t\tmissing.push(link);\n\t\t\t}\n\t\t});\n\t});\n\treturn missing;\n};\n\nexports.getOrphanTitles = function() {\n\tvar self = this,\n\t\torphans = this.getTiddlers();\n\tthis.forEachTiddler(function(title,tiddler) {\n\t\tvar links = self.getTiddlerLinks(title);\n\t\t$tw.utils.each(links,function(link) {\n\t\t\tvar p = orphans.indexOf(link);\n\t\t\tif(p !== -1) {\n\t\t\t\torphans.splice(p,1);\n\t\t\t}\n\t\t});\n\t});\n\treturn orphans; // Todo\n};\n\n/*\nRetrieves a list of the tiddler titles that are tagged with a given tag\n*/\nexports.getTiddlersWithTag = function(tag) {\n\tvar self = this;\n\treturn this.getGlobalCache(\"taglist-\" + tag,function() {\n\t\tvar tagmap = self.getTagMap();\n\t\treturn self.sortByList(tagmap[tag],tag);\n\t});\n};\n\n/*\nGet a hashmap by tag of arrays of tiddler titles\n*/\nexports.getTagMap = function() {\n\tvar self = this;\n\treturn this.getGlobalCache(\"tagmap\",function() {\n\t\tvar tags = Object.create(null),\n\t\t\tstoreTags = function(tagArray,title) {\n\t\t\t\tif(tagArray) {\n\t\t\t\t\tfor(var index=0; index<tagArray.length; index++) {\n\t\t\t\t\t\tvar tag = tagArray[index];\n\t\t\t\t\t\tif($tw.utils.hop(tags,tag)) {\n\t\t\t\t\t\t\ttags[tag].push(title);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttags[tag] = [title];\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\ttitle, tiddler;\n\t\t// Collect up all the tags\n\t\tself.eachShadow(function(tiddler,title) {\n\t\t\tif(!self.tiddlerExists(title)) {\n\t\t\t\ttiddler = self.getTiddler(title);\n\t\t\t\tstoreTags(tiddler.fields.tags,title);\n\t\t\t}\n\t\t});\n\t\tself.each(function(tiddler,title) {\n\t\t\tstoreTags(tiddler.fields.tags,title);\n\t\t});\n\t\treturn tags;\n\t});\n};\n\n/*\nLookup a given tiddler and return a list of all the tiddlers that include it in the specified list field\n*/\nexports.findListingsOfTiddler = function(targetTitle,fieldName) {\n\tfieldName = fieldName || \"list\";\n\tvar titles = [];\n\tthis.each(function(tiddler,title) {\n\t\tvar list = $tw.utils.parseStringArray(tiddler.fields[fieldName]);\n\t\tif(list && list.indexOf(targetTitle) !== -1) {\n\t\t\ttitles.push(title);\n\t\t}\n\t});\n\treturn titles;\n};\n\n/*\nSorts an array of tiddler titles according to an ordered list\n*/\nexports.sortByList = function(array,listTitle) {\n\tvar list = this.getTiddlerList(listTitle);\n\tif(!array || array.length === 0) {\n\t\treturn [];\n\t} else {\n\t\tvar titles = [], t, title;\n\t\t// First place any entries that are present in the list\n\t\tfor(t=0; t<list.length; t++) {\n\t\t\ttitle = list[t];\n\t\t\tif(array.indexOf(title) !== -1) {\n\t\t\t\ttitles.push(title);\n\t\t\t}\n\t\t}\n\t\t// Then place any remaining entries\n\t\tfor(t=0; t<array.length; t++) {\n\t\t\ttitle = array[t];\n\t\t\tif(list.indexOf(title) === -1) {\n\t\t\t\ttitles.push(title);\n\t\t\t}\n\t\t}\n\t\t// Finally obey the list-before and list-after fields of each tiddler in turn\n\t\tvar sortedTitles = titles.slice(0);\n\t\tfor(t=0; t<sortedTitles.length; t++) {\n\t\t\ttitle = sortedTitles[t];\n\t\t\tvar currPos = 
titles.indexOf(title),\n\t\t\t\tnewPos = -1,\n\t\t\t\ttiddler = this.getTiddler(title);\n\t\t\tif(tiddler) {\n\t\t\t\tvar beforeTitle = tiddler.fields[\"list-before\"],\n\t\t\t\t\tafterTitle = tiddler.fields[\"list-after\"];\n\t\t\t\tif(beforeTitle === \"\") {\n\t\t\t\t\tnewPos = 0;\n\t\t\t\t} else if(beforeTitle) {\n\t\t\t\t\tnewPos = titles.indexOf(beforeTitle);\n\t\t\t\t} else if(afterTitle) {\n\t\t\t\t\tnewPos = titles.indexOf(afterTitle);\n\t\t\t\t\tif(newPos >= 0) {\n\t\t\t\t\t\t++newPos;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif(newPos === -1) {\n\t\t\t\t\tnewPos = currPos;\n\t\t\t\t}\n\t\t\t\tif(newPos !== currPos) {\n\t\t\t\t\ttitles.splice(currPos,1);\n\t\t\t\t\tif(newPos >= currPos) {\n\t\t\t\t\t\tnewPos--;\n\t\t\t\t\t}\n\t\t\t\t\ttitles.splice(newPos,0,title);\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\treturn titles;\n\t}\n};\n\nexports.getSubTiddler = function(title,subTiddlerTitle) {\n\tvar bundleInfo = this.getPluginInfo(title) || this.getTiddlerDataCached(title);\n\tif(bundleInfo && bundleInfo.tiddlers) {\n\t\tvar subTiddler = bundleInfo.tiddlers[subTiddlerTitle];\n\t\tif(subTiddler) {\n\t\t\treturn new $tw.Tiddler(subTiddler);\n\t\t}\n\t}\n\treturn null;\n};\n\n/*\nRetrieve a tiddler as a JSON string of the fields\n*/\nexports.getTiddlerAsJson = function(title) {\n\tvar tiddler = this.getTiddler(title);\n\tif(tiddler) {\n\t\tvar fields = Object.create(null);\n\t\t$tw.utils.each(tiddler.fields,function(value,name) {\n\t\t\tfields[name] = tiddler.getFieldString(name);\n\t\t});\n\t\treturn JSON.stringify(fields);\n\t} else {\n\t\treturn JSON.stringify({title: title});\n\t}\n};\n\n/*\nGet the content of a tiddler as a JavaScript object. How this is done depends on the type of the tiddler:\n\napplication/json: the tiddler JSON is parsed into an object\napplication/x-tiddler-dictionary: the tiddler is parsed as sequence of name:value pairs\n\nOther types currently just return null.\n\ntitleOrTiddler: string tiddler title or a tiddler object\ndefaultData: default data to be returned if the tiddler is missing or doesn't contain data\n\nNote that the same value is returned for repeated calls for the same tiddler data. The value is frozen to prevent modification; otherwise modifications would be visible to all callers\n*/\nexports.getTiddlerDataCached = function(titleOrTiddler,defaultData) {\n\tvar self = this,\n\t\ttiddler = titleOrTiddler;\n\tif(!(tiddler instanceof $tw.Tiddler)) {\n\t\ttiddler = this.getTiddler(tiddler);\t\n\t}\n\tif(tiddler) {\n\t\treturn this.getCacheForTiddler(tiddler.fields.title,\"data\",function() {\n\t\t\t// Return the frozen value\n\t\t\tvar value = self.getTiddlerData(tiddler.fields.title,defaultData);\n\t\t\t$tw.utils.deepFreeze(value);\n\t\t\treturn value;\n\t\t});\n\t} else {\n\t\treturn defaultData;\n\t}\n};\n\n/*\nAlternative, uncached version of getTiddlerDataCached(). 
The return value can be mutated freely and reused\n*/\nexports.getTiddlerData = function(titleOrTiddler,defaultData) {\n\tvar tiddler = titleOrTiddler,\n\t\tdata;\n\tif(!(tiddler instanceof $tw.Tiddler)) {\n\t\ttiddler = this.getTiddler(tiddler);\t\n\t}\n\tif(tiddler && tiddler.fields.text) {\n\t\tswitch(tiddler.fields.type) {\n\t\t\tcase \"application/json\":\n\t\t\t\t// JSON tiddler\n\t\t\t\ttry {\n\t\t\t\t\tdata = JSON.parse(tiddler.fields.text);\n\t\t\t\t} catch(ex) {\n\t\t\t\t\treturn defaultData;\n\t\t\t\t}\n\t\t\t\treturn data;\n\t\t\tcase \"application/x-tiddler-dictionary\":\n\t\t\t\treturn $tw.utils.parseFields(tiddler.fields.text);\n\t\t}\n\t}\n\treturn defaultData;\n};\n\n/*\nExtract an indexed field from within a data tiddler\n*/\nexports.extractTiddlerDataItem = function(titleOrTiddler,index,defaultText) {\n\tvar data = this.getTiddlerData(titleOrTiddler,Object.create(null)),\n\t\ttext;\n\tif(data && $tw.utils.hop(data,index)) {\n\t\ttext = data[index];\n\t}\n\tif(typeof text === \"string\" || typeof text === \"number\") {\n\t\treturn text.toString();\n\t} else {\n\t\treturn defaultText;\n\t}\n};\n\n/*\nSet a tiddlers content to a JavaScript object. Currently this is done by setting the tiddler's type to \"application/json\" and setting the text to the JSON text of the data.\ntitle: title of tiddler\ndata: object that can be serialised to JSON\nfields: optional hashmap of additional tiddler fields to be set\n*/\nexports.setTiddlerData = function(title,data,fields) {\n\tvar existingTiddler = this.getTiddler(title),\n\t\tnewFields = {\n\t\t\ttitle: title\n\t};\n\tif(existingTiddler && existingTiddler.fields.type === \"application/x-tiddler-dictionary\") {\n\t\tnewFields.text = $tw.utils.makeTiddlerDictionary(data);\n\t} else {\n\t\tnewFields.type = \"application/json\";\n\t\tnewFields.text = JSON.stringify(data,null,$tw.config.preferences.jsonSpaces);\n\t}\n\tthis.addTiddler(new $tw.Tiddler(this.getCreationFields(),existingTiddler,fields,newFields,this.getModificationFields()));\n};\n\n/*\nReturn the content of a tiddler as an array containing each line\n*/\nexports.getTiddlerList = function(title,field,index) {\n\tif(index) {\n\t\treturn $tw.utils.parseStringArray(this.extractTiddlerDataItem(title,index,\"\"));\n\t}\n\tfield = field || \"list\";\n\tvar tiddler = this.getTiddler(title);\n\tif(tiddler) {\n\t\treturn ($tw.utils.parseStringArray(tiddler.fields[field]) || []).slice(0);\n\t}\n\treturn [];\n};\n\n// Return a named global cache object. Global cache objects are cleared whenever a tiddler change occurs\nexports.getGlobalCache = function(cacheName,initializer) {\n\tthis.globalCache = this.globalCache || Object.create(null);\n\tif($tw.utils.hop(this.globalCache,cacheName)) {\n\t\treturn this.globalCache[cacheName];\n\t} else {\n\t\tthis.globalCache[cacheName] = initializer();\n\t\treturn this.globalCache[cacheName];\n\t}\n};\n\nexports.clearGlobalCache = function() {\n\tthis.globalCache = Object.create(null);\n};\n\n// Return the named cache object for a tiddler. 
If the cache doesn't exist then the initializer function is invoked to create it\nexports.getCacheForTiddler = function(title,cacheName,initializer) {\n\tthis.caches = this.caches || Object.create(null);\n\tvar caches = this.caches[title];\n\tif(caches && caches[cacheName]) {\n\t\treturn caches[cacheName];\n\t} else {\n\t\tif(!caches) {\n\t\t\tcaches = Object.create(null);\n\t\t\tthis.caches[title] = caches;\n\t\t}\n\t\tcaches[cacheName] = initializer();\n\t\treturn caches[cacheName];\n\t}\n};\n\n// Clear all caches associated with a particular tiddler, or, if the title is null, clear all the caches for all the tiddlers\nexports.clearCache = function(title) {\n\tif(title) {\n\t\tthis.caches = this.caches || Object.create(null);\n\t\tif($tw.utils.hop(this.caches,title)) {\n\t\t\tdelete this.caches[title];\n\t\t}\n\t} else {\n\t\tthis.caches = Object.create(null);\n\t}\n};\n\nexports.initParsers = function(moduleType) {\n\t// Install the parser modules\n\t$tw.Wiki.parsers = {};\n\tvar self = this;\n\t$tw.modules.forEachModuleOfType(\"parser\",function(title,module) {\n\t\tfor(var f in module) {\n\t\t\tif($tw.utils.hop(module,f)) {\n\t\t\t\t$tw.Wiki.parsers[f] = module[f]; // Store the parser class\n\t\t\t}\n\t\t}\n\t});\n};\n\n/*\nParse a block of text of a specified MIME type\n\ttype: content type of text to be parsed\n\ttext: text\n\toptions: see below\nOptions include:\n\tparseAsInline: if true, the text of the tiddler will be parsed as an inline run\n\t_canonical_uri: optional string of the canonical URI of this content\n*/\nexports.parseText = function(type,text,options) {\n\ttext = text || \"\";\n\toptions = options || {};\n\t// Select a parser\n\tvar Parser = $tw.Wiki.parsers[type];\n\tif(!Parser && $tw.utils.getFileExtensionInfo(type)) {\n\t\tParser = $tw.Wiki.parsers[$tw.utils.getFileExtensionInfo(type).type];\n\t}\n\tif(!Parser) {\n\t\tParser = $tw.Wiki.parsers[options.defaultType || \"text/vnd.tiddlywiki\"];\n\t}\n\tif(!Parser) {\n\t\treturn null;\n\t}\n\t// Return the parser instance\n\treturn new Parser(type,text,{\n\t\tparseAsInline: options.parseAsInline,\n\t\twiki: this,\n\t\t_canonical_uri: options._canonical_uri\n\t});\n};\n\n/*\nParse a tiddler according to its MIME type\n*/\nexports.parseTiddler = function(title,options) {\n\toptions = $tw.utils.extend({},options);\n\tvar cacheType = options.parseAsInline ? \"inlineParseTree\" : \"blockParseTree\",\n\t\ttiddler = this.getTiddler(title),\n\t\tself = this;\n\treturn tiddler ? 
this.getCacheForTiddler(title,cacheType,function() {\n\t\t\tif(tiddler.hasField(\"_canonical_uri\")) {\n\t\t\t\toptions._canonical_uri = tiddler.fields._canonical_uri;\n\t\t\t}\n\t\t\treturn self.parseText(tiddler.fields.type,tiddler.fields.text,options);\n\t\t}) : null;\n};\n\nexports.parseTextReference = function(title,field,index,options) {\n\tvar tiddler,text;\n\tif(options.subTiddler) {\n\t\ttiddler = this.getSubTiddler(title,options.subTiddler);\n\t} else {\n\t\ttiddler = this.getTiddler(title);\n\t\tif(field === \"text\" || (!field && !index)) {\n\t\t\tthis.getTiddlerText(title); // Force the tiddler to be lazily loaded\n\t\t\treturn this.parseTiddler(title,options);\n\t\t}\n\t}\n\tif(field === \"text\" || (!field && !index)) {\n\t\tif(tiddler && tiddler.fields) {\n\t\t\treturn this.parseText(tiddler.fields.type || \"text/vnd.tiddlywiki\",tiddler.fields.text,options);\t\t\t\n\t\t} else {\n\t\t\treturn null;\n\t\t}\n\t} else if(field) {\n\t\tif(field === \"title\") {\n\t\t\ttext = title;\n\t\t} else {\n\t\t\tif(!tiddler || !tiddler.hasField(field)) {\n\t\t\t\treturn null;\n\t\t\t}\n\t\t\ttext = tiddler.fields[field];\n\t\t}\n\t\treturn this.parseText(\"text/vnd.tiddlywiki\",text.toString(),options);\n\t} else if(index) {\n\t\tthis.getTiddlerText(title); // Force the tiddler to be lazily loaded\n\t\ttext = this.extractTiddlerDataItem(tiddler,index,undefined);\n\t\tif(text === undefined) {\n\t\t\treturn null;\n\t\t}\n\t\treturn this.parseText(\"text/vnd.tiddlywiki\",text,options);\n\t}\n};\n\n/*\nMake a widget tree for a parse tree\nparser: parser object\noptions: see below\nOptions include:\ndocument: optional document to use\nvariables: hashmap of variables to set\nparentWidget: optional parent widget for the root node\n*/\nexports.makeWidget = function(parser,options) {\n\toptions = options || {};\n\tvar widgetNode = {\n\t\t\ttype: \"widget\",\n\t\t\tchildren: []\n\t\t},\n\t\tcurrWidgetNode = widgetNode;\n\t// Create set variable widgets for each variable\n\t$tw.utils.each(options.variables,function(value,name) {\n\t\tvar setVariableWidget = {\n\t\t\ttype: \"set\",\n\t\t\tattributes: {\n\t\t\t\tname: {type: \"string\", value: name},\n\t\t\t\tvalue: {type: \"string\", value: value}\n\t\t\t},\n\t\t\tchildren: []\n\t\t};\n\t\tcurrWidgetNode.children = [setVariableWidget];\n\t\tcurrWidgetNode = setVariableWidget;\n\t});\n\t// Add in the supplied parse tree nodes\n\tcurrWidgetNode.children = parser ? 
parser.tree : [];\n\t// Create the widget\n\treturn new widget.widget(widgetNode,{\n\t\twiki: this,\n\t\tdocument: options.document || $tw.fakeDocument,\n\t\tparentWidget: options.parentWidget\n\t});\n};\n\n/*\nMake a widget tree for transclusion\ntitle: target tiddler title\noptions: as for wiki.makeWidget() plus:\noptions.field: optional field to transclude (defaults to \"text\")\noptions.mode: transclusion mode \"inline\" or \"block\"\noptions.children: optional array of children for the transclude widget\n*/\nexports.makeTranscludeWidget = function(title,options) {\n\toptions = options || {};\n\tvar parseTree = {tree: [{\n\t\t\ttype: \"element\",\n\t\t\ttag: \"div\",\n\t\t\tchildren: [{\n\t\t\t\ttype: \"transclude\",\n\t\t\t\tattributes: {\n\t\t\t\t\ttiddler: {\n\t\t\t\t\t\tname: \"tiddler\",\n\t\t\t\t\t\ttype: \"string\",\n\t\t\t\t\t\tvalue: title}},\n\t\t\t\tisBlock: !options.parseAsInline}]}\n\t]};\n\tif(options.field) {\n\t\tparseTree.tree[0].children[0].attributes.field = {type: \"string\", value: options.field};\n\t}\n\tif(options.mode) {\n\t\tparseTree.tree[0].children[0].attributes.mode = {type: \"string\", value: options.mode};\n\t}\n\tif(options.children) {\n\t\tparseTree.tree[0].children[0].children = options.children;\n\t}\n\treturn $tw.wiki.makeWidget(parseTree,options);\n};\n\n/*\nParse text in a specified format and render it into another format\n\toutputType: content type for the output\n\ttextType: content type of the input text\n\ttext: input text\n\toptions: see below\nOptions include:\nvariables: hashmap of variables to set\nparentWidget: optional parent widget for the root node\n*/\nexports.renderText = function(outputType,textType,text,options) {\n\toptions = options || {};\n\tvar parser = this.parseText(textType,text,options),\n\t\twidgetNode = this.makeWidget(parser,options);\n\tvar container = $tw.fakeDocument.createElement(\"div\");\n\twidgetNode.render(container,null);\n\treturn outputType === \"text/html\" ? container.innerHTML : container.textContent;\n};\n\n/*\nParse text from a tiddler and render it into another format\n\toutputType: content type for the output\n\ttitle: title of the tiddler to be rendered\n\toptions: see below\nOptions include:\nvariables: hashmap of variables to set\nparentWidget: optional parent widget for the root node\n*/\nexports.renderTiddler = function(outputType,title,options) {\n\toptions = options || {};\n\tvar parser = this.parseTiddler(title,options),\n\t\twidgetNode = this.makeWidget(parser,options);\n\tvar container = $tw.fakeDocument.createElement(\"div\");\n\twidgetNode.render(container,null);\n\treturn outputType === \"text/html\" ? container.innerHTML : (outputType === \"text/plain-formatted\" ? 
container.formattedTextContent : container.textContent);\n};\n\n/*\nReturn an array of tiddler titles that match a search string\n\ttext: The text string to search for\n\toptions: see below\nOptions available:\n\tsource: an iterator function for the source tiddlers, called source(iterator), where iterator is called as iterator(tiddler,title)\n\texclude: An array of tiddler titles to exclude from the search\n\tinvert: If true returns tiddlers that do not contain the specified string\n\tcaseSensitive: If true forces a case sensitive search\n\tliteral: If true, searches for literal string, rather than separate search terms\n\tfield: If specified, restricts the search to the specified field\n*/\nexports.search = function(text,options) {\n\toptions = options || {};\n\tvar self = this,\n\t\tt,\n\t\tinvert = !!options.invert;\n\t// Convert the search string into a regexp for each term\n\tvar terms, searchTermsRegExps,\n\t\tflags = options.caseSensitive ? \"\" : \"i\";\n\tif(options.literal) {\n\t\tif(text.length === 0) {\n\t\t\tsearchTermsRegExps = null;\n\t\t} else {\n\t\t\tsearchTermsRegExps = [new RegExp(\"(\" + $tw.utils.escapeRegExp(text) + \")\",flags)];\n\t\t}\n\t} else {\n\t\tterms = text.split(/ +/);\n\t\tif(terms.length === 1 && terms[0] === \"\") {\n\t\t\tsearchTermsRegExps = null;\n\t\t} else {\n\t\t\tsearchTermsRegExps = [];\n\t\t\tfor(t=0; t<terms.length; t++) {\n\t\t\t\tsearchTermsRegExps.push(new RegExp(\"(\" + $tw.utils.escapeRegExp(terms[t]) + \")\",flags));\n\t\t\t}\n\t\t}\n\t}\n\t// Function to check a given tiddler for the search term\n\tvar searchTiddler = function(title) {\n\t\tif(!searchTermsRegExps) {\n\t\t\treturn true;\n\t\t}\n\t\tvar tiddler = self.getTiddler(title);\n\t\tif(!tiddler) {\n\t\t\ttiddler = new $tw.Tiddler({title: title, text: \"\", type: \"text/vnd.tiddlywiki\"});\n\t\t}\n\t\tvar contentTypeInfo = $tw.config.contentTypeInfo[tiddler.fields.type] || $tw.config.contentTypeInfo[\"text/vnd.tiddlywiki\"],\n\t\t\tmatch;\n\t\tfor(var t=0; t<searchTermsRegExps.length; t++) {\n\t\t\tmatch = false;\n\t\t\tif(options.field) {\n\t\t\t\tmatch = searchTermsRegExps[t].test(tiddler.getFieldString(options.field));\n\t\t\t} else {\n\t\t\t\t// Search title, tags and body\n\t\t\t\tif(contentTypeInfo.encoding === \"utf8\") {\n\t\t\t\t\tmatch = match || searchTermsRegExps[t].test(tiddler.fields.text);\n\t\t\t\t}\n\t\t\t\tvar tags = tiddler.fields.tags ? tiddler.fields.tags.join(\"\\0\") : \"\";\n\t\t\t\tmatch = match || searchTermsRegExps[t].test(tags) || searchTermsRegExps[t].test(tiddler.fields.title);\n\t\t\t}\n\t\t\tif(!match) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t};\n\t// Loop through all the tiddlers doing the search\n\tvar results = [],\n\t\tsource = options.source || this.each;\n\tsource(function(tiddler,title) {\n\t\tif(searchTiddler(title) !== options.invert) {\n\t\t\tresults.push(title);\n\t\t}\n\t});\n\t// Remove any of the results we have to exclude\n\tif(options.exclude) {\n\t\tfor(t=0; t<options.exclude.length; t++) {\n\t\t\tvar p = results.indexOf(options.exclude[t]);\n\t\t\tif(p !== -1) {\n\t\t\t\tresults.splice(p,1);\n\t\t\t}\n\t\t}\n\t}\n\treturn results;\n};\n\n/*\nTrigger a load for a tiddler if it is skinny. 
Returns the text, or undefined if the tiddler is missing, null if the tiddler is being lazily loaded.\n*/\nexports.getTiddlerText = function(title,defaultText) {\n\tvar tiddler = this.getTiddler(title);\n\t// Return undefined if the tiddler isn't found\n\tif(!tiddler) {\n\t\treturn defaultText;\n\t}\n\tif(tiddler.fields.text !== undefined) {\n\t\t// Just return the text if we've got it\n\t\treturn tiddler.fields.text;\n\t} else {\n\t\t// Tell any listeners about the need to lazily load this tiddler\n\t\tthis.dispatchEvent(\"lazyLoad\",title);\n\t\t// Indicate that the text is being loaded\n\t\treturn null;\n\t}\n};\n\n/*\nRead an array of browser File objects, invoking callback(tiddlerFieldsArray) once they're all read\n*/\nexports.readFiles = function(files,callback) {\n\tvar result = [],\n\t\toutstanding = files.length;\n\tfor(var f=0; f<files.length; f++) {\n\t\tthis.readFile(files[f],function(tiddlerFieldsArray) {\n\t\t\tresult.push.apply(result,tiddlerFieldsArray);\n\t\t\tif(--outstanding === 0) {\n\t\t\t\tcallback(result);\n\t\t\t}\n\t\t});\n\t}\n\treturn files.length;\n};\n\n/*\nRead a browser File object, invoking callback(tiddlerFieldsArray) with an array of tiddler fields objects\n*/\nexports.readFile = function(file,callback) {\n\t// Get the type, falling back to the filename extension\n\tvar self = this,\n\t\ttype = file.type;\n\tif(type === \"\" || !type) {\n\t\tvar dotPos = file.name.lastIndexOf(\".\");\n\t\tif(dotPos !== -1) {\n\t\t\tvar fileExtensionInfo = $tw.utils.getFileExtensionInfo(file.name.substr(dotPos));\n\t\t\tif(fileExtensionInfo) {\n\t\t\t\ttype = fileExtensionInfo.type;\n\t\t\t}\n\t\t}\n\t}\n\t// Figure out if we're reading a binary file\n\tvar contentTypeInfo = $tw.config.contentTypeInfo[type],\n\t\tisBinary = contentTypeInfo ? 
contentTypeInfo.encoding === \"base64\" : false;\n\t// Log some debugging information\n\tif($tw.log.IMPORT) {\n\t\tconsole.log(\"Importing file '\" + file.name + \"', type: '\" + type + \"', isBinary: \" + isBinary);\n\t}\n\t// Create the FileReader\n\tvar reader = new FileReader();\n\t// Onload\n\treader.onload = function(event) {\n\t\t// Deserialise the file contents\n\t\tvar text = event.target.result,\n\t\t\ttiddlerFields = {title: file.name || \"Untitled\", type: type};\n\t\t// Are we binary?\n\t\tif(isBinary) {\n\t\t\t// The base64 section starts after the first comma in the data URI\n\t\t\tvar commaPos = text.indexOf(\",\");\n\t\t\tif(commaPos !== -1) {\n\t\t\t\ttiddlerFields.text = text.substr(commaPos+1);\n\t\t\t\tcallback([tiddlerFields]);\n\t\t\t}\n\t\t} else {\n\t\t\t// Check whether this is an encrypted TiddlyWiki file\n\t\t\tvar encryptedJson = $tw.utils.extractEncryptedStoreArea(text);\n\t\t\tif(encryptedJson) {\n\t\t\t\t// If so, attempt to decrypt it with the current password\n\t\t\t\t$tw.utils.decryptStoreAreaInteractive(encryptedJson,function(tiddlers) {\n\t\t\t\t\tcallback(tiddlers);\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\t// Otherwise, just try to deserialise any tiddlers in the file\n\t\t\t\tcallback(self.deserializeTiddlers(type,text,tiddlerFields));\n\t\t\t}\n\t\t}\n\t};\n\t// Kick off the read\n\tif(isBinary) {\n\t\treader.readAsDataURL(file);\n\t} else {\n\t\treader.readAsText(file);\n\t}\n};\n\n/*\nFind any existing draft of a specified tiddler\n*/\nexports.findDraft = function(targetTitle) {\n\tvar draftTitle = undefined;\n\tthis.forEachTiddler({includeSystem: true},function(title,tiddler) {\n\t\tif(tiddler.fields[\"draft.title\"] && tiddler.fields[\"draft.of\"] === targetTitle) {\n\t\t\tdraftTitle = title;\n\t\t}\n\t});\n\treturn draftTitle;\n}\n\n/*\nCheck whether the specified draft tiddler has been modified.\nIf the original tiddler doesn't exist, create  a vanilla tiddler variable,\nto check if additional fields have been added.\n*/\nexports.isDraftModified = function(title) {\n\tvar tiddler = this.getTiddler(title);\n\tif(!tiddler.isDraft()) {\n\t\treturn false;\n\t}\n\tvar ignoredFields = [\"created\", \"modified\", \"title\", \"draft.title\", \"draft.of\"],\n\t\torigTiddler = this.getTiddler(tiddler.fields[\"draft.of\"]) || new $tw.Tiddler({text:\"\", tags:[]}),\n\t\ttitleModified = tiddler.fields[\"draft.title\"] !== tiddler.fields[\"draft.of\"];\n\treturn titleModified || !tiddler.isEqual(origTiddler,ignoredFields);\n};\n\n/*\nAdd a new record to the top of the history stack\ntitle: a title string or an array of title strings\nfromPageRect: page coordinates of the origin of the navigation\nhistoryTitle: title of history tiddler (defaults to $:/HistoryList)\n*/\nexports.addToHistory = function(title,fromPageRect,historyTitle) {\n\tvar story = new $tw.Story({wiki: this, historyTitle: historyTitle});\n\tstory.addToHistory(title,fromPageRect);\n};\n\n/*\nInvoke the available upgrader modules\ntitles: array of tiddler titles to be processed\ntiddlers: hashmap by title of tiddler fields of pending import tiddlers. These can be modified by the upgraders. An entry with no fields indicates a tiddler that was pending import has been suppressed. 
When entries are added to the pending import the tiddlers hashmap may have entries that are not present in the titles array\nReturns a hashmap of messages keyed by tiddler title.\n*/\nexports.invokeUpgraders = function(titles,tiddlers) {\n\t// Collect up the available upgrader modules\n\tvar self = this;\n\tif(!this.upgraderModules) {\n\t\tthis.upgraderModules = [];\n\t\t$tw.modules.forEachModuleOfType(\"upgrader\",function(title,module) {\n\t\t\tif(module.upgrade) {\n\t\t\t\tself.upgraderModules.push(module);\n\t\t\t}\n\t\t});\n\t}\n\t// Invoke each upgrader in turn\n\tvar messages = {};\n\tfor(var t=0; t<this.upgraderModules.length; t++) {\n\t\tvar upgrader = this.upgraderModules[t],\n\t\t\tupgraderMessages = upgrader.upgrade(this,titles,tiddlers);\n\t\t$tw.utils.extend(messages,upgraderMessages);\n\t}\n\treturn messages;\n};\n\n})();\n",
            "title": "$:/core/modules/wiki.js",
            "type": "application/javascript",
            "module-type": "wikimethod"
        },
        "$:/palettes/Blanca": {
            "title": "$:/palettes/Blanca",
            "name": "Blanca",
            "description": "A clean white palette to let you focus",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #66cccc\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #999999\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #ffffff\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #7897f3\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #ccc\nsidebar-foreground-shadow: rgba(255,255,255, 0.8)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #ffffff\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #7897f3\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #eeeeee\ntab-border-selected: #cccccc\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ffeedd\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: #eee\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #ff9900\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Blue": {
            "title": "$:/palettes/Blue",
            "name": "Blue",
            "description": "A blue theme",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #fff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour foreground>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333353\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #999999\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #ddddff\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #5778d8\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #ffffff\nsidebar-foreground-shadow: rgba(255,255,255, 0.8)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: <<colour page-background>>\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #5959c0\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: <<colour background>>\ntab-background: #ccccdd\ntab-border-selected: #ccccdd\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #eeeeff\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #666666\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #ffffff\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #ffffff\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #5959c0\ntoolbar-new-button: #5eb95e\ntoolbar-options-button: rgb(128, 88, 165)\ntoolbar-save-button: #0e90d2\ntoolbar-info-button: #0e90d2\ntoolbar-edit-button: rgb(243, 123, 29)\ntoolbar-close-button: #dd514c\ntoolbar-delete-button: #dd514c\ntoolbar-cancel-button: rgb(243, 123, 
29)\ntoolbar-done-button: #5eb95e\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Muted": {
            "title": "$:/palettes/Muted",
            "name": "Muted",
            "description": "Bright tiddlers on a muted background",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #bbb\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #6f6f70\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #29a6ee\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #c2c1c2\nsidebar-foreground-shadow: rgba(255,255,255,0)\nsidebar-foreground: #d3d2d4\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #6f6f70\nsidebar-tab-background: #666667\nsidebar-tab-border-selected: #999\nsidebar-tab-border: #515151\nsidebar-tab-divider: #999\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: #999\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #d1d0d2\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #d5ad34\ntag-foreground: #ffffff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #182955\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/ContrastLight": {
            "title": "$:/palettes/ContrastLight",
            "name": "Contrast (Light)",
            "description": "High contrast and unambiguous (light version)",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #f00\nalert-border: <<colour background>>\nalert-highlight: <<colour foreground>>\nalert-muted-foreground: #800\nbackground: #fff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background: <<colour background>>\nbutton-foreground: <<colour foreground>>\nbutton-border: <<colour foreground>>\ncode-background: <<colour background>>\ncode-border: <<colour foreground>>\ncode-foreground: <<colour foreground>>\ndirty-indicator: #f00\ndownload-background: #080\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: <<colour foreground>>\ndropdown-tab-background: <<colour foreground>>\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #00a\nexternal-link-foreground: #00e\nforeground: #000\nmessage-background: <<colour foreground>>\nmessage-border: <<colour background>>\nmessage-foreground: <<colour background>>\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: <<colour foreground>>\nmodal-footer-background: <<colour background>>\nmodal-footer-border: <<colour foreground>>\nmodal-header-border: <<colour foreground>>\nmuted-foreground: <<colour foreground>>\nnotification-background: <<colour background>>\nnotification-border: <<colour foreground>>\npage-background: <<colour background>>\npre-background: <<colour background>>\npre-border: <<colour foreground>>\nprimary: #00f\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: <<colour background>>\nsidebar-controls-foreground: <<colour foreground>>\nsidebar-foreground-shadow: rgba(0,0,0, 0)\nsidebar-foreground: <<colour foreground>>\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: <<colour foreground>>\nsidebar-tab-background-selected: <<colour background>>\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: <<colour foreground>>\nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: <<colour foreground>>\nsidebar-tiddler-link-foreground: <<colour primary>>\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: <<colour background>>\ntab-background: <<colour foreground>>\ntab-border-selected: <<colour foreground>>\ntab-border: <<colour foreground>>\ntab-divider: <<colour foreground>>\ntab-foreground-selected: <<colour foreground>>\ntab-foreground: <<colour background>>\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #000\ntag-foreground: #fff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour foreground>>\ntiddler-controls-foreground-hover: #ddd\ntiddler-controls-foreground-selected: #fdd\ntiddler-controls-foreground: <<colour foreground>>\ntiddler-editor-background: <<colour background>>\ntiddler-editor-border-image: <<colour foreground>>\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: <<colour background>>\ntiddler-editor-fields-odd: <<colour 
background>>\ntiddler-info-background: <<colour background>>\ntiddler-info-border: <<colour foreground>>\ntiddler-info-tab-background: <<colour background>>\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: <<colour foreground>>\ntiddler-title-foreground: <<colour foreground>>\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: <<colour foreground>>\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/ContrastDark": {
            "title": "$:/palettes/ContrastDark",
            "name": "Contrast (Dark)",
            "description": "High contrast and unambiguous (dark version)",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #f00\nalert-border: <<colour background>>\nalert-highlight: <<colour foreground>>\nalert-muted-foreground: #800\nbackground: #000\nblockquote-bar: <<colour muted-foreground>>\nbutton-background: <<colour background>>\nbutton-foreground: <<colour foreground>>\nbutton-border: <<colour foreground>>\ncode-background: <<colour background>>\ncode-border: <<colour foreground>>\ncode-foreground: <<colour foreground>>\ndirty-indicator: #f00\ndownload-background: #080\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: <<colour foreground>>\ndropdown-tab-background: <<colour foreground>>\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #00a\nexternal-link-foreground: #00e\nforeground: #fff\nmessage-background: <<colour foreground>>\nmessage-border: <<colour background>>\nmessage-foreground: <<colour background>>\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: <<colour foreground>>\nmodal-footer-background: <<colour background>>\nmodal-footer-border: <<colour foreground>>\nmodal-header-border: <<colour foreground>>\nmuted-foreground: <<colour foreground>>\nnotification-background: <<colour background>>\nnotification-border: <<colour foreground>>\npage-background: <<colour background>>\npre-background: <<colour background>>\npre-border: <<colour foreground>>\nprimary: #00f\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: <<colour background>>\nsidebar-controls-foreground: <<colour foreground>>\nsidebar-foreground-shadow: rgba(0,0,0, 0)\nsidebar-foreground: <<colour foreground>>\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: <<colour foreground>>\nsidebar-tab-background-selected: <<colour background>>\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: <<colour foreground>>\nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: <<colour foreground>>\nsidebar-tiddler-link-foreground: <<colour primary>>\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: <<colour background>>\ntab-background: <<colour foreground>>\ntab-border-selected: <<colour foreground>>\ntab-border: <<colour foreground>>\ntab-divider: <<colour foreground>>\ntab-foreground-selected: <<colour foreground>>\ntab-foreground: <<colour background>>\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #fff\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: <<colour foreground>>\ntiddler-controls-foreground-hover: #ddd\ntiddler-controls-foreground-selected: #fdd\ntiddler-controls-foreground: <<colour foreground>>\ntiddler-editor-background: <<colour background>>\ntiddler-editor-border-image: <<colour foreground>>\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: <<colour background>>\ntiddler-editor-fields-odd: <<colour 
background>>\ntiddler-info-background: <<colour background>>\ntiddler-info-border: <<colour foreground>>\ntiddler-info-tab-background: <<colour background>>\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: <<colour foreground>>\ntiddler-title-foreground: <<colour foreground>>\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: <<colour foreground>>\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/DarkPhotos": {
            "created": "20150402111612188",
            "description": "Good with dark photo backgrounds",
            "modified": "20150402112344080",
            "name": "DarkPhotos",
            "tags": "$:/tags/Palette",
            "title": "$:/palettes/DarkPhotos",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background: \nbutton-foreground: \nbutton-border: \ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #ddd\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #336438\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #5778d8\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #ccf\nsidebar-controls-foreground: #fff\nsidebar-foreground-shadow: rgba(0,0,0, 0.5)\nsidebar-foreground: #fff\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #eee\nsidebar-tab-background-selected: rgba(255,255,255, 0.8)\nsidebar-tab-background: rgba(255,255,255, 0.4)\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: rgba(255,255,255, 0.2)\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #aaf\nsidebar-tiddler-link-foreground: #ddf\nsite-title-foreground: #fff\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ec6\ntag-foreground: #ffffff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #182955\ntoolbar-new-button: \ntoolbar-options-button: \ntoolbar-save-button: \ntoolbar-info-button: \ntoolbar-edit-button: \ntoolbar-close-button: \ntoolbar-delete-button: \ntoolbar-cancel-button: \ntoolbar-done-button: \nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Rocker": {
            "title": "$:/palettes/Rocker",
            "name": "Rocker",
            "description": "A dark theme",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #999999\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #000\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #cc0000\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #ffffff\nsidebar-foreground-shadow: rgba(255,255,255, 0.0)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #000\nsidebar-tab-background: <<colour tab-background>>\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: <<colour tab-divider>>\nsidebar-tab-foreground-selected: \nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #ffbb99\nsidebar-tiddler-link-foreground: #cc0000\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ffbb99\ntag-foreground: #000\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #cc0000\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/SolarFlare": {
            "title": "$:/palettes/SolarFlare",
            "name": "Solar Flare",
            "description": "Warm, relaxing earth colours",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": ": Background Tones\n\nbase03: #002b36\nbase02: #073642\n\n: Content Tones\n\nbase01: #586e75\nbase00: #657b83\nbase0: #839496\nbase1: #93a1a1\n\n: Background Tones\n\nbase2: #eee8d5\nbase3: #fdf6e3\n\n: Accent Colors\n\nyellow: #b58900\norange: #cb4b16\nred: #dc322f\nmagenta: #d33682\nviolet: #6c71c4\nblue: #268bd2\ncyan: #2aa198\ngreen: #859900\n\n: Additional Tones (RA)\n\nbase10: #c0c4bb\nviolet-muted: #7c81b0\nblue-muted: #4e7baa\n\nyellow-hot: #ffcc44\norange-hot: #eb6d20\nred-hot: #ff2222\nblue-hot: #2298ee\ngreen-hot: #98ee22\n\n: Palette\n\n: Do not use colour macro for background and foreground\nbackground: #fdf6e3\n    download-foreground: <<colour background>>\n    dragger-foreground: <<colour background>>\n    dropdown-background: <<colour background>>\n    modal-background: <<colour background>>\n    sidebar-foreground-shadow: <<colour background>>\n    tiddler-background: <<colour background>>\n    tiddler-border: <<colour background>>\n    tiddler-link-background: <<colour background>>\n    tab-background-selected: <<colour background>>\n        dropdown-tab-background-selected: <<colour tab-background-selected>>\nforeground: #657b83\n    dragger-background: <<colour foreground>>\n    tab-foreground: <<colour foreground>>\n        tab-foreground-selected: <<colour tab-foreground>>\n            sidebar-tab-foreground-selected: <<colour tab-foreground-selected>>\n        sidebar-tab-foreground: <<colour tab-foreground>>\n    sidebar-button-foreground: <<colour foreground>>\n    sidebar-controls-foreground: <<colour foreground>>\n    sidebar-foreground: <<colour foreground>>\n: base03\n: base02\n: base01\n    alert-muted-foreground: <<colour base01>>\n: base00\n    code-foreground: <<colour base00>>\n    message-foreground: <<colour base00>>\n    tag-foreground: <<colour base00>>\n: base0\n    sidebar-tiddler-link-foreground: <<colour base0>>\n: base1\n    muted-foreground: <<colour base1>>\n        blockquote-bar: <<colour muted-foreground>>\n        dropdown-border: <<colour muted-foreground>>\n        sidebar-muted-foreground: <<colour muted-foreground>>\n        tiddler-title-foreground: <<colour muted-foreground>>\n            site-title-foreground: <<colour tiddler-title-foreground>>\n: base2\n    modal-footer-background: <<colour base2>>\n    page-background: <<colour base2>>\n        modal-backdrop: <<colour page-background>>\n        notification-background: <<colour page-background>>\n        code-background: <<colour page-background>>\n            code-border: <<colour code-background>>\n        pre-background: <<colour page-background>>\n            pre-border: <<colour pre-background>>\n        sidebar-tab-background-selected: <<colour page-background>>\n    table-header-background: <<colour base2>>\n    tag-background: <<colour base2>>\n    tiddler-editor-background: <<colour base2>>\n    tiddler-info-background: <<colour base2>>\n    tiddler-info-tab-background: <<colour base2>>\n    tab-background: <<colour base2>>\n        dropdown-tab-background: <<colour tab-background>>\n: base3\n    alert-background: <<colour base3>>\n    message-background: <<colour base3>>\n: yellow\n: orange\n: red\n: magenta\n    alert-highlight: <<colour magenta>>\n: violet\n    external-link-foreground: <<colour violet>>\n: blue\n: cyan\n: green\n: base10\n    tiddler-controls-foreground: <<colour base10>>\n: violet-muted\n    external-link-foreground-visited: <<colour violet-muted>>\n: blue-muted\n    primary: <<colour blue-muted>>\n        download-background: 
<<colour primary>>\n        tiddler-link-foreground: <<colour primary>>\n\nalert-border: #b99e2f\ndirty-indicator: #ff0000\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nmessage-border: #cfd6e6\nmodal-border: #999999\nsidebar-controls-foreground-hover:\nsidebar-muted-foreground-hover:\nsidebar-tab-background: #ded8c5\nsidebar-tiddler-link-foreground-hover:\nstatic-alert-foreground: #aaaaaa\ntab-border: #cccccc\n    modal-footer-border: <<colour tab-border>>\n    modal-header-border: <<colour tab-border>>\n    notification-border: <<colour tab-border>>\n    sidebar-tab-border: <<colour tab-border>>\n    tab-border-selected: <<colour tab-border>>\n        sidebar-tab-border-selected: <<colour tab-border-selected>>\ntab-divider: #d8d8d8\n    sidebar-tab-divider: <<colour tab-divider>>\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-border: #dddddd\ntiddler-subtitle-foreground: #c0c0c0\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/palettes/Vanilla": {
            "title": "$:/palettes/Vanilla",
            "name": "Vanilla",
            "description": "Pale and unobtrusive",
            "tags": "$:/tags/Palette",
            "type": "application/x-tiddler-dictionary",
            "text": "alert-background: #ffe476\nalert-border: #b99e2f\nalert-highlight: #881122\nalert-muted-foreground: #b99e2f\nbackground: #ffffff\nblockquote-bar: <<colour muted-foreground>>\nbutton-background:\nbutton-foreground:\nbutton-border:\ncode-background: #f7f7f9\ncode-border: #e1e1e8\ncode-foreground: #dd1144\ndirty-indicator: #ff0000\ndownload-background: #34c734\ndownload-foreground: <<colour background>>\ndragger-background: <<colour foreground>>\ndragger-foreground: <<colour background>>\ndropdown-background: <<colour background>>\ndropdown-border: <<colour muted-foreground>>\ndropdown-tab-background-selected: #fff\ndropdown-tab-background: #ececec\ndropzone-background: rgba(0,200,0,0.7)\nexternal-link-background-hover: inherit\nexternal-link-background-visited: inherit\nexternal-link-background: inherit\nexternal-link-foreground-hover: inherit\nexternal-link-foreground-visited: #0000aa\nexternal-link-foreground: #0000ee\nforeground: #333333\nmessage-background: #ecf2ff\nmessage-border: #cfd6e6\nmessage-foreground: #547599\nmodal-backdrop: <<colour foreground>>\nmodal-background: <<colour background>>\nmodal-border: #999999\nmodal-footer-background: #f5f5f5\nmodal-footer-border: #dddddd\nmodal-header-border: #eeeeee\nmuted-foreground: #bbb\nnotification-background: #ffffdd\nnotification-border: #999999\npage-background: #f4f4f4\npre-background: #f5f5f5\npre-border: #cccccc\nprimary: #5778d8\nsidebar-button-foreground: <<colour foreground>>\nsidebar-controls-foreground-hover: #000000\nsidebar-controls-foreground: #aaaaaa\nsidebar-foreground-shadow: rgba(255,255,255, 0.8)\nsidebar-foreground: #acacac\nsidebar-muted-foreground-hover: #444444\nsidebar-muted-foreground: #c0c0c0\nsidebar-tab-background-selected: #f4f4f4\nsidebar-tab-background: #e0e0e0\nsidebar-tab-border-selected: <<colour tab-border-selected>>\nsidebar-tab-border: <<colour tab-border>>\nsidebar-tab-divider: #e4e4e4\nsidebar-tab-foreground-selected:\nsidebar-tab-foreground: <<colour tab-foreground>>\nsidebar-tiddler-link-foreground-hover: #444444\nsidebar-tiddler-link-foreground: #999999\nsite-title-foreground: <<colour tiddler-title-foreground>>\nstatic-alert-foreground: #aaaaaa\ntab-background-selected: #ffffff\ntab-background: #d8d8d8\ntab-border-selected: #d8d8d8\ntab-border: #cccccc\ntab-divider: #d8d8d8\ntab-foreground-selected: <<colour tab-foreground>>\ntab-foreground: #666666\ntable-border: #dddddd\ntable-footer-background: #a8a8a8\ntable-header-background: #f0f0f0\ntag-background: #ec6\ntag-foreground: #ffffff\ntiddler-background: <<colour background>>\ntiddler-border: <<colour background>>\ntiddler-controls-foreground-hover: #888888\ntiddler-controls-foreground-selected: #444444\ntiddler-controls-foreground: #cccccc\ntiddler-editor-background: #f8f8f8\ntiddler-editor-border-image: #ffffff\ntiddler-editor-border: #cccccc\ntiddler-editor-fields-even: #e0e8e0\ntiddler-editor-fields-odd: #f0f4f0\ntiddler-info-background: #f8f8f8\ntiddler-info-border: #dddddd\ntiddler-info-tab-background: #f8f8f8\ntiddler-link-background: <<colour background>>\ntiddler-link-foreground: <<colour primary>>\ntiddler-subtitle-foreground: #c0c0c0\ntiddler-title-foreground: #182955\ntoolbar-new-button:\ntoolbar-options-button:\ntoolbar-save-button:\ntoolbar-info-button:\ntoolbar-edit-button:\ntoolbar-close-button:\ntoolbar-delete-button:\ntoolbar-cancel-button:\ntoolbar-done-button:\nuntagged-background: #999999\nvery-muted-foreground: #888888\n"
        },
        "$:/core/readme": {
            "title": "$:/core/readme",
            "text": "This plugin contains TiddlyWiki's core components, comprising:\n\n* JavaScript code modules\n* Icons\n* Templates needed to create TiddlyWiki's user interface\n* British English (''en-GB'') translations of the localisable strings used by the core\n"
        },
        "$:/core/templates/alltiddlers.template.html": {
            "title": "$:/core/templates/alltiddlers.template.html",
            "type": "text/vnd.tiddlywiki-html",
            "text": "<!-- This template is provided for backwards compatibility with older versions of TiddlyWiki -->\n\n<$set name=\"exportFilter\" value=\"[!is[system]sort[title]]\">\n\n{{$:/core/templates/exporters/StaticRiver}}\n\n</$set>\n"
        },
        "$:/core/templates/canonical-uri-external-image": {
            "title": "$:/core/templates/canonical-uri-external-image",
            "text": "<!--\n\nThis template is used to assign the ''_canonical_uri'' field to external images.\n\nChange the `./images/` part to a different base URI. The URI can be relative or absolute.\n\n-->\n./images/<$view field=\"title\" format=\"doubleurlencoded\"/>"
        },
        "$:/core/templates/canonical-uri-external-text": {
            "title": "$:/core/templates/canonical-uri-external-text",
            "text": "<!--\n\nThis template is used to assign the ''_canonical_uri'' field to external text files.\n\nChange the `./text/` part to a different base URI. The URI can be relative or absolute.\n\n-->\n./text/<$view field=\"title\" format=\"doubleurlencoded\"/>.tid"
        },
        "$:/core/templates/css-tiddler": {
            "title": "$:/core/templates/css-tiddler",
            "text": "<!--\n\nThis template is used for saving CSS tiddlers as a style tag with data attributes representing the tiddler fields.\n\n-->`<style`<$fields template=' data-tiddler-$name$=\"$encoded_value$\"'></$fields>` type=\"text/css\">`<$view field=\"text\" format=\"text\" />`</style>`"
        },
        "$:/core/templates/exporters/CsvFile": {
            "title": "$:/core/templates/exporters/CsvFile",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/CsvFile}}",
            "extension": ".csv",
            "text": "\\define renderContent()\n<$text text=<<csvtiddlers filter:\"\"\"$(exportFilter)$\"\"\" format:\"quoted-comma-sep\">>/>\n\\end\n<<renderContent>>\n"
        },
        "$:/core/templates/exporters/JsonFile": {
            "title": "$:/core/templates/exporters/JsonFile",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/JsonFile}}",
            "extension": ".json",
            "text": "\\define renderContent()\n<$text text=<<jsontiddlers filter:\"\"\"$(exportFilter)$\"\"\">>/>\n\\end\n<<renderContent>>\n"
        },
        "$:/core/templates/exporters/StaticRiver": {
            "title": "$:/core/templates/exporters/StaticRiver",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/StaticRiver}}",
            "extension": ".html",
            "text": "\\define tv-wikilink-template() #$uri_encoded$\n\\define tv-config-toolbar-icons() no\n\\define tv-config-toolbar-text() no\n\\define tv-config-toolbar-class() tc-btn-invisible\n\\rules only filteredtranscludeinline transcludeinline\n<!doctype html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"{{$:/core/templates/version}}\" />\n<meta name=\"format-detection\" content=\"telephone=no\">\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<title>{{$:/core/wiki/title}}</title>\n<div id=\"styleArea\">\n{{$:/boot/boot.css||$:/core/templates/css-tiddler}}\n</div>\n<style type=\"text/css\">\n{{$:/core/ui/PageStylesheet||$:/core/templates/wikified-tiddler}}\n</style>\n</head>\n<body class=\"tc-body\">\n{{$:/StaticBanner||$:/core/templates/html-tiddler}}\n<section class=\"tc-story-river\">\n{{$:/core/templates/exporters/StaticRiver/Content||$:/core/templates/html-tiddler}}\n</section>\n</body>\n</html>\n"
        },
        "$:/core/templates/exporters/StaticRiver/Content": {
            "title": "$:/core/templates/exporters/StaticRiver/Content",
            "text": "\\define renderContent()\n{{{ $(exportFilter)$ ||$:/core/templates/static-tiddler}}}\n\\end\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n<<renderContent>>\n</$importvariables>\n"
        },
        "$:/core/templates/exporters/TidFile": {
            "title": "$:/core/templates/exporters/TidFile",
            "tags": "$:/tags/Exporter",
            "description": "{{$:/language/Exporters/TidFile}}",
            "extension": ".tid",
            "text": "\\define renderContent()\n{{{ $(exportFilter)$ +[limit[1]] ||$:/core/templates/tid-tiddler}}}\n\\end\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\"><<renderContent>></$importvariables>"
        },
        "$:/core/templates/html-div-tiddler": {
            "title": "$:/core/templates/html-div-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers as an HTML DIV tag with attributes representing the tiddler fields.\n\n-->`<div`<$fields template=' $name$=\"$encoded_value$\"'></$fields>`>\n<pre>`<$view field=\"text\" format=\"htmlencoded\" />`</pre>\n</div>`\n"
        },
        "$:/core/templates/html-tiddler": {
            "title": "$:/core/templates/html-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers as raw HTML\n\n--><$view field=\"text\" format=\"htmlwikified\" />"
        },
        "$:/core/templates/javascript-tiddler": {
            "title": "$:/core/templates/javascript-tiddler",
            "text": "<!--\n\nThis template is used for saving JavaScript tiddlers as a script tag with data attributes representing the tiddler fields.\n\n-->`<script`<$fields template=' data-tiddler-$name$=\"$encoded_value$\"'></$fields>` type=\"text/javascript\">`<$view field=\"text\" format=\"text\" />`</script>`"
        },
        "$:/core/templates/module-tiddler": {
            "title": "$:/core/templates/module-tiddler",
            "text": "<!--\n\nThis template is used for saving JavaScript tiddlers as a script tag with data attributes representing the tiddler fields. The body of the tiddler is wrapped in a call to the `$tw.modules.define` function in order to define the body of the tiddler as a module\n\n-->`<script`<$fields template=' data-tiddler-$name$=\"$encoded_value$\"'></$fields>` type=\"text/javascript\" data-module=\"yes\">$tw.modules.define(\"`<$view field=\"title\" format=\"jsencoded\" />`\",\"`<$view field=\"module-type\" format=\"jsencoded\" />`\",function(module,exports,require) {`<$view field=\"text\" format=\"text\" />`});\n</script>`"
        },
        "$:/core/templates/MOTW.html": {
            "title": "$:/core/templates/MOTW.html",
            "text": "\\rules only filteredtranscludeinline transcludeinline entity\n<!-- The following comment is called a MOTW comment and is necessary for the TiddlyIE Internet Explorer extension -->\n<!-- saved from url=(0021)http://tiddlywiki.com -->&#13;&#10;"
        },
        "$:/core/templates/plain-text-tiddler": {
            "title": "$:/core/templates/plain-text-tiddler",
            "text": "<$view field=\"text\" format=\"text\" />"
        },
        "$:/core/templates/raw-static-tiddler": {
            "title": "$:/core/templates/raw-static-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers as static HTML\n\n--><$view field=\"text\" format=\"plainwikified\" />"
        },
        "$:/core/save/all": {
            "title": "$:/core/save/all",
            "text": "\\define saveTiddlerFilter()\n[is[tiddler]] -[prefix[$:/state/popup/]] -[[$:/HistoryList]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] +[sort[title]] $(publishFilter)$\n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/save/empty": {
            "title": "$:/core/save/empty",
            "text": "\\define saveTiddlerFilter()\n[is[system]] -[prefix[$:/state/popup/]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] +[sort[title]]\n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/save/lazy-all": {
            "title": "$:/core/save/lazy-all",
            "text": "\\define saveTiddlerFilter()\n[is[system]] -[prefix[$:/state/popup/]] -[[$:/HistoryList]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] +[sort[title]] \n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/save/lazy-images": {
            "title": "$:/core/save/lazy-images",
            "text": "\\define saveTiddlerFilter()\n[is[tiddler]] -[prefix[$:/state/popup/]] -[[$:/HistoryList]] -[[$:/boot/boot.css]] -[type[application/javascript]library[yes]] -[[$:/boot/boot.js]] -[[$:/boot/bootprefix.js]] -[!is[system]is[image]] +[sort[title]] \n\\end\n{{$:/core/templates/tiddlywiki5.html}}\n"
        },
        "$:/core/templates/single.tiddler.window": {
            "title": "$:/core/templates/single.tiddler.window",
            "text": "<$set name=\"themeTitle\" value={{$:/view}}>\n\n<$set name=\"tempCurrentTiddler\" value=<<currentTiddler>>>\n\n<$set name=\"currentTiddler\" value={{$:/language}}>\n\n<$set name=\"languageTitle\" value={{!!name}}>\n\n<$set name=\"currentTiddler\" value=<<tempCurrentTiddler>>>\n\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<$navigator story=\"$:/StoryList\" history=\"$:/HistoryList\">\n\n<$transclude mode=\"block\"/>\n\n</$navigator>\n\n</$importvariables>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n"
        },
        "$:/core/templates/split-recipe": {
            "title": "$:/core/templates/split-recipe",
            "text": "<$list filter=\"[!is[system]]\">\ntiddler: <$view field=\"title\" format=\"urlencoded\"/>.tid\n</$list>\n"
        },
        "$:/core/templates/static-tiddler": {
            "title": "$:/core/templates/static-tiddler",
            "text": "<a name=<<currentTiddler>>>\n<$transclude tiddler=\"$:/core/ui/ViewTemplate\"/>\n</a>"
        },
        "$:/core/templates/static.area": {
            "title": "$:/core/templates/static.area",
            "text": "<$reveal type=\"nomatch\" state=\"$:/isEncrypted\" text=\"yes\">\n{{{ [all[shadows+tiddlers]tag[$:/tags/RawStaticContent]!has[draft.of]] ||$:/core/templates/raw-static-tiddler}}}\n{{$:/core/templates/static.content||$:/core/templates/html-tiddler}}\n</$reveal>\n<$reveal type=\"match\" state=\"$:/isEncrypted\" text=\"yes\">\nThis file contains an encrypted ~TiddlyWiki. Enable ~JavaScript and enter the decryption password when prompted.\n</$reveal>\n"
        },
        "$:/core/templates/static.content": {
            "title": "$:/core/templates/static.content",
            "type": "text/vnd.tiddlywiki",
            "text": "<!-- For Google, and people without JavaScript-->\nThis [[TiddlyWiki|http://tiddlywiki.com]] contains the following tiddlers:\n\n<ul>\n<$list filter=<<saveTiddlerFilter>>>\n<li><$view field=\"title\" format=\"text\"></$view></li>\n</$list>\n</ul>\n"
        },
        "$:/core/templates/static.template.css": {
            "title": "$:/core/templates/static.template.css",
            "text": "{{$:/boot/boot.css||$:/core/templates/plain-text-tiddler}}\n\n{{$:/core/ui/PageStylesheet||$:/core/templates/wikified-tiddler}}\n"
        },
        "$:/core/templates/static.template.html": {
            "title": "$:/core/templates/static.template.html",
            "type": "text/vnd.tiddlywiki-html",
            "text": "\\define tv-wikilink-template() static/$uri_doubleencoded$.html\n\\define tv-config-toolbar-icons() no\n\\define tv-config-toolbar-text() no\n\\define tv-config-toolbar-class() tc-btn-invisible\n\\rules only filteredtranscludeinline transcludeinline\n<!doctype html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"{{$:/core/templates/version}}\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\" />\n<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\" />\n<meta name=\"mobile-web-app-capable\" content=\"yes\"/>\n<meta name=\"format-detection\" content=\"telephone=no\">\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<title>{{$:/core/wiki/title}}</title>\n<div id=\"styleArea\">\n{{$:/boot/boot.css||$:/core/templates/css-tiddler}}\n</div>\n<style type=\"text/css\">\n{{$:/core/ui/PageStylesheet||$:/core/templates/wikified-tiddler}}\n</style>\n</head>\n<body class=\"tc-body\">\n{{$:/StaticBanner||$:/core/templates/html-tiddler}}\n{{$:/core/ui/PageTemplate||$:/core/templates/html-tiddler}}\n</body>\n</html>\n"
        },
        "$:/core/templates/static.tiddler.html": {
            "title": "$:/core/templates/static.tiddler.html",
            "text": "\\define tv-wikilink-template() $uri_doubleencoded$.html\n\\define tv-config-toolbar-icons() no\n\\define tv-config-toolbar-text() no\n\\define tv-config-toolbar-class() tc-btn-invisible\n`<!doctype html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"`{{$:/core/templates/version}}`\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\" />\n<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\" />\n<meta name=\"mobile-web-app-capable\" content=\"yes\"/>\n<meta name=\"format-detection\" content=\"telephone=no\">\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<link rel=\"stylesheet\" href=\"static.css\">\n<title>`<$view field=\"caption\"><$view field=\"title\"/></$view>: {{$:/core/wiki/title}}`</title>\n</head>\n<body class=\"tc-body\">\n`{{$:/StaticBanner||$:/core/templates/html-tiddler}}`\n<section class=\"tc-story-river\">\n`<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n<$view tiddler=\"$:/core/ui/ViewTemplate\" format=\"htmlwikified\"/>\n</$importvariables>`\n</section>\n</body>\n</html>\n`"
        },
        "$:/core/templates/store.area.template.html": {
            "title": "$:/core/templates/store.area.template.html",
            "text": "<$reveal type=\"nomatch\" state=\"$:/isEncrypted\" text=\"yes\">\n`<div id=\"storeArea\" style=\"display:none;\">`\n<$list filter=<<saveTiddlerFilter>> template=\"$:/core/templates/html-div-tiddler\"/>\n`</div>`\n</$reveal>\n<$reveal type=\"match\" state=\"$:/isEncrypted\" text=\"yes\">\n`<!--~~ Encrypted tiddlers ~~-->`\n`<pre id=\"encryptedStoreArea\" type=\"text/plain\" style=\"display:none;\">`\n<$encrypt filter=<<saveTiddlerFilter>>/>\n`</pre>`\n</$reveal>"
        },
        "$:/core/templates/tid-tiddler": {
            "title": "$:/core/templates/tid-tiddler",
            "text": "<!--\n\nThis template is used for saving tiddlers in TiddlyWeb *.tid format\n\n--><$fields exclude='text bag' template='$name$: $value$\n'></$fields>`\n`<$view field=\"text\" format=\"text\" />"
        },
        "$:/core/templates/tiddler-metadata": {
            "title": "$:/core/templates/tiddler-metadata",
            "text": "<!--\n\nThis template is used for saving tiddler metadata *.meta files\n\n--><$fields exclude='text bag' template='$name$: $value$\n'></$fields>"
        },
        "$:/core/templates/tiddlywiki5.html": {
            "title": "$:/core/templates/tiddlywiki5.html",
            "text": "\\rules only filteredtranscludeinline transcludeinline\n<!doctype html>\n{{$:/core/templates/MOTW.html}}<html>\n<head>\n<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />\t\t<!-- Force IE standards mode for Intranet and HTA - should be the first meta -->\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" />\n<meta name=\"application-name\" content=\"TiddlyWiki\" />\n<meta name=\"generator\" content=\"TiddlyWiki\" />\n<meta name=\"tiddlywiki-version\" content=\"{{$:/core/templates/version}}\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\" />\n<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\" />\n<meta name=\"mobile-web-app-capable\" content=\"yes\"/>\n<meta name=\"format-detection\" content=\"telephone=no\" />\n<meta name=\"copyright\" content=\"{{$:/core/copyright.txt}}\" />\n<link id=\"faviconLink\" rel=\"shortcut icon\" href=\"favicon.ico\">\n<title>{{$:/core/wiki/title}}</title>\n<!--~~ This is a Tiddlywiki file. The points of interest in the file are marked with this pattern ~~-->\n\n<!--~~ Raw markup ~~-->\n{{{ [all[shadows+tiddlers]tag[$:/core/wiki/rawmarkup]] [all[shadows+tiddlers]tag[$:/tags/RawMarkup]] ||$:/core/templates/plain-text-tiddler}}}\n</head>\n<body class=\"tc-body\">\n<!--~~ Static styles ~~-->\n<div id=\"styleArea\">\n{{$:/boot/boot.css||$:/core/templates/css-tiddler}}\n</div>\n<!--~~ Static content for Google and browsers without JavaScript ~~-->\n<noscript>\n<div id=\"splashArea\">\n{{$:/core/templates/static.area}}\n</div>\n</noscript>\n<!--~~ Ordinary tiddlers ~~-->\n{{$:/core/templates/store.area.template.html}}\n<!--~~ Library modules ~~-->\n<div id=\"libraryModules\" style=\"display:none;\">\n{{{ [is[system]type[application/javascript]library[yes]] ||$:/core/templates/javascript-tiddler}}}\n</div>\n<!--~~ Boot kernel prologue ~~-->\n<div id=\"bootKernelPrefix\" style=\"display:none;\">\n{{ $:/boot/bootprefix.js ||$:/core/templates/javascript-tiddler}}\n</div>\n<!--~~ Boot kernel ~~-->\n<div id=\"bootKernel\" style=\"display:none;\">\n{{ $:/boot/boot.js ||$:/core/templates/javascript-tiddler}}\n</div>\n</body>\n</html>\n"
        },
        "$:/core/templates/version": {
            "title": "$:/core/templates/version",
            "text": "<<version>>"
        },
        "$:/core/templates/wikified-tiddler": {
            "title": "$:/core/templates/wikified-tiddler",
            "text": "<$transclude />"
        },
        "$:/core/ui/AboveStory/tw2-plugin-check": {
            "title": "$:/core/ui/AboveStory/tw2-plugin-check",
            "tags": "$:/tags/AboveStory",
            "text": "\\define lingo-base() $:/language/AboveStory/ClassicPlugin/\n<$list filter=\"[all[system+tiddlers]tag[systemConfig]limit[1]]\">\n\n<div class=\"tc-message-box\">\n\n<<lingo Warning>>\n\n<ul>\n\n<$list filter=\"[all[system+tiddlers]tag[systemConfig]limit[1]]\">\n\n<li>\n\n<$link><$view field=\"title\"/></$link>\n\n</li>\n\n</$list>\n\n</ul>\n\n</div>\n\n</$list>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter": {
            "title": "$:/core/ui/AdvancedSearch/Filter",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/Filter/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<<lingo Filter/Hint>>\n\n<div class=\"tc-search tc-advanced-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/AdvancedSearch/FilterButton]!has[draft.of]]\"><$transclude/></$list>\n</div>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$set name=\"resultCount\" value=\"\"\"<$count filter={{$:/temp/advancedsearch}}/>\"\"\">\n<div class=\"tc-search-results\">\n<<lingo Filter/Matches>>\n<$list filter={{$:/temp/advancedsearch}} template=\"$:/core/ui/ListItemTemplate\"/>\n</div>\n</$set>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/clear": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/clear",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/delete": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/delete",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button popup=<<qualify \"$:/state/filterDeleteDropdown\">> class=\"tc-btn-invisible\">\n{{$:/core/images/delete-button}}\n</$button>\n</$reveal>\n\n<$reveal state=<<qualify \"$:/state/filterDeleteDropdown\">> type=\"popup\" position=\"belowleft\" animate=\"yes\">\n<div class=\"tc-block-dropdown-wrapper\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<div class=\"tc-dropdown-item-plain\">\n<$set name=\"resultCount\" value=\"\"\"<$count filter={{$:/temp/advancedsearch}}/>\"\"\">\nAre you sure you wish to delete <<resultCount>> tiddler(s)?\n</$set>\n</div>\n<div class=\"tc-dropdown-item-plain\">\n<$button class=\"tc-btn\">\n<$action-deletetiddler $filter={{$:/temp/advancedsearch}}/>\nDelete these tiddlers\n</$button>\n</div>\n</div>\n</div>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/dropdown": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/dropdown",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/filterDropdown\">> class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n</$button>\n</span>\n\n<$reveal state=<<qualify \"$:/state/filterDropdown\">> type=\"popup\" position=\"belowleft\" animate=\"yes\">\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n<div class=\"tc-block-dropdown-wrapper\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Filter]]\"><$link to={{!!filter}}><$transclude field=\"description\"/></$link>\n</$list>\n</div>\n</div>\n</$linkcatcher>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Filter/FilterButtons/export": {
            "title": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/export",
            "tags": "$:/tags/AdvancedSearch/FilterButton",
            "text": "<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$macrocall $name=\"exportButton\" exportFilter={{$:/temp/advancedsearch}} lingoBase=\"$:/language/Buttons/ExportTiddlers/\"/>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Shadows": {
            "title": "$:/core/ui/AdvancedSearch/Shadows",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/Shadows/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo Shadows/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n\n<$set name=\"resultCount\" value=\"\"\"<$count filter=\"[all[shadows]search{$:/temp/advancedsearch}] -[[$:/temp/advancedsearch]]\"/>\"\"\">\n\n<div class=\"tc-search-results\">\n\n<<lingo Shadows/Matches>>\n\n<$list filter=\"[all[shadows]search{$:/temp/advancedsearch}sort[title]limit[250]] -[[$:/temp/advancedsearch]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n</div>\n\n</$set>\n\n</$reveal>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"match\" text=\"\">\n\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/Standard": {
            "title": "$:/core/ui/AdvancedSearch/Standard",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/Standard/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo Standard/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$set name=\"searchTiddler\" value=\"$:/temp/advancedsearch\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]butfirst[]limit[1]]\" emptyMessage=\"\"\"\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\">\n<$transclude/>\n</$list>\n\"\"\">\n<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\" default={{$:/config/SearchResults/Default}}/>\n</$list>\n</$set>\n</$reveal>\n"
        },
        "$:/core/ui/AdvancedSearch/System": {
            "title": "$:/core/ui/AdvancedSearch/System",
            "tags": "$:/tags/AdvancedSearch",
            "caption": "{{$:/language/Search/System/Caption}}",
            "text": "\\define lingo-base() $:/language/Search/\n<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo System/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n\n<$set name=\"resultCount\" value=\"\"\"<$count filter=\"[is[system]search{$:/temp/advancedsearch}] -[[$:/temp/advancedsearch]]\"/>\"\"\">\n\n<div class=\"tc-search-results\">\n\n<<lingo System/Matches>>\n\n<$list filter=\"[is[system]search{$:/temp/advancedsearch}sort[title]limit[250]] -[[$:/temp/advancedsearch]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n</div>\n\n</$set>\n\n</$reveal>\n\n<$reveal state=\"$:/temp/advancedsearch\" type=\"match\" text=\"\">\n\n</$reveal>\n"
        },
        "$:/AdvancedSearch": {
            "title": "$:/AdvancedSearch",
            "icon": "$:/core/images/advanced-search-button",
            "color": "#bbb",
            "text": "<div class=\"tc-advanced-search\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/AdvancedSearch]!has[draft.of]]\" \"$:/core/ui/AdvancedSearch/System\">>\n</div>\n"
        },
        "$:/core/ui/AlertTemplate": {
            "title": "$:/core/ui/AlertTemplate",
            "text": "<div class=\"tc-alert\">\n<div class=\"tc-alert-toolbar\">\n<$button class=\"tc-btn-invisible\"><$action-deletetiddler $tiddler=<<currentTiddler>>/>{{$:/core/images/delete-button}}</$button>\n</div>\n<div class=\"tc-alert-subtitle\">\n<$view field=\"component\"/> - <$view field=\"modified\" format=\"date\" template=\"0hh:0mm:0ss DD MM YYYY\"/> <$reveal type=\"nomatch\" state=\"!!count\" text=\"\"><span class=\"tc-alert-highlight\">({{$:/language/Count}}: <$view field=\"count\"/>)</span></$reveal>\n</div>\n<div class=\"tc-alert-body\">\n\n<$transclude/>\n\n</div>\n</div>\n"
        },
        "$:/core/ui/BinaryWarning": {
            "title": "$:/core/ui/BinaryWarning",
            "text": "\\define lingo-base() $:/language/BinaryWarning/\n<div class=\"tc-binary-warning\">\n\n<<lingo Prompt>>\n\n</div>\n"
        },
        "$:/core/ui/Components/tag-link": {
            "title": "$:/core/ui/Components/tag-link",
            "text": "<$link>\n<$set name=\"backgroundColor\" value={{!!color}}>\n<span style=<<tag-styles>> class=\"tc-tag-label\">\n<$view field=\"title\" format=\"text\"/>\n</span>\n</$set>\n</$link>"
        },
        "$:/core/ui/ControlPanel/Advanced": {
            "title": "$:/core/ui/ControlPanel/Advanced",
            "tags": "$:/tags/ControlPanel/Info",
            "caption": "{{$:/language/ControlPanel/Advanced/Caption}}",
            "text": "{{$:/language/ControlPanel/Advanced/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Advanced]!has[draft.of]]\" \"$:/core/ui/ControlPanel/TiddlerFields\">>\n</div>\n"
        },
        "$:/core/ui/ControlPanel/Appearance": {
            "title": "$:/core/ui/ControlPanel/Appearance",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Appearance/Caption}}",
            "text": "{{$:/language/ControlPanel/Appearance/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Appearance]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Theme\">>\n</div>\n"
        },
        "$:/core/ui/ControlPanel/Basics": {
            "title": "$:/core/ui/ControlPanel/Basics",
            "tags": "$:/tags/ControlPanel/Info",
            "caption": "{{$:/language/ControlPanel/Basics/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Basics/\n\n\\define show-filter-count(filter)\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $value=\"\"\"$filter$\"\"\"/>\n<$action-setfield $tiddler=\"$:/state/tab--1498284803\" $value=\"$:/core/ui/AdvancedSearch/Filter\"/>\n<$action-navigate $to=\"$:/AdvancedSearch\"/>\n''<$count filter=\"\"\"$filter$\"\"\"/>''\n{{$:/core/images/advanced-search-button}}\n</$button>\n\\end\n\n|<<lingo Version/Prompt>> |''<<version>>'' |\n|<$link to=\"$:/SiteTitle\"><<lingo Title/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteTitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/SiteSubtitle\"><<lingo Subtitle/Prompt>></$link> |<$edit-text tiddler=\"$:/SiteSubtitle\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/status/UserName\"><<lingo Username/Prompt>></$link> |<$edit-text tiddler=\"$:/status/UserName\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/config/AnimationDuration\"><<lingo AnimDuration/Prompt>></$link> |<$edit-text tiddler=\"$:/config/AnimationDuration\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/DefaultTiddlers\"><<lingo DefaultTiddlers/Prompt>></$link> |<<lingo DefaultTiddlers/TopHint>><br> <$edit tag=\"textarea\" tiddler=\"$:/DefaultTiddlers\" class=\"tc-edit-texteditor\"/><br>//<<lingo DefaultTiddlers/BottomHint>>// |\n|<$link to=\"$:/config/NewJournal/Title\"><<lingo NewJournal/Title/Prompt>></$link> |<$edit-text tiddler=\"$:/config/NewJournal/Title\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/config/NewJournal/Tags\"><<lingo NewJournal/Tags/Prompt>></$link> |<$edit-text tiddler=\"$:/config/NewJournal/Tags\" default=\"\" tag=\"input\"/> |\n|<<lingo Language/Prompt>> |{{$:/snippets/minilanguageswitcher}} |\n|<<lingo Tiddlers/Prompt>> |<<show-filter-count \"[!is[system]sort[title]]\">> |\n|<<lingo Tags/Prompt>> |<<show-filter-count \"[tags[]sort[title]]\">> |\n|<<lingo SystemTiddlers/Prompt>> |<<show-filter-count \"[is[system]sort[title]]\">> |\n|<<lingo ShadowTiddlers/Prompt>> |<<show-filter-count \"[all[shadows]sort[title]]\">> |\n|<<lingo OverriddenShadowTiddlers/Prompt>> |<<show-filter-count \"[is[tiddler]is[shadow]sort[title]]\">> |\n"
        },
        "$:/core/ui/ControlPanel/EditorTypes": {
            "title": "$:/core/ui/ControlPanel/EditorTypes",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/EditorTypes/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/EditorTypes/\n\n<<lingo Hint>>\n\n<table>\n<tbody>\n<tr>\n<th><<lingo Type/Caption>></th>\n<th><<lingo Editor/Caption>></th>\n</tr>\n<$list filter=\"[all[shadows+tiddlers]prefix[$:/config/EditorTypeMappings/]sort[title]]\">\n<tr>\n<td>\n<$link>\n<$list filter=\"[all[current]removeprefix[$:/config/EditorTypeMappings/]]\">\n<$text text={{!!title}}/>\n</$list>\n</$link>\n</td>\n<td>\n<$view field=\"text\"/>\n</td>\n</tr>\n</$list>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/ControlPanel/Info": {
            "title": "$:/core/ui/ControlPanel/Info",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Info/Caption}}",
            "text": "{{$:/language/ControlPanel/Info/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Info]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Basics\">>\n</div>\n"
        },
        "$:/core/ui/ControlPanel/KeyboardShortcuts": {
            "title": "$:/core/ui/ControlPanel/KeyboardShortcuts",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/KeyboardShortcuts/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/KeyboardShortcuts/\n\n\\define new-shortcut(title)\n<div class=\"tc-dropdown-item-plain\">\n<$edit-shortcut tiddler=\"$title$\" placeholder={{$:/language/ControlPanel/KeyboardShortcuts/Add/Prompt}} style=\"width:auto;\"/> <$button>\n<<lingo Add/Caption>>\n<$action-listops\n\t$tiddler=\"$(shortcutTitle)$\"\n\t$field=\"text\"\n\t$subfilter=\"[{$title$}]\"\n/>\n<$action-deletetiddler\n\t$tiddler=\"$title$\"\n/>\n</$button>\n</div>\n\\end\n\n\\define shortcut-list-item(caption)\n<td>\n</td>\n<td style=\"text-align:right;font-size:0.7em;\">\n<<lingo Platform/$caption$>>\n</td>\n<td>\n<div style=\"position:relative;\">\n<$button popup=<<qualify \"$:/state/dropdown/$(shortcutTitle)$\">> class=\"tc-btn-invisible\">\n{{$:/core/images/edit-button}}\n</$button>\n<$macrocall $name=\"displayshortcuts\" $output=\"text/html\" shortcuts={{$(shortcutTitle)$}} prefix=\"<kbd>\" separator=\"</kbd> <kbd>\" suffix=\"</kbd>\"/>\n\n<$reveal state=<<qualify \"$:/state/dropdown/$(shortcutTitle)$\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-block-dropdown-wrapper\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown tc-popup-keep\">\n<$list filter=\"[list[$(shortcutTitle)$!!text]sort[title]]\" variable=\"shortcut\" emptyMessage=\"\"\"\n<div class=\"tc-dropdown-item-plain\">\n//<<lingo NoShortcuts/Caption>>//\n</div>\n\"\"\">\n<div class=\"tc-dropdown-item-plain\">\n<$button class=\"tc-btn-invisible\" tooltip=<<lingo Remove/Hint>>>\n<$action-listops\n\t$tiddler=\"$(shortcutTitle)$\"\n\t$field=\"text\"\n\t$subfilter=\"+[remove<shortcut>]\"\n/>\n&times;\n</$button>\n<kbd>\n<$macrocall $name=\"displayshortcuts\" $output=\"text/html\" shortcuts=<<shortcut>>/>\n</kbd>\n</div>\n</$list>\n<hr/>\n<$macrocall $name=\"new-shortcut\" title=<<qualify \"$:/state/new-shortcut/$(shortcutTitle)$\">>/>\n</div>\n</div>\n</$reveal>\n</div>\n</td>\n\\end\n\n\\define shortcut-list(caption,prefix)\n<tr>\n<$list filter=\"[all[tiddlers+shadows][$prefix$$(shortcutName)$]]\" variable=\"shortcutTitle\">\n<<shortcut-list-item \"$caption$\">>\n</$list>\n</tr>\n\\end\n\n\\define shortcut-editor()\n<<shortcut-list \"All\" \"$:/config/shortcuts/\">>\n<<shortcut-list \"Mac\" \"$:/config/shortcuts-mac/\">>\n<<shortcut-list \"NonMac\" \"$:/config/shortcuts-not-mac/\">>\n<<shortcut-list \"Linux\" \"$:/config/shortcuts-linux/\">>\n<<shortcut-list \"NonLinux\" \"$:/config/shortcuts-not-linux/\">>\n<<shortcut-list \"Windows\" \"$:/config/shortcuts-windows/\">>\n<<shortcut-list \"NonWindows\" \"$:/config/shortcuts-not-windows/\">>\n\\end\n\n\\define shortcut-preview()\n<$macrocall $name=\"displayshortcuts\" $output=\"text/html\" shortcuts={{$(shortcutPrefix)$$(shortcutName)$}} prefix=\"<kbd>\" separator=\"</kbd> <kbd>\" suffix=\"</kbd>\"/>\n\\end\n\n\\define shortcut-item-inner()\n<tr>\n<td>\n<$reveal type=\"nomatch\" state=<<dropdownStateTitle>> text=\"open\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield\n\t$tiddler=<<dropdownStateTitle>>\n\t$value=\"open\"\n/>\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<dropdownStateTitle>> text=\"open\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield\n\t$tiddler=<<dropdownStateTitle>>\n\t$value=\"close\"\n/>\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n''<$text text=<<shortcutName>>/>''\n</td>\n<td>\n<$transclude tiddler=\"$:/config/ShortcutInfo/$(shortcutName)$\"/>\n</td>\n<td>\n<$list filter=\"$:/config/shortcuts/ $:/config/shortcuts-mac/ 
$:/config/shortcuts-not-mac/ $:/config/shortcuts-linux/ $:/config/shortcuts-not-linux/ $:/config/shortcuts-windows/ $:/config/shortcuts-not-windows/\" variable=\"shortcutPrefix\">\n<<shortcut-preview>>\n</$list>\n</td>\n</tr>\n<$set name=\"dropdownState\" value={{$(dropdownStateTitle)$}}>\n<$list filter=\"[<dropdownState>prefix[open]]\" variable=\"listItem\">\n<<shortcut-editor>>\n</$list>\n</$set>\n\\end\n\n\\define shortcut-item()\n<$set name=\"dropdownStateTitle\" value=<<qualify \"$:/state/dropdown/keyboardshortcut/$(shortcutName)$\">>>\n<<shortcut-item-inner>>\n</$set>\n\\end\n\n<table>\n<tbody>\n<$list filter=\"[all[shadows+tiddlers]removeprefix[$:/config/ShortcutInfo/]]\" variable=\"shortcutName\">\n<<shortcut-item>>\n</$list>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/ControlPanel/LoadedModules": {
            "title": "$:/core/ui/ControlPanel/LoadedModules",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/LoadedModules/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/\n<<lingo LoadedModules/Hint>>\n\n{{$:/snippets/modules}}\n"
        },
        "$:/core/ui/ControlPanel/Modals/AddPlugins": {
            "title": "$:/core/ui/ControlPanel/Modals/AddPlugins",
            "subtitle": "{{$:/core/images/download-button}} {{$:/language/ControlPanel/Plugins/Add/Caption}}",
            "text": "\\define install-plugin-button()\n<$button>\n<$action-sendmessage $message=\"tm-load-plugin-from-library\" url={{!!url}} title={{$(assetInfo)$!!original-title}}/>\n<$list filter=\"[<assetInfo>get[original-title]get[version]]\" variable=\"installedVersion\" emptyMessage=\"\"\"{{$:/language/ControlPanel/Plugins/Install/Caption}}\"\"\">\n{{$:/language/ControlPanel/Plugins/Reinstall/Caption}}\n</$list>\n</$button>\n\\end\n\n\\define popup-state-macro()\n$:/state/add-plugin-info/$(connectionTiddler)$/$(assetInfo)$\n\\end\n\n\\define display-plugin-info(type)\n<$set name=\"popup-state\" value=<<popup-state-macro>>>\n<div class=\"tc-plugin-info\">\n<div class=\"tc-plugin-info-chunk tc-small-icon\">\n<$reveal type=\"nomatch\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"yes\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"no\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<$list filter=\"[<assetInfo>has[icon]]\" emptyMessage=\"\"\"<$transclude tiddler=\"$:/core/images/plugin-generic-$type$\"/>\"\"\">\n<img src={{$(assetInfo)$!!icon}}/>\n</$list>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<h1><$view tiddler=<<assetInfo>> field=\"description\"/></h1>\n<h2><$view tiddler=<<assetInfo>> field=\"original-title\"/></h2>\n<div><em><$view tiddler=<<assetInfo>> field=\"version\"/></em></div>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<<install-plugin-button>>\n</div>\n</div>\n<$reveal type=\"match\" text=\"yes\" state=<<popup-state>>>\n<div class=\"tc-plugin-info-dropdown\">\n<div class=\"tc-plugin-info-dropdown-message\">\n<$list filter=\"[<assetInfo>get[original-title]get[version]]\" variable=\"installedVersion\" emptyMessage=\"\"\"{{$:/language/ControlPanel/Plugins/NotInstalled/Hint}}\"\"\">\n<em>\n{{$:/language/ControlPanel/Plugins/AlreadyInstalled/Hint}}\n</em>\n</$list>\n</div>\n<div class=\"tc-plugin-info-dropdown-body\">\n<$transclude tiddler=<<assetInfo>> field=\"readme\" mode=\"block\"/>\n</div>\n</div>\n</$reveal>\n</$set>\n\\end\n\n\\define load-plugin-library-button()\n<$button class=\"tc-btn-big-green\">\n<$action-sendmessage $message=\"tm-load-plugin-library\" url={{!!url}} infoTitlePrefix=\"$:/temp/RemoteAssetInfo/\"/>\n{{$:/core/images/chevron-right}} {{$:/language/ControlPanel/Plugins/OpenPluginLibrary}}\n</$button>\n\\end\n\n\\define display-server-assets(type)\n{{$:/language/Search/Search}}: <$edit-text tiddler=\"\"\"$:/temp/RemoteAssetSearch/$(currentTiddler)$\"\"\" default=\"\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"\"\"$:/temp/RemoteAssetSearch/$(currentTiddler)$\"\"\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"\"\"$:/temp/RemoteAssetSearch/$(currentTiddler)$\"\"\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n<div class=\"tc-plugin-library-listing\">\n<$list filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[$type$]search{$:/temp/RemoteAssetSearch/$(currentTiddler)$}sort[description]]\" variable=\"assetInfo\">\n<<display-plugin-info \"$type$\">>\n</$list>\n</div>\n\\end\n\n\\define display-server-connection()\n<$list filter=\"[all[tiddlers+shadows]tag[$:/tags/ServerConnection]suffix{!!url}]\" variable=\"connectionTiddler\" 
emptyMessage=<<load-plugin-library-button>>>\n\n<<tabs \"[[$:/core/ui/ControlPanel/Plugins/Add/Plugins]] [[$:/core/ui/ControlPanel/Plugins/Add/Themes]] [[$:/core/ui/ControlPanel/Plugins/Add/Languages]]\" \"$:/core/ui/ControlPanel/Plugins/Add/Plugins\">>\n\n</$list>\n\\end\n\n\\define plugin-library-listing()\n<$list filter=\"[all[tiddlers+shadows]tag[$:/tags/PluginLibrary]]\">\n<div class=\"tc-plugin-library\">\n\n!! <$link><$transclude field=\"caption\"><$view field=\"title\"/></$transclude></$link>\n\n//<$view field=\"url\"/>//\n\n<$transclude/>\n\n<<display-server-connection>>\n</div>\n</$list>\n\\end\n\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<div>\n<<plugin-library-listing>>\n</div>\n\n</$importvariables>\n"
        },
        "$:/core/ui/ControlPanel/Palette": {
            "title": "$:/core/ui/ControlPanel/Palette",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/Palette/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Palette/\n\n{{$:/snippets/paletteswitcher}}\n\n<$reveal type=\"nomatch\" state=\"$:/state/ShowPaletteEditor\" text=\"yes\">\n\n<$button set=\"$:/state/ShowPaletteEditor\" setTo=\"yes\"><<lingo ShowEditor/Caption>></$button>\n\n</$reveal>\n\n<$reveal type=\"match\" state=\"$:/state/ShowPaletteEditor\" text=\"yes\">\n\n<$button set=\"$:/state/ShowPaletteEditor\" setTo=\"no\"><<lingo HideEditor/Caption>></$button>\n{{$:/snippets/paletteeditor}}\n\n</$reveal>\n\n"
        },
        "$:/core/ui/ControlPanel/Parsing": {
            "title": "$:/core/ui/ControlPanel/Parsing",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/Parsing/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Parsing/\n\n\\define parsing-inner(typeCap)\n<li>\n<$checkbox tiddler=\"\"\"$:/config/WikiParserRules/$typeCap$/$(currentTiddler)$\"\"\" field=\"text\" checked=\"enable\" unchecked=\"disable\" default=\"enable\"> ''<$text text=<<currentTiddler>>/>'': </$checkbox>\n</li>\n\\end\n\n\\define parsing-outer(typeLower,typeCap)\n<ul>\n<$list filter=\"[wikiparserrules[$typeLower$]]\">\n<<parsing-inner typeCap:\"$typeCap$\">>\n</$list>\n</ul>\n\\end\n\n<<lingo Hint>>\n\n! <<lingo Pragma/Caption>>\n\n<<parsing-outer typeLower:\"pragma\" typeCap:\"Pragma\">>\n\n! <<lingo Inline/Caption>>\n\n<<parsing-outer typeLower:\"inline\" typeCap:\"Inline\">>\n\n! <<lingo Block/Caption>>\n\n<<parsing-outer typeLower:\"block\" typeCap:\"Block\">>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Add/Languages": {
            "title": "$:/core/ui/ControlPanel/Plugins/Add/Languages",
            "caption": "{{$:/language/ControlPanel/Plugins/Languages/Caption}} (<$count filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[language]]\"/>)",
            "text": "<<display-server-assets language>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Add/Plugins": {
            "title": "$:/core/ui/ControlPanel/Plugins/Add/Plugins",
            "caption": "{{$:/language/ControlPanel/Plugins/Plugins/Caption}}  (<$count filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[plugin]]\"/>)",
            "text": "<<display-server-assets plugin>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Add/Themes": {
            "title": "$:/core/ui/ControlPanel/Plugins/Add/Themes",
            "caption": "{{$:/language/ControlPanel/Plugins/Themes/Caption}}  (<$count filter=\"[all[tiddlers+shadows]tag[$:/tags/RemoteAssetInfo]server-url{!!url}original-plugin-type[theme]]\"/>)",
            "text": "<<display-server-assets theme>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/AddPlugins": {
            "title": "$:/core/ui/ControlPanel/Plugins/AddPlugins",
            "text": "\\define lingo-base() $:/language/ControlPanel/Plugins/\n\n<$button message=\"tm-modal\" param=\"$:/core/ui/ControlPanel/Modals/AddPlugins\" tooltip={{$:/language/ControlPanel/Plugins/Add/Hint}} class=\"tc-btn-big-green\" style=\"background:blue;\">\n{{$:/core/images/download-button}} <<lingo Add/Caption>>\n</$button>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Installed/Languages": {
            "title": "$:/core/ui/ControlPanel/Plugins/Installed/Languages",
            "caption": "{{$:/language/ControlPanel/Plugins/Languages/Caption}} (<$count filter=\"[!has[draft.of]plugin-type[language]]\"/>)",
            "text": "<<plugin-table language>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Installed/Plugins": {
            "title": "$:/core/ui/ControlPanel/Plugins/Installed/Plugins",
            "caption": "{{$:/language/ControlPanel/Plugins/Plugins/Caption}} (<$count filter=\"[!has[draft.of]plugin-type[plugin]]\"/>)",
            "text": "<<plugin-table plugin>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins/Installed/Themes": {
            "title": "$:/core/ui/ControlPanel/Plugins/Installed/Themes",
            "caption": "{{$:/language/ControlPanel/Plugins/Themes/Caption}} (<$count filter=\"[!has[draft.of]plugin-type[theme]]\"/>)",
            "text": "<<plugin-table theme>>\n"
        },
        "$:/core/ui/ControlPanel/Plugins": {
            "title": "$:/core/ui/ControlPanel/Plugins",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Plugins/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Plugins/\n\n\\define popup-state-macro()\n$(qualified-state)$-$(currentTiddler)$\n\\end\n\n\\define tabs-state-macro()\n$(popup-state)$-$(pluginInfoType)$\n\\end\n\n\\define plugin-icon-title()\n$(currentTiddler)$/icon\n\\end\n\n\\define plugin-disable-title()\n$:/config/Plugins/Disabled/$(currentTiddler)$\n\\end\n\n\\define plugin-table-body(type,disabledMessage)\n<div class=\"tc-plugin-info-chunk tc-small-icon\">\n<$reveal type=\"nomatch\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"yes\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<popup-state>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<popup-state>> setTo=\"no\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<$transclude tiddler=<<currentTiddler>> subtiddler=<<plugin-icon-title>>>\n<$transclude tiddler=\"$:/core/images/plugin-generic-$type$\"/>\n</$transclude>\n</div>\n<div class=\"tc-plugin-info-chunk\">\n<h1>\n''<$view field=\"description\"><$view field=\"title\"/></$view>'' $disabledMessage$\n</h1>\n<h2>\n<$view field=\"title\"/>\n</h2>\n<h2>\n<div><em><$view field=\"version\"/></em></div>\n</h2>\n</div>\n\\end\n\n\\define plugin-table(type)\n<$set name=\"qualified-state\" value=<<qualify \"$:/state/plugin-info\">>>\n<$list filter=\"[!has[draft.of]plugin-type[$type$]sort[description]]\" emptyMessage=<<lingo \"Empty/Hint\">>>\n<$set name=\"popup-state\" value=<<popup-state-macro>>>\n<$reveal type=\"nomatch\" state=<<plugin-disable-title>> text=\"yes\">\n<$link to={{!!title}} class=\"tc-plugin-info\">\n<<plugin-table-body type:\"$type$\">>\n</$link>\n</$reveal>\n<$reveal type=\"match\" state=<<plugin-disable-title>> text=\"yes\">\n<$link to={{!!title}} class=\"tc-plugin-info tc-plugin-info-disabled\">\n<<plugin-table-body type:\"$type$\" disabledMessage:\"<$macrocall $name='lingo' title='Disabled/Status'/>\">>\n</$link>\n</$reveal>\n<$reveal type=\"match\" text=\"yes\" state=<<popup-state>>>\n<div class=\"tc-plugin-info-dropdown\">\n<div class=\"tc-plugin-info-dropdown-body\">\n<$list filter=\"[all[current]] -[[$:/core]]\">\n<div style=\"float:right;\">\n<$reveal type=\"nomatch\" state=<<plugin-disable-title>> text=\"yes\">\n<$button set=<<plugin-disable-title>> setTo=\"yes\" tooltip={{$:/language/ControlPanel/Plugins/Disable/Hint}} aria-label={{$:/language/ControlPanel/Plugins/Disable/Caption}}>\n<<lingo Disable/Caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<plugin-disable-title>> text=\"yes\">\n<$button set=<<plugin-disable-title>> setTo=\"no\" tooltip={{$:/language/ControlPanel/Plugins/Enable/Hint}} aria-label={{$:/language/ControlPanel/Plugins/Enable/Caption}}>\n<<lingo Enable/Caption>>\n</$button>\n</$reveal>\n</div>\n</$list>\n<$reveal type=\"nomatch\" text=\"\" state=\"!!list\">\n<$macrocall $name=\"tabs\" state=<<tabs-state-macro>> tabsList={{!!list}} default=\"readme\" template=\"$:/core/ui/PluginInfo\"/>\n</$reveal>\n<$reveal type=\"match\" text=\"\" state=\"!!list\">\n<<lingo NoInformation/Hint>>\n</$reveal>\n</div>\n</div>\n</$reveal>\n</$set>\n</$list>\n</$set>\n\\end\n\n{{$:/core/ui/ControlPanel/Plugins/AddPlugins}}\n\n<<lingo Installed/Hint>>\n\n<<tabs \"[[$:/core/ui/ControlPanel/Plugins/Installed/Plugins]] [[$:/core/ui/ControlPanel/Plugins/Installed/Themes]] [[$:/core/ui/ControlPanel/Plugins/Installed/Languages]]\" 
\"$:/core/ui/ControlPanel/Plugins/Installed/Plugins\">>\n"
        },
        "$:/core/ui/ControlPanel/Saving": {
            "title": "$:/core/ui/ControlPanel/Saving",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Saving/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Saving/\n\\define backupURL()\nhttp://$(userName)$.tiddlyspot.com/backup/\n\\end\n\\define backupLink()\n<$reveal type=\"nomatch\" state=\"$:/UploadName\" text=\"\">\n<$set name=\"userName\" value={{$:/UploadName}}>\n<$reveal type=\"match\" state=\"$:/UploadURL\" text=\"\">\n<<backupURL>>\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/UploadURL\" text=\"\">\n<$macrocall $name=resolvePath source={{$:/UploadBackupDir}} root={{$:/UploadURL}}>>\n</$reveal>\n</$set>\n</$reveal>\n\\end\n! <<lingo TiddlySpot/Heading>>\n\n<<lingo TiddlySpot/Description>>\n\n|<<lingo TiddlySpot/UserName>> |<$edit-text tiddler=\"$:/UploadName\" default=\"\" tag=\"input\"/> |\n|<<lingo TiddlySpot/Password>> |<$password name=\"upload\"/> |\n|<<lingo TiddlySpot/Backups>> |<<backupLink>> |\n\n''<<lingo TiddlySpot/Advanced/Heading>>''\n\n|<<lingo TiddlySpot/ServerURL>>  |<$edit-text tiddler=\"$:/UploadURL\" default=\"\" tag=\"input\"/> |\n|<<lingo TiddlySpot/Filename>> |<$edit-text tiddler=\"$:/UploadFilename\" default=\"index.html\" tag=\"input\"/> |\n|<<lingo TiddlySpot/UploadDir>> |<$edit-text tiddler=\"$:/UploadDir\" default=\".\" tag=\"input\"/> |\n|<<lingo TiddlySpot/BackupDir>> |<$edit-text tiddler=\"$:/UploadBackupDir\" default=\".\" tag=\"input\"/> |\n\n<<lingo TiddlySpot/Hint>>"
        },
        "$:/core/ui/ControlPanel/Settings/AutoSave": {
            "title": "$:/core/ui/ControlPanel/Settings/AutoSave",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/AutoSave/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/AutoSave/\n\n<$link to=\"$:/config/AutoSave\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/AutoSave\" value=\"yes\"> <<lingo Enabled/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/AutoSave\" value=\"no\"> <<lingo Disabled/Description>> </$radio>\n"
        },
        "$:/core/buttonstyles/Borderless": {
            "title": "$:/core/buttonstyles/Borderless",
            "tags": "$:/tags/ToolbarButtonStyle",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Borderless}}",
            "text": "tc-btn-invisible"
        },
        "$:/core/buttonstyles/Boxed": {
            "title": "$:/core/buttonstyles/Boxed",
            "tags": "$:/tags/ToolbarButtonStyle",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Boxed}}",
            "text": "tc-btn-boxed"
        },
        "$:/core/buttonstyles/Rounded": {
            "title": "$:/core/buttonstyles/Rounded",
            "tags": "$:/tags/ToolbarButtonStyle",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Styles/Rounded}}",
            "text": "tc-btn-rounded"
        },
        "$:/core/ui/ControlPanel/Settings/CamelCase": {
            "title": "$:/core/ui/ControlPanel/Settings/CamelCase",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/CamelCase/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/CamelCase/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/WikiParserRules/Inline/wikilink\" field=\"text\" checked=\"enable\" unchecked=\"disable\" default=\"enable\"> <$link to=\"$:/config/WikiParserRules/Inline/wikilink\"><<lingo Description>></$link> </$checkbox>\n"
        },
        "$:/core/ui/ControlPanel/Settings/DefaultSidebarTab": {
            "caption": "{{$:/language/ControlPanel/Settings/DefaultSidebarTab/Caption}}",
            "tags": "$:/tags/ControlPanel/Settings",
            "title": "$:/core/ui/ControlPanel/Settings/DefaultSidebarTab",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/DefaultSidebarTab/\n\n<$link to=\"$:/config/DefaultSidebarTab\"><<lingo Hint>></$link>\n\n<$select tiddler=\"$:/config/DefaultSidebarTab\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SideBar]!has[draft.of]]\">\n<option value=<<currentTiddler>>><$transclude field=\"caption\"><$text text=<<currentTiddler>>/></$transclude></option>\n</$list>\n</$select>\n"
        },
        "$:/core/ui/ControlPanel/Settings/EditorToolbar": {
            "title": "$:/core/ui/ControlPanel/Settings/EditorToolbar",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/EditorToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/EditorToolbar/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/TextEditor/EnableToolbar\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"yes\"> <$link to=\"$:/config/TextEditor/EnableToolbar\"><<lingo Description>></$link> </$checkbox>\n\n"
        },
        "$:/core/ui/ControlPanel/Settings/LinkToBehaviour": {
            "title": "$:/core/ui/ControlPanel/Settings/LinkToBehaviour",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/LinkToBehaviour/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/LinkToBehaviour/\n\n<$link to=\"$:/config/Navigation/openLinkFromInsideRiver\"><<lingo \"InsideRiver/Hint\">></$link>\n\n<$select tiddler=\"$:/config/Navigation/openLinkFromInsideRiver\">\n  <option value=\"above\"><<lingo \"OpenAbove\">></option>\n  <option value=\"below\"><<lingo \"OpenBelow\">></option>\n  <option value=\"top\"><<lingo \"OpenAtTop\">></option>\n  <option value=\"bottom\"><<lingo \"OpenAtBottom\">></option>\n</$select>\n\n<$link to=\"$:/config/Navigation/openLinkFromOutsideRiver\"><<lingo \"OutsideRiver/Hint\">></$link>\n\n<$select tiddler=\"$:/config/Navigation/openLinkFromOutsideRiver\">\n  <option value=\"top\"><<lingo \"OpenAtTop\">></option>\n  <option value=\"bottom\"><<lingo \"OpenAtBottom\">></option>\n</$select>\n"
        },
        "$:/core/ui/ControlPanel/Settings/MissingLinks": {
            "title": "$:/core/ui/ControlPanel/Settings/MissingLinks",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/MissingLinks/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/MissingLinks/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/MissingLinks\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"yes\"> <$link to=\"$:/config/MissingLinks\"><<lingo Description>></$link> </$checkbox>\n\n"
        },
        "$:/core/ui/ControlPanel/Settings/NavigationAddressBar": {
            "title": "$:/core/ui/ControlPanel/Settings/NavigationAddressBar",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/NavigationAddressBar/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/NavigationAddressBar/\n\n<$link to=\"$:/config/Navigation/UpdateAddressBar\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateAddressBar\" value=\"permaview\"> <<lingo Permaview/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateAddressBar\" value=\"permalink\"> <<lingo Permalink/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateAddressBar\" value=\"no\"> <<lingo No/Description>> </$radio>\n"
        },
        "$:/core/ui/ControlPanel/Settings/NavigationHistory": {
            "title": "$:/core/ui/ControlPanel/Settings/NavigationHistory",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/NavigationHistory/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/NavigationHistory/\n<$link to=\"$:/config/Navigation/UpdateHistory\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateHistory\" value=\"yes\"> <<lingo Yes/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Navigation/UpdateHistory\" value=\"no\"> <<lingo No/Description>> </$radio>\n"
        },
        "$:/core/ui/ControlPanel/Settings/PerformanceInstrumentation": {
            "title": "$:/core/ui/ControlPanel/Settings/PerformanceInstrumentation",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/PerformanceInstrumentation/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/PerformanceInstrumentation/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/Performance/Instrumentation\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"no\"> <$link to=\"$:/config/Performance/Instrumentation\"><<lingo Description>></$link> </$checkbox>\n"
        },
        "$:/core/ui/ControlPanel/Settings/TitleLinks": {
            "title": "$:/core/ui/ControlPanel/Settings/TitleLinks",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/TitleLinks/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/TitleLinks/\n<$link to=\"$:/config/Tiddlers/TitleLinks\"><<lingo Hint>></$link>\n\n<$radio tiddler=\"$:/config/Tiddlers/TitleLinks\" value=\"yes\"> <<lingo Yes/Description>> </$radio>\n\n<$radio tiddler=\"$:/config/Tiddlers/TitleLinks\" value=\"no\"> <<lingo No/Description>> </$radio>\n"
        },
        "$:/core/ui/ControlPanel/Settings/ToolbarButtons": {
            "title": "$:/core/ui/ControlPanel/Settings/ToolbarButtons",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtons/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/ToolbarButtons/\n<<lingo Hint>>\n\n<$checkbox tiddler=\"$:/config/Toolbar/Icons\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"yes\"> <$link to=\"$:/config/Toolbar/Icons\"><<lingo Icons/Description>></$link> </$checkbox>\n\n<$checkbox tiddler=\"$:/config/Toolbar/Text\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"no\"> <$link to=\"$:/config/Toolbar/Text\"><<lingo Text/Description>></$link> </$checkbox>\n"
        },
        "$:/core/ui/ControlPanel/Settings/ToolbarButtonStyle": {
            "title": "$:/core/ui/ControlPanel/Settings/ToolbarButtonStyle",
            "tags": "$:/tags/ControlPanel/Settings",
            "caption": "{{$:/language/ControlPanel/Settings/ToolbarButtonStyle/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/ToolbarButtonStyle/\n<$link to=\"$:/config/Toolbar/ButtonClass\"><<lingo \"Hint\">></$link>\n\n<$select tiddler=\"$:/config/Toolbar/ButtonClass\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ToolbarButtonStyle]]\">\n<option value={{!!text}}>{{!!caption}}</option>\n</$list>\n</$select>\n"
        },
        "$:/core/ui/ControlPanel/Settings": {
            "title": "$:/core/ui/ControlPanel/Settings",
            "tags": "$:/tags/ControlPanel",
            "caption": "{{$:/language/ControlPanel/Settings/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/Settings/\n\n<<lingo Hint>>\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Settings]]\">\n\n<div style=\"border-top:1px solid #eee;\">\n\n!! <$link><$transclude field=\"caption\"/></$link>\n\n<$transclude/>\n\n</div>\n\n</$list>\n"
        },
        "$:/core/ui/ControlPanel/StoryView": {
            "title": "$:/core/ui/ControlPanel/StoryView",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/StoryView/Caption}}",
            "text": "{{$:/snippets/viewswitcher}}\n"
        },
        "$:/core/ui/ControlPanel/Theme": {
            "title": "$:/core/ui/ControlPanel/Theme",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/Theme/Caption}}",
            "text": "{{$:/snippets/themeswitcher}}\n"
        },
        "$:/core/ui/ControlPanel/TiddlerFields": {
            "title": "$:/core/ui/ControlPanel/TiddlerFields",
            "tags": "$:/tags/ControlPanel/Advanced",
            "caption": "{{$:/language/ControlPanel/TiddlerFields/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/\n\n<<lingo TiddlerFields/Hint>>\n\n{{$:/snippets/allfields}}"
        },
        "$:/core/ui/ControlPanel/Toolbars/EditorToolbar": {
            "title": "$:/core/ui/ControlPanel/Toolbars/EditorToolbar",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/EditorToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\n\\define config-title()\n$:/config/EditorToolbarButtons/Visibility/$(listItem)$\n\\end\n\n\\define toolbar-button()\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"> <$transclude tiddler={{$(listItem)$!!icon}}/> <$transclude tiddler=<<listItem>> field=\"caption\"/> -- <i class=\"tc-muted\"><$transclude tiddler=<<listItem>> field=\"description\"/></i></$checkbox>\n\\end\n\n{{$:/language/ControlPanel/Toolbars/EditorToolbar/Hint}}\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditorToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<<toolbar-button>>\n\n</$list>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars/EditToolbar": {
            "title": "$:/core/ui/ControlPanel/Toolbars/EditToolbar",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/EditToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/EditToolbarButtons/Visibility/$(listItem)$\n\\end\n\n{{$:/language/ControlPanel/Toolbars/EditToolbar/Hint}}\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>> field=\"caption\"/> <i class=\"tc-muted\">-- <$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars/PageControls": {
            "title": "$:/core/ui/ControlPanel/Toolbars/PageControls",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/PageControls/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n\n{{$:/language/ControlPanel/Toolbars/PageControls/Hint}}\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>> field=\"caption\"/> <i class=\"tc-muted\">-- <$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars/ViewToolbar": {
            "title": "$:/core/ui/ControlPanel/Toolbars/ViewToolbar",
            "tags": "$:/tags/ControlPanel/Toolbars",
            "caption": "{{$:/language/ControlPanel/Toolbars/ViewToolbar/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n\n{{$:/language/ControlPanel/Toolbars/ViewToolbar/Hint}}\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>> field=\"caption\"/> <i class=\"tc-muted\">-- <$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/ControlPanel/Toolbars": {
            "title": "$:/core/ui/ControlPanel/Toolbars",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ControlPanel/Toolbars/Caption}}",
            "text": "{{$:/language/ControlPanel/Toolbars/Hint}}\n\n<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel/Toolbars]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Toolbars/ViewToolbar\" \"$:/state/tabs/controlpanel/toolbars\" \"tc-vertical\">>\n</div>\n"
        },
        "$:/ControlPanel": {
            "title": "$:/ControlPanel",
            "icon": "$:/core/images/options-button",
            "color": "#bbb",
            "text": "<div class=\"tc-control-panel\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/ControlPanel]!has[draft.of]]\" \"$:/core/ui/ControlPanel/Info\">>\n</div>\n"
        },
        "$:/core/ui/DefaultSearchResultList": {
            "title": "$:/core/ui/DefaultSearchResultList",
            "tags": "$:/tags/SearchResults",
            "caption": "{{$:/language/Search/DefaultResults/Caption}}",
            "text": "\\define searchResultList()\n//<small>{{$:/language/Search/Matches/Title}}</small>//\n\n<$list filter=\"[!is[system]search:title{$(searchTiddler)$}sort[title]limit[250]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n//<small>{{$:/language/Search/Matches/All}}</small>//\n\n<$list filter=\"[!is[system]search{$(searchTiddler)$}sort[title]limit[250]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n\n\\end\n<<searchResultList>>\n"
        },
        "$:/core/ui/EditorToolbar/bold": {
            "title": "$:/core/ui/EditorToolbar/bold",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/bold",
            "caption": "{{$:/language/Buttons/Bold/Caption}}",
            "description": "{{$:/language/Buttons/Bold/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((bold))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"''\"\n\tsuffix=\"''\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/clear-dropdown": {
            "title": "$:/core/ui/EditorToolbar/clear-dropdown",
            "text": "''{{$:/language/Buttons/Clear/Hint}}''\n\n<div class=\"tc-colour-chooser\">\n\n<$macrocall $name=\"colour-picker\" actions=\"\"\"\n\n<$action-sendmessage\n\t$message=\"tm-edit-bitmap-operation\"\n\t$param=\"clear\"\n\tcolour=<<colour-picker-value>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n\"\"\"/>\n\n</div>\n"
        },
        "$:/core/ui/EditorToolbar/clear": {
            "title": "$:/core/ui/EditorToolbar/clear",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/erase",
            "caption": "{{$:/language/Buttons/Clear/Caption}}",
            "description": "{{$:/language/Buttons/Clear/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/clear-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/editor-height-dropdown": {
            "title": "$:/core/ui/EditorToolbar/editor-height-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/EditorHeight/\n''<<lingo Hint>>''\n\n<$radio tiddler=\"$:/config/TextEditor/EditorHeight/Mode\" value=\"auto\"> {{$:/core/images/auto-height}} <<lingo Caption/Auto>></$radio>\n\n<$radio tiddler=\"$:/config/TextEditor/EditorHeight/Mode\" value=\"fixed\"> {{$:/core/images/fixed-height}} <<lingo Caption/Fixed>> <$edit-text tag=\"input\" tiddler=\"$:/config/TextEditor/EditorHeight/Height\" default=\"100px\"/></$radio>\n"
        },
        "$:/core/ui/EditorToolbar/editor-height": {
            "title": "$:/core/ui/EditorToolbar/editor-height",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/fixed-height",
            "custom-icon": "yes",
            "caption": "{{$:/language/Buttons/EditorHeight/Caption}}",
            "description": "{{$:/language/Buttons/EditorHeight/Hint}}",
            "condition": "[<targetTiddler>!is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/editor-height-dropdown",
            "text": "<$reveal tag=\"span\" state=\"$:/config/TextEditor/EditorHeight/Mode\" type=\"match\" text=\"fixed\">\n{{$:/core/images/fixed-height}}\n</$reveal>\n<$reveal tag=\"span\" state=\"$:/config/TextEditor/EditorHeight/Mode\" type=\"match\" text=\"auto\">\n{{$:/core/images/auto-height}}\n</$reveal>\n"
        },
        "$:/core/ui/EditorToolbar/excise-dropdown": {
            "title": "$:/core/ui/EditorToolbar/excise-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Excise/\n\n\\define body(config-title)\n''<<lingo Hint>>''\n\n<<lingo Caption/NewTitle>> <$edit-text tag=\"input\" tiddler=\"$config-title$/new-title\" default=\"\" focus=\"true\"/>\n\n<$set name=\"new-title\" value={{$config-title$/new-title}}>\n<$list filter=\"\"\"[<new-title>is[tiddler]]\"\"\">\n<div class=\"tc-error\">\n<<lingo Caption/TiddlerExists>>\n</div>\n</$list>\n</$set>\n\n<$checkbox tiddler=\"\"\"$config-title$/tagnew\"\"\" field=\"text\" checked=\"yes\" unchecked=\"no\" default=\"false\"> <<lingo Caption/Tag>></$checkbox>\n\n<<lingo Caption/Replace>> <$select tiddler=\"\"\"$config-title$/type\"\"\" default=\"transclude\">\n<option value=\"link\"><<lingo Caption/Replace/Link>></option>\n<option value=\"transclude\"><<lingo Caption/Replace/Transclusion>></option>\n<option value=\"macro\"><<lingo Caption/Replace/Macro>></option>\n</$select>\n\n<$reveal state=\"\"\"$config-title$/type\"\"\" type=\"match\" text=\"macro\">\n<<lingo Caption/MacroName>> <$edit-text tag=\"input\" tiddler=\"\"\"$config-title$/macro-title\"\"\" default=\"translink\"/>\n</$reveal>\n\n<$button>\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"excise\"\n\ttitle={{$config-title$/new-title}}\n\ttype={{$config-title$/type}}\n\tmacro={{$config-title$/macro-title}}\n\ttagnew={{$config-title$/tagnew}}\n/>\n<$action-deletetiddler\n\t$tiddler=<<qualify \"$:/state/Excise/NewTitle\">>\n/>\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n<<lingo Caption/Excise>>\n</$button>\n\\end\n\n<$macrocall $name=\"body\" config-title=<<qualify \"$:/state/Excise/\">>/>\n"
        },
        "$:/core/ui/EditorToolbar/excise": {
            "title": "$:/core/ui/EditorToolbar/excise",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/excise",
            "caption": "{{$:/language/Buttons/Excise/Caption}}",
            "description": "{{$:/language/Buttons/Excise/Hint}}",
            "condition": "[<targetTiddler>!is[image]]",
            "shortcuts": "((excise))",
            "dropdown": "$:/core/ui/EditorToolbar/excise-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/heading-1": {
            "title": "$:/core/ui/EditorToolbar/heading-1",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-1",
            "caption": "{{$:/language/Buttons/Heading1/Caption}}",
            "description": "{{$:/language/Buttons/Heading1/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((heading-1))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"1\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-2": {
            "title": "$:/core/ui/EditorToolbar/heading-2",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-2",
            "caption": "{{$:/language/Buttons/Heading2/Caption}}",
            "description": "{{$:/language/Buttons/Heading2/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-2))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"2\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-3": {
            "title": "$:/core/ui/EditorToolbar/heading-3",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-3",
            "caption": "{{$:/language/Buttons/Heading3/Caption}}",
            "description": "{{$:/language/Buttons/Heading3/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-3))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"3\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-4": {
            "title": "$:/core/ui/EditorToolbar/heading-4",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-4",
            "caption": "{{$:/language/Buttons/Heading4/Caption}}",
            "description": "{{$:/language/Buttons/Heading4/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-4))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"4\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-5": {
            "title": "$:/core/ui/EditorToolbar/heading-5",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-5",
            "caption": "{{$:/language/Buttons/Heading5/Caption}}",
            "description": "{{$:/language/Buttons/Heading5/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-5))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"5\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/heading-6": {
            "title": "$:/core/ui/EditorToolbar/heading-6",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/heading-6",
            "caption": "{{$:/language/Buttons/Heading6/Caption}}",
            "description": "{{$:/language/Buttons/Heading6/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((heading-6))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"!\"\n\tcount=\"6\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/italic": {
            "title": "$:/core/ui/EditorToolbar/italic",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/italic",
            "caption": "{{$:/language/Buttons/Italic/Caption}}",
            "description": "{{$:/language/Buttons/Italic/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((italic))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"//\"\n\tsuffix=\"//\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/line-width-dropdown": {
            "title": "$:/core/ui/EditorToolbar/line-width-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/LineWidth/\n\n\\define toolbar-line-width-inner()\n<$button tag=\"a\" tooltip=\"\"\"$(line-width)$\"\"\">\n\n<$action-setfield\n\t$tiddler=\"$:/config/BitmapEditor/LineWidth\"\n\t$value=\"$(line-width)$\"\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<div style=\"display: inline-block; margin: 4px calc(80px - $(line-width)$); background-color: #000; width: calc(100px + $(line-width)$ * 2); height: $(line-width)$; border-radius: 120px; vertical-align: middle;\"/>\n\n<span style=\"margin-left: 8px;\">\n\n<$text text=\"\"\"$(line-width)$\"\"\"/>\n\n<$reveal state=\"$:/config/BitmapEditor/LineWidth\" type=\"match\" text=\"\"\"$(line-width)$\"\"\" tag=\"span\">\n\n<$entity entity=\"&nbsp;\"/>\n\n<$entity entity=\"&#x2713;\"/>\n\n</$reveal>\n\n</span>\n\n</$button>\n\\end\n\n''<<lingo Hint>>''\n\n<$list filter={{$:/config/BitmapEditor/LineWidths}} variable=\"line-width\">\n\n<<toolbar-line-width-inner>>\n\n</$list>\n"
        },
        "$:/core/ui/EditorToolbar/line-width": {
            "title": "$:/core/ui/EditorToolbar/line-width",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/line-width",
            "caption": "{{$:/language/Buttons/LineWidth/Caption}}",
            "description": "{{$:/language/Buttons/LineWidth/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/line-width-dropdown",
            "text": "<$text text={{$:/config/BitmapEditor/LineWidth}}/>"
        },
        "$:/core/ui/EditorToolbar/link-dropdown": {
            "title": "$:/core/ui/EditorToolbar/link-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Link/\n\n\\define link-actions()\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"make-link\"\n\ttext={{$(linkTiddler)$}}\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<searchTiddler>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<linkTiddler>>\n/>\n\\end\n\n\\define body(config-title)\n''<<lingo Hint>>''\n\n<$vars searchTiddler=\"\"\"$config-title$/search\"\"\" linkTiddler=\"\"\"$config-title$/link\"\"\">\n\n<$edit-text tiddler=<<searchTiddler>> type=\"search\" tag=\"input\" focus=\"true\" placeholder={{$:/language/Search/Search}} default=\"\"/>\n<$reveal tag=\"span\" state=<<searchTiddler>> type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\" style=\"width: auto; display: inline-block; background-colour: inherit;\">\n<$action-setfield $tiddler=<<searchTiddler>> text=\"\" />\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n\n<$reveal tag=\"div\" state=<<searchTiddler>> type=\"nomatch\" text=\"\">\n\n<$linkcatcher actions=<<link-actions>> to=<<linkTiddler>>>\n\n{{$:/core/ui/SearchResults}}\n\n</$linkcatcher>\n\n</$reveal>\n\n</$vars>\n\n\\end\n\n<$macrocall $name=\"body\" config-title=<<qualify \"$:/state/Link/\">>/>\n"
        },
        "$:/core/ui/EditorToolbar/link": {
            "title": "$:/core/ui/EditorToolbar/link",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/link",
            "caption": "{{$:/language/Buttons/Link/Caption}}",
            "description": "{{$:/language/Buttons/Link/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((link))",
            "dropdown": "$:/core/ui/EditorToolbar/link-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/list-bullet": {
            "title": "$:/core/ui/EditorToolbar/list-bullet",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/list-bullet",
            "caption": "{{$:/language/Buttons/ListBullet/Caption}}",
            "description": "{{$:/language/Buttons/ListBullet/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((list-bullet))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"*\"\n\tcount=\"1\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/list-number": {
            "title": "$:/core/ui/EditorToolbar/list-number",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/list-number",
            "caption": "{{$:/language/Buttons/ListNumber/Caption}}",
            "description": "{{$:/language/Buttons/ListNumber/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((list-number))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"prefix-lines\"\n\tcharacter=\"#\"\n\tcount=\"1\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/mono-block": {
            "title": "$:/core/ui/EditorToolbar/mono-block",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/mono-block",
            "caption": "{{$:/language/Buttons/MonoBlock/Caption}}",
            "description": "{{$:/language/Buttons/MonoBlock/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((mono-block))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-lines\"\n\tprefix=\"\n```\"\n\tsuffix=\"```\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/mono-line": {
            "title": "$:/core/ui/EditorToolbar/mono-line",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/mono-line",
            "caption": "{{$:/language/Buttons/MonoLine/Caption}}",
            "description": "{{$:/language/Buttons/MonoLine/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((mono-line))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"`\"\n\tsuffix=\"`\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/more-dropdown": {
            "title": "$:/core/ui/EditorToolbar/more-dropdown",
            "text": "\\define config-title()\n$:/config/EditorToolbarButtons/Visibility/$(toolbarItem)$\n\\end\n\n\\define conditional-button()\n<$list filter={{$(toolbarItem)$!!condition}} variable=\"condition\">\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/toolbar/button\" mode=\"inline\"/> <$transclude tiddler=<<toolbarItem>> field=\"description\"/>\n</$list>\n\\end\n\n<div class=\"tc-text-editor-toolbar-more\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditorToolbar]!has[draft.of]] -[[$:/core/ui/EditorToolbar/more]]\">\n<$reveal type=\"match\" state=<<config-visibility-title>> text=\"hide\" tag=\"div\">\n<<conditional-button>>\n</$reveal>\n</$list>\n</div>\n"
        },
        "$:/core/ui/EditorToolbar/more": {
            "title": "$:/core/ui/EditorToolbar/more",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/down-arrow",
            "caption": "{{$:/language/Buttons/More/Caption}}",
            "description": "{{$:/language/Buttons/More/Hint}}",
            "condition": "[<targetTiddler>]",
            "dropdown": "$:/core/ui/EditorToolbar/more-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/opacity-dropdown": {
            "title": "$:/core/ui/EditorToolbar/opacity-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Opacity/\n\n\\define toolbar-opacity-inner()\n<$button tag=\"a\" tooltip=\"\"\"$(opacity)$\"\"\">\n\n<$action-setfield\n\t$tiddler=\"$:/config/BitmapEditor/Opacity\"\n\t$value=\"$(opacity)$\"\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<div style=\"display: inline-block; vertical-align: middle; background-color: $(current-paint-colour)$; opacity: $(opacity)$; width: 1em; height: 1em; border-radius: 50%;\"/>\n\n<span style=\"margin-left: 8px;\">\n\n<$text text=\"\"\"$(opacity)$\"\"\"/>\n\n<$reveal state=\"$:/config/BitmapEditor/Opacity\" type=\"match\" text=\"\"\"$(opacity)$\"\"\" tag=\"span\">\n\n<$entity entity=\"&nbsp;\"/>\n\n<$entity entity=\"&#x2713;\"/>\n\n</$reveal>\n\n</span>\n\n</$button>\n\\end\n\n\\define toolbar-opacity()\n''<<lingo Hint>>''\n\n<$list filter={{$:/config/BitmapEditor/Opacities}} variable=\"opacity\">\n\n<<toolbar-opacity-inner>>\n\n</$list>\n\\end\n\n<$set name=\"current-paint-colour\" value={{$:/config/BitmapEditor/Colour}}>\n\n<$set name=\"current-opacity\" value={{$:/config/BitmapEditor/Opacity}}>\n\n<<toolbar-opacity>>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/EditorToolbar/opacity": {
            "title": "$:/core/ui/EditorToolbar/opacity",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/opacity",
            "caption": "{{$:/language/Buttons/Opacity/Caption}}",
            "description": "{{$:/language/Buttons/Opacity/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/opacity-dropdown",
            "text": "<$text text={{$:/config/BitmapEditor/Opacity}}/>\n"
        },
        "$:/core/ui/EditorToolbar/paint-dropdown": {
            "title": "$:/core/ui/EditorToolbar/paint-dropdown",
            "text": "''{{$:/language/Buttons/Paint/Hint}}''\n\n<$macrocall $name=\"colour-picker\" actions=\"\"\"\n\n<$action-setfield\n\t$tiddler=\"$:/config/BitmapEditor/Colour\"\n\t$value=<<colour-picker-value>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n\"\"\"/>\n"
        },
        "$:/core/ui/EditorToolbar/paint": {
            "title": "$:/core/ui/EditorToolbar/paint",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/paint",
            "caption": "{{$:/language/Buttons/Paint/Caption}}",
            "description": "{{$:/language/Buttons/Paint/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/paint-dropdown",
            "text": "\\define toolbar-paint()\n<div style=\"display: inline-block; vertical-align: middle; background-color: $(colour-picker-value)$; width: 1em; height: 1em; border-radius: 50%;\"/>\n\\end\n<$set name=\"colour-picker-value\" value={{$:/config/BitmapEditor/Colour}}>\n<<toolbar-paint>>\n</$set>\n"
        },
        "$:/core/ui/EditorToolbar/picture-dropdown": {
            "title": "$:/core/ui/EditorToolbar/picture-dropdown",
            "text": "\\define replacement-text()\n[img[$(imageTitle)$]]\n\\end\n\n''{{$:/language/Buttons/Picture/Hint}}''\n\n<$macrocall $name=\"image-picker\" actions=\"\"\"\n\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"replace-selection\"\n\ttext=<<replacement-text>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n\"\"\"/>\n"
        },
        "$:/core/ui/EditorToolbar/picture": {
            "title": "$:/core/ui/EditorToolbar/picture",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/picture",
            "caption": "{{$:/language/Buttons/Picture/Caption}}",
            "description": "{{$:/language/Buttons/Picture/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((picture))",
            "dropdown": "$:/core/ui/EditorToolbar/picture-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/preview-type-dropdown": {
            "title": "$:/core/ui/EditorToolbar/preview-type-dropdown",
            "text": "\\define preview-type-button()\n<$button tag=\"a\">\n\n<$action-setfield $tiddler=\"$:/state/editpreviewtype\" $value=\"$(previewType)$\"/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<$transclude tiddler=<<previewType>> field=\"caption\" mode=\"inline\">\n\n<$view tiddler=<<previewType>> field=\"title\" mode=\"inline\"/>\n\n</$transclude> \n\n<$reveal tag=\"span\" state=\"$:/state/editpreviewtype\" type=\"match\" text=<<previewType>> default=\"$:/core/ui/EditTemplate/body/preview/output\">\n\n<$entity entity=\"&nbsp;\"/>\n\n<$entity entity=\"&#x2713;\"/>\n\n</$reveal>\n\n</$button>\n\\end\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditPreview]!has[draft.of]]\" variable=\"previewType\">\n\n<<preview-type-button>>\n\n</$list>\n"
        },
        "$:/core/ui/EditorToolbar/preview-type": {
            "title": "$:/core/ui/EditorToolbar/preview-type",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/chevron-down",
            "caption": "{{$:/language/Buttons/PreviewType/Caption}}",
            "description": "{{$:/language/Buttons/PreviewType/Hint}}",
            "condition": "[all[shadows+tiddlers]tag[$:/tags/EditPreview]!has[draft.of]butfirst[]limit[1]]",
            "button-classes": "tc-text-editor-toolbar-item-adjunct",
            "dropdown": "$:/core/ui/EditorToolbar/preview-type-dropdown"
        },
        "$:/core/ui/EditorToolbar/preview": {
            "title": "$:/core/ui/EditorToolbar/preview",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/preview-open",
            "custom-icon": "yes",
            "caption": "{{$:/language/Buttons/Preview/Caption}}",
            "description": "{{$:/language/Buttons/Preview/Hint}}",
            "condition": "[<targetTiddler>]",
            "button-classes": "tc-text-editor-toolbar-item-start-group",
            "shortcuts": "((preview))",
            "text": "<$reveal state=\"$:/state/showeditpreview\" type=\"match\" text=\"yes\" tag=\"span\">\n{{$:/core/images/preview-open}}\n<$action-setfield $tiddler=\"$:/state/showeditpreview\" $value=\"no\"/>\n</$reveal>\n<$reveal state=\"$:/state/showeditpreview\" type=\"nomatch\" text=\"yes\" tag=\"span\">\n{{$:/core/images/preview-closed}}\n<$action-setfield $tiddler=\"$:/state/showeditpreview\" $value=\"yes\"/>\n</$reveal>\n"
        },
        "$:/core/ui/EditorToolbar/quote": {
            "title": "$:/core/ui/EditorToolbar/quote",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/quote",
            "caption": "{{$:/language/Buttons/Quote/Caption}}",
            "description": "{{$:/language/Buttons/Quote/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((quote))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-lines\"\n\tprefix=\"\n<<<\"\n\tsuffix=\"<<<\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/size-dropdown": {
            "title": "$:/core/ui/EditorToolbar/size-dropdown",
            "text": "\\define lingo-base() $:/language/Buttons/Size/\n\n\\define toolbar-button-size-preset(config-title)\n<$set name=\"width\" filter=\"$(sizePair)$ +[first[]]\">\n\n<$set name=\"height\" filter=\"$(sizePair)$ +[last[]]\">\n\n<$button tag=\"a\">\n\n<$action-setfield\n\t$tiddler=\"\"\"$config-title$/new-width\"\"\"\n\t$value=<<width>>\n/>\n\n<$action-setfield\n\t$tiddler=\"\"\"$config-title$/new-height\"\"\"\n\t$value=<<height>>\n/>\n\n<$action-deletetiddler\n\t$tiddler=\"\"\"$config-title$/presets-popup\"\"\"\n/>\n\n<$text text=<<width>>/> &times; <$text text=<<height>>/>\n\n</$button>\n\n</$set>\n\n</$set>\n\\end\n\n\\define toolbar-button-size(config-title)\n''{{$:/language/Buttons/Size/Hint}}''\n\n<<lingo Caption/Width>> <$edit-text tag=\"input\" tiddler=\"\"\"$config-title$/new-width\"\"\" default=<<tv-bitmap-editor-width>> focus=\"true\" size=\"8\"/> <<lingo Caption/Height>> <$edit-text tag=\"input\" tiddler=\"\"\"$config-title$/new-height\"\"\" default=<<tv-bitmap-editor-height>> size=\"8\"/> <$button popup=\"\"\"$config-title$/presets-popup\"\"\" class=\"tc-btn-invisible tc-popup-keep\" style=\"width: auto; display: inline-block; background-colour: inherit;\" selectedClass=\"tc-selected\">\n{{$:/core/images/down-arrow}}\n</$button>\n\n<$reveal tag=\"span\" state=\"\"\"$config-title$/presets-popup\"\"\" type=\"popup\" position=\"belowleft\" animate=\"yes\">\n\n<div class=\"tc-drop-down tc-popup-keep\">\n\n<$list filter={{$:/config/BitmapEditor/ImageSizes}} variable=\"sizePair\">\n\n<$macrocall $name=\"toolbar-button-size-preset\" config-title=\"$config-title$\"/>\n\n</$list>\n\n</div>\n\n</$reveal>\n\n<$button>\n<$action-sendmessage\n\t$message=\"tm-edit-bitmap-operation\"\n\t$param=\"resize\"\n\twidth={{$config-title$/new-width}}\n\theight={{$config-title$/new-height}}\n/>\n<$action-deletetiddler\n\t$tiddler=\"\"\"$config-title$/new-width\"\"\"\n/>\n<$action-deletetiddler\n\t$tiddler=\"\"\"$config-title$/new-height\"\"\"\n/>\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n<<lingo Caption/Resize>>\n</$button>\n\\end\n\n<$macrocall $name=\"toolbar-button-size\" config-title=<<qualify \"$:/state/Size/\">>/>\n"
        },
        "$:/core/ui/EditorToolbar/size": {
            "title": "$:/core/ui/EditorToolbar/size",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/size",
            "caption": "{{$:/language/Buttons/Size/Caption}}",
            "description": "{{$:/language/Buttons/Size/Hint}}",
            "condition": "[<targetTiddler>is[image]]",
            "dropdown": "$:/core/ui/EditorToolbar/size-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/stamp-dropdown": {
            "title": "$:/core/ui/EditorToolbar/stamp-dropdown",
            "text": "\\define toolbar-button-stamp-inner()\n<$button tag=\"a\">\n\n<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"replace-selection\"\n\ttext={{$(snippetTitle)$}}\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<$view tiddler=<<snippetTitle>> field=\"caption\" mode=\"inline\">\n\n<$view tiddler=<<snippetTitle>> field=\"title\" mode=\"inline\"/>\n\n</$view>\n\n</$button>\n\\end\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TextEditor/Snippet]!has[draft.of]sort[caption]]\" variable=\"snippetTitle\">\n\n<<toolbar-button-stamp-inner>>\n\n</$list>\n\n----\n\n<$button tag=\"a\">\n\n<$action-sendmessage\n\t$message=\"tm-new-tiddler\"\n\ttags=\"$:/tags/TextEditor/Snippet\"\n\tcaption={{$:/language/Buttons/Stamp/New/Title}}\n\ttext={{$:/language/Buttons/Stamp/New/Text}}\n/>\n\n<$action-deletetiddler\n\t$tiddler=<<dropdown-state>>\n/>\n\n<em>\n\n<$text text={{$:/language/Buttons/Stamp/Caption/New}}/>\n\n</em>\n\n</$button>\n"
        },
        "$:/core/ui/EditorToolbar/stamp": {
            "title": "$:/core/ui/EditorToolbar/stamp",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/stamp",
            "caption": "{{$:/language/Buttons/Stamp/Caption}}",
            "description": "{{$:/language/Buttons/Stamp/Hint}}",
            "condition": "[<targetTiddler>!is[image]]",
            "shortcuts": "((stamp))",
            "dropdown": "$:/core/ui/EditorToolbar/stamp-dropdown",
            "text": ""
        },
        "$:/core/ui/EditorToolbar/strikethrough": {
            "title": "$:/core/ui/EditorToolbar/strikethrough",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/strikethrough",
            "caption": "{{$:/language/Buttons/Strikethrough/Caption}}",
            "description": "{{$:/language/Buttons/Strikethrough/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((strikethrough))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"~~\"\n\tsuffix=\"~~\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/subscript": {
            "title": "$:/core/ui/EditorToolbar/subscript",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/subscript",
            "caption": "{{$:/language/Buttons/Subscript/Caption}}",
            "description": "{{$:/language/Buttons/Subscript/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((subscript))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\",,\"\n\tsuffix=\",,\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/superscript": {
            "title": "$:/core/ui/EditorToolbar/superscript",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/superscript",
            "caption": "{{$:/language/Buttons/Superscript/Caption}}",
            "description": "{{$:/language/Buttons/Superscript/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((superscript))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"^^\"\n\tsuffix=\"^^\"\n/>\n"
        },
        "$:/core/ui/EditorToolbar/underline": {
            "title": "$:/core/ui/EditorToolbar/underline",
            "tags": "$:/tags/EditorToolbar",
            "icon": "$:/core/images/underline",
            "caption": "{{$:/language/Buttons/Underline/Caption}}",
            "description": "{{$:/language/Buttons/Underline/Hint}}",
            "condition": "[<targetTiddler>!has[type]] [<targetTiddler>type[text/vnd.tiddlywiki]]",
            "shortcuts": "((underline))",
            "text": "<$action-sendmessage\n\t$message=\"tm-edit-text-operation\"\n\t$param=\"wrap-selection\"\n\tprefix=\"__\"\n\tsuffix=\"__\"\n/>\n"
        },
        "$:/core/ui/EditTemplate/body/editor": {
            "title": "$:/core/ui/EditTemplate/body/editor",
            "text": "<$edit\n\n  field=\"text\"\n  class=\"tc-edit-texteditor\"\n  placeholder={{$:/language/EditTemplate/Body/Placeholder}}\n\n><$set\n\n  name=\"targetTiddler\"\n  value=<<currentTiddler>>\n\n><$list\n\n  filter=\"[all[shadows+tiddlers]tag[$:/tags/EditorToolbar]!has[draft.of]]\"\n\n><$reveal\n\n  type=\"nomatch\"\n  state=<<config-visibility-title>>\n  text=\"hide\"\n  class=\"tc-text-editor-toolbar-item-wrapper\"\n\n><$transclude\n\n  tiddler=\"$:/core/ui/EditTemplate/body/toolbar/button\"\n  mode=\"inline\"\n\n/></$reveal></$list></$set></$edit>\n"
        },
        "$:/core/ui/EditTemplate/body/toolbar/button": {
            "title": "$:/core/ui/EditTemplate/body/toolbar/button",
            "text": "\\define toolbar-button-icon()\n<$list\n\n  filter=\"[all[current]!has[custom-icon]]\"\n  variable=\"no-custom-icon\"\n\n><$transclude\n\n  tiddler={{!!icon}}\n\n/></$list>\n\\end\n\n\\define toolbar-button-tooltip()\n{{!!description}}<$macrocall $name=\"displayshortcuts\" $output=\"text/plain\" shortcuts={{!!shortcuts}} prefix=\"` - [\" separator=\"] [\" suffix=\"]`\"/>\n\\end\n\n\\define toolbar-button()\n<$list\n\n  filter={{!!condition}}\n  variable=\"list-condition\"\n\n><$wikify\n\n  name=\"tooltip-text\"\n  text=<<toolbar-button-tooltip>>\n  mode=\"inline\"\n  output=\"text\"\n\n><$list\n\n  filter=\"[all[current]!has[dropdown]]\"\n  variable=\"no-dropdown\"\n\n><$button\n\n  class=\"tc-btn-invisible $(buttonClasses)$\"\n  tooltip=<<tooltip-text>>\n\n><span\n\n  data-tw-keyboard-shortcut={{!!shortcuts}}\n\n/><<toolbar-button-icon>><$transclude\n\n  tiddler=<<currentTiddler>>\n  field=\"text\"\n\n/></$button></$list><$list\n\n  filter=\"[all[current]has[dropdown]]\"\n  variable=\"dropdown\"\n\n><$set\n\n  name=\"dropdown-state\"\n  value=<<qualify \"$:/state/EditorToolbarDropdown\">>\n\n><$button\n\n  popup=<<dropdown-state>>\n  class=\"tc-popup-keep tc-btn-invisible $(buttonClasses)$\"\n  selectedClass=\"tc-selected\"\n  tooltip=<<tooltip-text>>\n\n><span\n\n  data-tw-keyboard-shortcut={{!!shortcuts}}\n\n/><<toolbar-button-icon>><$transclude\n\n  tiddler=<<currentTiddler>>\n  field=\"text\"\n\n/></$button><$reveal\n\n  state=<<dropdown-state>>\n  type=\"popup\"\n  position=\"below\"\n  animate=\"yes\"\n  tag=\"span\"\n\n><div\n\n  class=\"tc-drop-down tc-popup-keep\"\n\n><$transclude\n\n  tiddler={{!!dropdown}}\n  mode=\"block\"\n\n/></div></$reveal></$set></$list></$wikify></$list>\n\\end\n\n\\define toolbar-button-outer()\n<$set\n\n  name=\"buttonClasses\"\n  value={{!!button-classes}}\n\n><<toolbar-button>></$set>\n\\end\n\n<<toolbar-button-outer>>"
        },
        "$:/core/ui/EditTemplate/body": {
            "title": "$:/core/ui/EditTemplate/body",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/Body/\n\\define config-visibility-title()\n$:/config/EditorToolbarButtons/Visibility/$(currentTiddler)$\n\\end\n<$list filter=\"[is[current]has[_canonical_uri]]\">\n\n<div class=\"tc-message-box\">\n\n<<lingo External/Hint>>\n\n<a href={{!!_canonical_uri}}><$text text={{!!_canonical_uri}}/></a>\n\n<$edit-text field=\"_canonical_uri\" class=\"tc-edit-fields\"></$edit-text>\n\n</div>\n\n</$list>\n\n<$list filter=\"[is[current]!has[_canonical_uri]]\">\n\n<$reveal state=\"$:/state/showeditpreview\" type=\"match\" text=\"yes\">\n\n<div class=\"tc-tiddler-preview\">\n\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/editor\" mode=\"inline\"/>\n\n<div class=\"tc-tiddler-preview-preview\">\n\n<$transclude tiddler={{$:/state/editpreviewtype}} mode=\"inline\">\n\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/preview/output\" mode=\"inline\"/>\n\n</$transclude>\n\n</div>\n\n</div>\n\n</$reveal>\n\n<$reveal state=\"$:/state/showeditpreview\" type=\"nomatch\" text=\"yes\">\n\n<$transclude tiddler=\"$:/core/ui/EditTemplate/body/editor\" mode=\"inline\"/>\n\n</$reveal>\n\n</$list>\n"
        },
        "$:/core/ui/EditTemplate/controls": {
            "title": "$:/core/ui/EditTemplate/controls",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define config-title()\n$:/config/EditToolbarButtons/Visibility/$(listItem)$\n\\end\n<div class=\"tc-tiddler-title tc-tiddler-edit-title\">\n<$view field=\"title\"/>\n<span class=\"tc-tiddler-controls tc-titlebar\"><$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditToolbar]!has[draft.of]]\" variable=\"listItem\"><$reveal type=\"nomatch\" state=<<config-title>> text=\"hide\"><$transclude tiddler=<<listItem>>/></$reveal></$list></span>\n<div style=\"clear: both;\"></div>\n</div>\n"
        },
        "$:/core/ui/EditTemplate/fields": {
            "title": "$:/core/ui/EditTemplate/fields",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/\n\\define config-title()\n$:/config/EditTemplateFields/Visibility/$(currentField)$\n\\end\n\n\\define config-filter()\n[[hide]] -[title{$(config-title)$}]\n\\end\n\n\\define new-field-inner()\n<$reveal type=\"nomatch\" text=\"\" default=<<name>>>\n<$button>\n<$action-sendmessage $message=\"tm-add-field\" $name=<<name>> $value=<<value>>/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldname\"/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldvalue\"/>\n<<lingo Fields/Add/Button>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" text=\"\" default=<<name>>>\n<$button>\n<<lingo Fields/Add/Button>>\n</$button>\n</$reveal>\n\\end\n\n\\define new-field()\n<$set name=\"name\" value={{$:/temp/newfieldname}}>\n<$set name=\"value\" value={{$:/temp/newfieldvalue}}>\n<<new-field-inner>>\n</$set>\n</$set>\n\\end\n\n<div class=\"tc-edit-fields\">\n<table class=\"tc-edit-fields\">\n<tbody>\n<$list filter=\"[all[current]fields[]] +[sort[title]]\" variable=\"currentField\">\n<$list filter=<<config-filter>> variable=\"temp\">\n<tr class=\"tc-edit-field\">\n<td class=\"tc-edit-field-name\">\n<$text text=<<currentField>>/>:</td>\n<td class=\"tc-edit-field-value\">\n<$edit-text tiddler=<<currentTiddler>> field=<<currentField>> placeholder={{$:/language/EditTemplate/Fields/Add/Value/Placeholder}}/>\n</td>\n<td class=\"tc-edit-field-remove\">\n<$button class=\"tc-btn-invisible\" tooltip={{$:/language/EditTemplate/Field/Remove/Hint}} aria-label={{$:/language/EditTemplate/Field/Remove/Caption}}>\n<$action-deletefield $field=<<currentField>>/>\n{{$:/core/images/delete-button}}\n</$button>\n</td>\n</tr>\n</$list>\n</$list>\n</tbody>\n</table>\n</div>\n\n<$fieldmangler>\n<div class=\"tc-edit-field-add\">\n<em class=\"tc-edit\">\n<<lingo Fields/Add/Prompt>>\n</em>\n<span class=\"tc-edit-field-add-name\">\n<$edit-text tiddler=\"$:/temp/newfieldname\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Fields/Add/Name/Placeholder}} focusPopup=<<qualify \"$:/state/popup/field-dropdown\">> class=\"tc-edit-texteditor tc-popup-handle\"/>\n</span>\n<$button popup=<<qualify \"$:/state/popup/field-dropdown\">> class=\"tc-btn-invisible tc-btn-dropdown\" tooltip={{$:/language/EditTemplate/Field/Dropdown/Hint}} aria-label={{$:/language/EditTemplate/Field/Dropdown/Caption}}>{{$:/core/images/down-arrow}}</$button>\n<$reveal state=<<qualify \"$:/state/popup/field-dropdown\">> type=\"nomatch\" text=\"\" default=\"\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<$linkcatcher to=\"$:/temp/newfieldname\">\n<div class=\"tc-dropdown-item\">\n<<lingo Fields/Add/Dropdown/User>>\n</div>\n<$list filter=\"[!is[shadow]!is[system]fields[]sort[]] -created -creator -draft.of -draft.title -modified -modifier -tags -text -title -type\"  variable=\"currentField\">\n<$link to=<<currentField>>>\n<<currentField>>\n</$link>\n</$list>\n<div class=\"tc-dropdown-item\">\n<<lingo Fields/Add/Dropdown/System>>\n</div>\n<$list filter=\"[fields[]sort[]] -[!is[shadow]!is[system]fields[]]\" variable=\"currentField\">\n<$link to=<<currentField>>>\n<<currentField>>\n</$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>\n<span class=\"tc-edit-field-add-value\">\n<$edit-text tiddler=\"$:/temp/newfieldvalue\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Fields/Add/Value/Placeholder}} class=\"tc-edit-texteditor\"/>\n</span>\n<span class=\"tc-edit-field-add-button\">\n<$macrocall $name=\"new-field\"/>\n</span>\n</div>\n</$fieldmangler>\n\n"
        },
        "$:/core/ui/EditTemplate/body/preview/output": {
            "title": "$:/core/ui/EditTemplate/body/preview/output",
            "tags": "$:/tags/EditPreview",
            "caption": "{{$:/language/EditTemplate/Body/Preview/Type/Output}}",
            "text": "<$set name=\"tv-tiddler-preview\" value=\"yes\">\n\n<$transclude />\n\n</$set>\n"
        },
        "$:/core/ui/EditTemplate/shadow": {
            "title": "$:/core/ui/EditTemplate/shadow",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/Shadow/\n\\define pluginLinkBody()\n<$link to=\"\"\"$(pluginTitle)$\"\"\">\n<$text text=\"\"\"$(pluginTitle)$\"\"\"/>\n</$link>\n\\end\n<$list filter=\"[all[current]get[draft.of]is[shadow]!is[tiddler]]\">\n\n<$list filter=\"[all[current]shadowsource[]]\" variable=\"pluginTitle\">\n\n<$set name=\"pluginLink\" value=<<pluginLinkBody>>>\n<div class=\"tc-message-box\">\n\n<<lingo Warning>>\n\n</div>\n</$set>\n</$list>\n\n</$list>\n\n<$list filter=\"[all[current]get[draft.of]is[shadow]is[tiddler]]\">\n\n<$list filter=\"[all[current]shadowsource[]]\" variable=\"pluginTitle\">\n\n<$set name=\"pluginLink\" value=<<pluginLinkBody>>>\n<div class=\"tc-message-box\">\n\n<<lingo OverriddenWarning>>\n\n</div>\n</$set>\n</$list>\n\n</$list>"
        },
        "$:/core/ui/EditTemplate/tags": {
            "title": "$:/core/ui/EditTemplate/tags",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/\n\\define tag-styles()\nbackground-color:$(backgroundColor)$;\nfill:$(foregroundColor)$;\ncolor:$(foregroundColor)$;\n\\end\n\\define tag-body-inner(colour,fallbackTarget,colourA,colourB)\n<$vars foregroundColor=<<contrastcolour target:\"\"\"$colour$\"\"\" fallbackTarget:\"\"\"$fallbackTarget$\"\"\" colourA:\"\"\"$colourA$\"\"\" colourB:\"\"\"$colourB$\"\"\">> backgroundColor=\"\"\"$colour$\"\"\">\n<span style=<<tag-styles>> class=\"tc-tag-label\">\n<$view field=\"title\" format=\"text\" />\n<$button message=\"tm-remove-tag\" param={{!!title}} class=\"tc-btn-invisible tc-remove-tag-button\">&times;</$button>\n</span>\n</$vars>\n\\end\n\\define tag-body(colour,palette)\n<$macrocall $name=\"tag-body-inner\" colour=\"\"\"$colour$\"\"\" fallbackTarget={{$palette$##tag-background}} colourA={{$palette$##foreground}} colourB={{$palette$##background}}/>\n\\end\n<div class=\"tc-edit-tags\">\n<$fieldmangler>\n<$list filter=\"[all[current]tags[]sort[title]]\" storyview=\"pop\">\n<$macrocall $name=\"tag-body\" colour={{!!color}} palette={{$:/palette}}/>\n</$list>\n\n<div class=\"tc-edit-add-tag\">\n<span class=\"tc-add-tag-name\">\n<$edit-text tiddler=\"$:/temp/NewTagName\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Tags/Add/Placeholder}} focusPopup=<<qualify \"$:/state/popup/tags-auto-complete\">> class=\"tc-edit-texteditor tc-popup-handle\"/>\n</span> <$button popup=<<qualify \"$:/state/popup/tags-auto-complete\">> class=\"tc-btn-invisible tc-btn-dropdown\" tooltip={{$:/language/EditTemplate/Tags/Dropdown/Hint}} aria-label={{$:/language/EditTemplate/Tags/Dropdown/Caption}}>{{$:/core/images/down-arrow}}</$button> <span class=\"tc-add-tag-button\">\n<$button message=\"tm-add-tag\" param={{$:/temp/NewTagName}} set=\"$:/temp/NewTagName\" setTo=\"\" class=\"\">\n<<lingo Tags/Add/Button>>\n</$button>\n</span>\n</div>\n\n<div class=\"tc-block-dropdown-wrapper\">\n<$reveal state=<<qualify \"$:/state/popup/tags-auto-complete\">> type=\"nomatch\" text=\"\" default=\"\">\n<div class=\"tc-block-dropdown\">\n<$linkcatcher set=\"$:/temp/NewTagName\" setTo=\"\" message=\"tm-add-tag\">\n<$list filter=\"[tags[]!is[system]search:title{$:/temp/NewTagName}sort[]]\">\n{{||$:/core/ui/Components/tag-link}}\n</$list>\n<hr>\n<$list filter=\"[tags[]is[system]search:title{$:/temp/NewTagName}sort[]]\">\n{{||$:/core/ui/Components/tag-link}}\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>\n</div>\n</$fieldmangler>\n</div>"
        },
        "$:/core/ui/EditTemplate/title": {
            "title": "$:/core/ui/EditTemplate/title",
            "tags": "$:/tags/EditTemplate",
            "text": "<$vars pattern=\"\"\"[\\|\\[\\]{}]\"\"\" bad-chars=\"\"\"`| [ ] { }`\"\"\">\n\n<$list filter=\"[is[current]regexp:draft.title<pattern>]\" variable=\"listItem\">\n\n<div class=\"tc-message-box\">\n\n{{$:/language/EditTemplate/Title/BadCharacterWarning}}\n\n</div>\n\n</$list>\n\n</$vars>\n\n<$edit-text field=\"draft.title\" class=\"tc-titlebar tc-edit-texteditor\" focus=\"true\"/>\n"
        },
        "$:/core/ui/EditTemplate/type": {
            "title": "$:/core/ui/EditTemplate/type",
            "tags": "$:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/EditTemplate/\n<div class=\"tc-type-selector\"><$fieldmangler>\n<em class=\"tc-edit\"><<lingo Type/Prompt>></em> <$edit-text field=\"type\" tag=\"input\" default=\"\" placeholder={{$:/language/EditTemplate/Type/Placeholder}} focusPopup=<<qualify \"$:/state/popup/type-dropdown\">> class=\"tc-edit-typeeditor tc-popup-handle\"/> <$button popup=<<qualify \"$:/state/popup/type-dropdown\">> class=\"tc-btn-invisible tc-btn-dropdown\" tooltip={{$:/language/EditTemplate/Type/Dropdown/Hint}} aria-label={{$:/language/EditTemplate/Type/Dropdown/Caption}}>{{$:/core/images/down-arrow}}</$button> <$button message=\"tm-remove-field\" param=\"type\" class=\"tc-btn-invisible tc-btn-icon\" tooltip={{$:/language/EditTemplate/Type/Delete/Hint}} aria-label={{$:/language/EditTemplate/Type/Delete/Caption}}>{{$:/core/images/delete-button}}</$button>\n</$fieldmangler></div>\n\n<div class=\"tc-block-dropdown-wrapper\">\n<$reveal state=<<qualify \"$:/state/popup/type-dropdown\">> type=\"nomatch\" text=\"\" default=\"\">\n<div class=\"tc-block-dropdown tc-edit-type-dropdown\">\n<$linkcatcher to=\"!!type\">\n<$list filter='[all[shadows+tiddlers]prefix[$:/language/Docs/Types/]each[group]sort[group]]'>\n<div class=\"tc-dropdown-item\">\n<$text text={{!!group}}/>\n</div>\n<$list filter=\"[all[shadows+tiddlers]prefix[$:/language/Docs/Types/]group{!!group}] +[sort[description]]\"><$link to={{!!name}}><$view field=\"description\"/> (<$view field=\"name\"/>)</$link>\n</$list>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>\n</div>"
        },
        "$:/core/ui/EditTemplate": {
            "title": "$:/core/ui/EditTemplate",
            "text": "\\define frame-classes()\ntc-tiddler-frame tc-tiddler-edit-frame $(missingTiddlerClass)$ $(shadowTiddlerClass)$ $(systemTiddlerClass)$\n\\end\n<div class=<<frame-classes>>>\n<$set name=\"storyTiddler\" value=<<currentTiddler>>>\n<$keyboard key=\"((cancel-edit-tiddler))\" message=\"tm-cancel-tiddler\">\n<$keyboard key=\"((save-tiddler))\" message=\"tm-save-tiddler\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/EditTemplate]!has[draft.of]]\" variable=\"listItem\">\n<$transclude tiddler=<<listItem>>/>\n</$list>\n</$keyboard>\n</$keyboard>\n</$set>\n</div>\n"
        },
        "$:/core/ui/Buttons/cancel": {
            "title": "$:/core/ui/Buttons/cancel",
            "tags": "$:/tags/EditToolbar",
            "caption": "{{$:/core/images/cancel-button}} {{$:/language/Buttons/Cancel/Caption}}",
            "description": "{{$:/language/Buttons/Cancel/Hint}}",
            "text": "<$button message=\"tm-cancel-tiddler\" tooltip={{$:/language/Buttons/Cancel/Hint}} aria-label={{$:/language/Buttons/Cancel/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/cancel-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Cancel/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/delete": {
            "title": "$:/core/ui/Buttons/delete",
            "tags": "$:/tags/EditToolbar $:/tags/ViewToolbar",
            "caption": "{{$:/core/images/delete-button}} {{$:/language/Buttons/Delete/Caption}}",
            "description": "{{$:/language/Buttons/Delete/Hint}}",
            "text": "<$button message=\"tm-delete-tiddler\" tooltip={{$:/language/Buttons/Delete/Hint}} aria-label={{$:/language/Buttons/Delete/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/delete-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Delete/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/save": {
            "title": "$:/core/ui/Buttons/save",
            "tags": "$:/tags/EditToolbar",
            "caption": "{{$:/core/images/done-button}} {{$:/language/Buttons/Save/Caption}}",
            "description": "{{$:/language/Buttons/Save/Hint}}",
            "text": "<$fieldmangler><$button tooltip={{$:/language/Buttons/Save/Hint}} aria-label={{$:/language/Buttons/Save/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-add-tag\" $param={{$:/temp/NewTagName}}/>\n<$action-deletetiddler $tiddler=\"$:/temp/NewTagName\"/>\n<$action-sendmessage $message=\"tm-add-field\" $name={{$:/temp/newfieldname}} $value={{$:/temp/newfieldvalue}}/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldname\"/>\n<$action-deletetiddler $tiddler=\"$:/temp/newfieldvalue\"/>\n<$action-sendmessage $message=\"tm-save-tiddler\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/done-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Save/Caption}}/></span>\n</$list>\n</$button>\n</$fieldmangler>\n"
        },
        "$:/core/Filters/AllTags": {
            "title": "$:/core/Filters/AllTags",
            "tags": "$:/tags/Filter",
            "filter": "[tags[]!is[system]sort[title]]",
            "description": "{{$:/language/Filters/AllTags}}",
            "text": ""
        },
        "$:/core/Filters/AllTiddlers": {
            "title": "$:/core/Filters/AllTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[!is[system]sort[title]]",
            "description": "{{$:/language/Filters/AllTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/Drafts": {
            "title": "$:/core/Filters/Drafts",
            "tags": "$:/tags/Filter",
            "filter": "[has[draft.of]sort[title]]",
            "description": "{{$:/language/Filters/Drafts}}",
            "text": ""
        },
        "$:/core/Filters/Missing": {
            "title": "$:/core/Filters/Missing",
            "tags": "$:/tags/Filter",
            "filter": "[all[missing]sort[title]]",
            "description": "{{$:/language/Filters/Missing}}",
            "text": ""
        },
        "$:/core/Filters/Orphans": {
            "title": "$:/core/Filters/Orphans",
            "tags": "$:/tags/Filter",
            "filter": "[all[orphans]sort[title]]",
            "description": "{{$:/language/Filters/Orphans}}",
            "text": ""
        },
        "$:/core/Filters/OverriddenShadowTiddlers": {
            "title": "$:/core/Filters/OverriddenShadowTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[is[shadow]]",
            "description": "{{$:/language/Filters/OverriddenShadowTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/RecentSystemTiddlers": {
            "title": "$:/core/Filters/RecentSystemTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[has[modified]!sort[modified]limit[50]]",
            "description": "{{$:/language/Filters/RecentSystemTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/RecentTiddlers": {
            "title": "$:/core/Filters/RecentTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[!is[system]has[modified]!sort[modified]limit[50]]",
            "description": "{{$:/language/Filters/RecentTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/ShadowTiddlers": {
            "title": "$:/core/Filters/ShadowTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[all[shadows]sort[title]]",
            "description": "{{$:/language/Filters/ShadowTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/SystemTags": {
            "title": "$:/core/Filters/SystemTags",
            "tags": "$:/tags/Filter",
            "filter": "[all[shadows+tiddlers]tags[]is[system]sort[title]]",
            "description": "{{$:/language/Filters/SystemTags}}",
            "text": ""
        },
        "$:/core/Filters/SystemTiddlers": {
            "title": "$:/core/Filters/SystemTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[is[system]sort[title]]",
            "description": "{{$:/language/Filters/SystemTiddlers}}",
            "text": ""
        },
        "$:/core/Filters/TypedTiddlers": {
            "title": "$:/core/Filters/TypedTiddlers",
            "tags": "$:/tags/Filter",
            "filter": "[!is[system]has[type]each[type]sort[type]] -[type[text/vnd.tiddlywiki]]",
            "description": "{{$:/language/Filters/TypedTiddlers}}",
            "text": ""
        },
        "$:/core/ui/ImportListing": {
            "title": "$:/core/ui/ImportListing",
            "text": "\\define lingo-base() $:/language/Import/\n\\define messageField()\nmessage-$(payloadTiddler)$\n\\end\n\\define selectionField()\nselection-$(payloadTiddler)$\n\\end\n\\define previewPopupState()\n$(currentTiddler)$!!popup-$(payloadTiddler)$\n\\end\n<table>\n<tbody>\n<tr>\n<th>\n<<lingo Listing/Select/Caption>>\n</th>\n<th>\n<<lingo Listing/Title/Caption>>\n</th>\n<th>\n<<lingo Listing/Status/Caption>>\n</th>\n</tr>\n<$list filter=\"[all[current]plugintiddlers[]sort[title]]\" variable=\"payloadTiddler\">\n<tr>\n<td>\n<$checkbox field=<<selectionField>> checked=\"checked\" unchecked=\"unchecked\" default=\"checked\"/>\n</td>\n<td>\n<$reveal type=\"nomatch\" state=<<previewPopupState>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<previewPopupState>> setTo=\"yes\">\n{{$:/core/images/right-arrow}}&nbsp;<$text text=<<payloadTiddler>>/>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<previewPopupState>> text=\"yes\">\n<$button class=\"tc-btn-invisible tc-btn-dropdown\" set=<<previewPopupState>> setTo=\"no\">\n{{$:/core/images/down-arrow}}&nbsp;<$text text=<<payloadTiddler>>/>\n</$button>\n</$reveal>\n</td>\n<td>\n<$view field=<<messageField>>/>\n</td>\n</tr>\n<tr>\n<td colspan=\"3\">\n<$reveal type=\"match\" text=\"yes\" state=<<previewPopupState>>>\n<$transclude subtiddler=<<payloadTiddler>> mode=\"block\"/>\n</$reveal>\n</td>\n</tr>\n</$list>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/ListItemTemplate": {
            "title": "$:/core/ui/ListItemTemplate",
            "text": "<div class=\"tc-menu-list-item\">\n<$link to={{!!title}}>\n<$view field=\"title\"/>\n</$link>\n</div>"
        },
        "$:/core/ui/MissingTemplate": {
            "title": "$:/core/ui/MissingTemplate",
            "text": "<div class=\"tc-tiddler-missing\">\n<$button popup=<<qualify \"$:/state/popup/missing\">> class=\"tc-btn-invisible tc-missing-tiddler-label\">\n<$view field=\"title\" format=\"text\" />\n</$button>\n<$reveal state=<<qualify \"$:/state/popup/missing\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$transclude tiddler=\"$:/core/ui/ListItemTemplate\"/>\n<hr>\n<$list filter=\"[all[current]backlinks[]sort[title]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n</div>\n</$reveal>\n</div>\n"
        },
        "$:/core/ui/MoreSideBar/All": {
            "title": "$:/core/ui/MoreSideBar/All",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/All/Caption}}",
            "text": "<$list filter={{$:/core/Filters/AllTiddlers!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Drafts": {
            "title": "$:/core/ui/MoreSideBar/Drafts",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Drafts/Caption}}",
            "text": "<$list filter={{$:/core/Filters/Drafts!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Missing": {
            "title": "$:/core/ui/MoreSideBar/Missing",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Missing/Caption}}",
            "text": "<$list filter={{$:/core/Filters/Missing!!filter}} template=\"$:/core/ui/MissingTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Orphans": {
            "title": "$:/core/ui/MoreSideBar/Orphans",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Orphans/Caption}}",
            "text": "<$list filter={{$:/core/Filters/Orphans!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Recent": {
            "title": "$:/core/ui/MoreSideBar/Recent",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Recent/Caption}}",
            "text": "<$macrocall $name=\"timeline\" format={{$:/language/RecentChanges/DateFormat}}/>\n"
        },
        "$:/core/ui/MoreSideBar/Shadows": {
            "title": "$:/core/ui/MoreSideBar/Shadows",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Shadows/Caption}}",
            "text": "<$list filter={{$:/core/Filters/ShadowTiddlers!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/System": {
            "title": "$:/core/ui/MoreSideBar/System",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/System/Caption}}",
            "text": "<$list filter={{$:/core/Filters/SystemTiddlers!!filter}} template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/MoreSideBar/Tags": {
            "title": "$:/core/ui/MoreSideBar/Tags",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Tags/Caption}}",
            "text": "<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n{{$:/core/ui/Buttons/tag-manager}}\n\n</$set>\n\n</$set>\n\n</$set>\n\n<$list filter={{$:/core/Filters/AllTags!!filter}}>\n\n<$transclude tiddler=\"$:/core/ui/TagTemplate\"/>\n\n</$list>\n\n<hr class=\"tc-untagged-separator\">\n\n{{$:/core/ui/UntaggedTemplate}}\n"
        },
        "$:/core/ui/MoreSideBar/Types": {
            "title": "$:/core/ui/MoreSideBar/Types",
            "tags": "$:/tags/MoreSideBar",
            "caption": "{{$:/language/SideBar/Types/Caption}}",
            "text": "<$list filter={{$:/core/Filters/TypedTiddlers!!filter}}>\n<div class=\"tc-menu-list-item\">\n<$view field=\"type\"/>\n<$list filter=\"[type{!!type}!is[system]sort[title]]\">\n<div class=\"tc-menu-list-subitem\">\n<$link to={{!!title}}><$view field=\"title\"/></$link>\n</div>\n</$list>\n</div>\n</$list>\n"
        },
        "$:/core/ui/Buttons/advanced-search": {
            "title": "$:/core/ui/Buttons/advanced-search",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/advanced-search-button}} {{$:/language/Buttons/AdvancedSearch/Caption}}",
            "description": "{{$:/language/Buttons/AdvancedSearch/Hint}}",
            "text": "\\define control-panel-button(class)\n<$button to=\"$:/AdvancedSearch\" tooltip={{$:/language/Buttons/AdvancedSearch/Hint}} aria-label={{$:/language/Buttons/AdvancedSearch/Caption}} class=\"\"\"$(tv-config-toolbar-class)$ $class$\"\"\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/advanced-search-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/AdvancedSearch/Caption}}/></span>\n</$list>\n</$button>\n\\end\n\n<$list filter=\"[list[$:/StoryList]] +[field:title[$:/AdvancedSearch]]\" emptyMessage=<<control-panel-button>>>\n<<control-panel-button \"tc-selected\">>\n</$list>\n"
        },
        "$:/core/ui/Buttons/close-all": {
            "title": "$:/core/ui/Buttons/close-all",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/close-all-button}} {{$:/language/Buttons/CloseAll/Caption}}",
            "description": "{{$:/language/Buttons/CloseAll/Hint}}",
            "text": "<$button message=\"tm-close-all-tiddlers\" tooltip={{$:/language/Buttons/CloseAll/Hint}} aria-label={{$:/language/Buttons/CloseAll/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/close-all-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/CloseAll/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/control-panel": {
            "title": "$:/core/ui/Buttons/control-panel",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/options-button}} {{$:/language/Buttons/ControlPanel/Caption}}",
            "description": "{{$:/language/Buttons/ControlPanel/Hint}}",
            "text": "\\define control-panel-button(class)\n<$button to=\"$:/ControlPanel\" tooltip={{$:/language/Buttons/ControlPanel/Hint}} aria-label={{$:/language/Buttons/ControlPanel/Caption}} class=\"\"\"$(tv-config-toolbar-class)$ $class$\"\"\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/options-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/ControlPanel/Caption}}/></span>\n</$list>\n</$button>\n\\end\n\n<$list filter=\"[list[$:/StoryList]] +[field:title[$:/ControlPanel]]\" emptyMessage=<<control-panel-button>>>\n<<control-panel-button \"tc-selected\">>\n</$list>\n"
        },
        "$:/core/ui/Buttons/encryption": {
            "title": "$:/core/ui/Buttons/encryption",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/locked-padlock}} {{$:/language/Buttons/Encryption/Caption}}",
            "description": "{{$:/language/Buttons/Encryption/Hint}}",
            "text": "<$reveal type=\"match\" state=\"$:/isEncrypted\" text=\"yes\">\n<$button message=\"tm-clear-password\" tooltip={{$:/language/Buttons/Encryption/ClearPassword/Hint}} aria-label={{$:/language/Buttons/Encryption/ClearPassword/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/locked-padlock}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Encryption/ClearPassword/Caption}}/></span>\n</$list>\n</$button>\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/isEncrypted\" text=\"yes\">\n<$button message=\"tm-set-password\" tooltip={{$:/language/Buttons/Encryption/SetPassword/Hint}} aria-label={{$:/language/Buttons/Encryption/SetPassword/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/unlocked-padlock}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Encryption/SetPassword/Caption}}/></span>\n</$list>\n</$button>\n</$reveal>"
        },
        "$:/core/ui/Buttons/export-page": {
            "title": "$:/core/ui/Buttons/export-page",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/export-button}} {{$:/language/Buttons/ExportPage/Caption}}",
            "description": "{{$:/language/Buttons/ExportPage/Hint}}",
            "text": "<$macrocall $name=\"exportButton\" exportFilter=\"[!is[system]sort[title]]\" lingoBase=\"$:/language/Buttons/ExportPage/\"/>"
        },
        "$:/core/ui/Buttons/fold-all": {
            "title": "$:/core/ui/Buttons/fold-all",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/fold-all-button}} {{$:/language/Buttons/FoldAll/Caption}}",
            "description": "{{$:/language/Buttons/FoldAll/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/FoldAll/Hint}} aria-label={{$:/language/Buttons/FoldAll/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-all-tiddlers\" $param=<<currentTiddler>> foldedStatePrefix=\"$:/state/folded/\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/fold-all-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/FoldAll/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/full-screen": {
            "title": "$:/core/ui/Buttons/full-screen",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/full-screen-button}} {{$:/language/Buttons/FullScreen/Caption}}",
            "description": "{{$:/language/Buttons/FullScreen/Hint}}",
            "text": "<$button message=\"tm-full-screen\" tooltip={{$:/language/Buttons/FullScreen/Hint}} aria-label={{$:/language/Buttons/FullScreen/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/full-screen-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/FullScreen/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/home": {
            "title": "$:/core/ui/Buttons/home",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/home-button}} {{$:/language/Buttons/Home/Caption}}",
            "description": "{{$:/language/Buttons/Home/Hint}}",
            "text": "<$button message=\"tm-home\" tooltip={{$:/language/Buttons/Home/Hint}} aria-label={{$:/language/Buttons/Home/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/home-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Home/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/import": {
            "title": "$:/core/ui/Buttons/import",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/import-button}} {{$:/language/Buttons/Import/Caption}}",
            "description": "{{$:/language/Buttons/Import/Hint}}",
            "text": "<div class=\"tc-file-input-wrapper\">\n<$button tooltip={{$:/language/Buttons/Import/Hint}} aria-label={{$:/language/Buttons/Import/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/import-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Import/Caption}}/></span>\n</$list>\n</$button>\n<$browse tooltip={{$:/language/Buttons/Import/Hint}}/>\n</div>"
        },
        "$:/core/ui/Buttons/language": {
            "title": "$:/core/ui/Buttons/language",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/globe}} {{$:/language/Buttons/Language/Caption}}",
            "description": "{{$:/language/Buttons/Language/Hint}}",
            "text": "\\define flag-title()\n$(languagePluginTitle)$/icon\n\\end\n<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/language\">> tooltip={{$:/language/Buttons/Language/Hint}} aria-label={{$:/language/Buttons/Language/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n<span class=\"tc-image-button\">\n<$set name=\"languagePluginTitle\" value={{$:/language}}>\n<$image source=<<flag-title>>/>\n</$set>\n</span>\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Language/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/language\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down tc-drop-down-language-chooser\">\n<$linkcatcher to=\"$:/language\">\n<$list filter=\"[[$:/languages/en-GB]] [plugin-type[language]sort[description]]\">\n<$link>\n<span class=\"tc-drop-down-bullet\">\n<$reveal type=\"match\" state=\"$:/language\" text=<<currentTiddler>>>\n&bull;\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/language\" text=<<currentTiddler>>>\n&nbsp;\n</$reveal>\n</span>\n<span class=\"tc-image-button\">\n<$set name=\"languagePluginTitle\" value=<<currentTiddler>>>\n<$transclude subtiddler=<<flag-title>>>\n<$list filter=\"[all[current]field:title[$:/languages/en-GB]]\">\n<$transclude tiddler=\"$:/languages/en-GB/icon\"/>\n</$list>\n</$transclude>\n</$set>\n</span>\n<$view field=\"description\">\n<$view field=\"name\">\n<$view field=\"title\"/>\n</$view>\n</$view>\n</$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/more-page-actions": {
            "title": "$:/core/ui/Buttons/more-page-actions",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/down-arrow}} {{$:/language/Buttons/More/Caption}}",
            "description": "{{$:/language/Buttons/More/Hint}}",
            "text": "\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n<$button popup=<<qualify \"$:/state/popup/more\">> tooltip={{$:/language/Buttons/More/Hint}} aria-label={{$:/language/Buttons/More/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/down-arrow}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/More/Caption}}/></span>\n</$list>\n</$button><$reveal state=<<qualify \"$:/state/popup/more\">> type=\"popup\" position=\"below\" animate=\"yes\">\n\n<div class=\"tc-drop-down\">\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"tc-btn-invisible\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]] -[[$:/core/ui/Buttons/more-page-actions]]\" variable=\"listItem\">\n\n<$reveal type=\"match\" state=<<config-title>> text=\"hide\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$reveal>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</div>\n\n</$reveal>"
        },
        "$:/core/ui/Buttons/new-image": {
            "title": "$:/core/ui/Buttons/new-image",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/new-image-button}} {{$:/language/Buttons/NewImage/Caption}}",
            "description": "{{$:/language/Buttons/NewImage/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/NewImage/Hint}} aria-label={{$:/language/Buttons/NewImage/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" type=\"image/jpeg\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-image-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewImage/Caption}}/></span>\n</$list>\n</$button>\n"
        },
        "$:/core/ui/Buttons/new-journal": {
            "title": "$:/core/ui/Buttons/new-journal",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/new-journal-button}} {{$:/language/Buttons/NewJournal/Caption}}",
            "description": "{{$:/language/Buttons/NewJournal/Hint}}",
            "text": "\\define journalButton()\n<$button tooltip={{$:/language/Buttons/NewJournal/Hint}} aria-label={{$:/language/Buttons/NewJournal/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" title=<<now \"$(journalTitleTemplate)$\">> tags=\"$(journalTags)$\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-journal-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewJournal/Caption}}/></span>\n</$list>\n</$button>\n\\end\n<$set name=\"journalTitleTemplate\" value={{$:/config/NewJournal/Title}}>\n<$set name=\"journalTags\" value={{$:/config/NewJournal/Tags}}>\n<<journalButton>>\n</$set></$set>"
        },
        "$:/core/ui/Buttons/new-tiddler": {
            "title": "$:/core/ui/Buttons/new-tiddler",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/new-button}} {{$:/language/Buttons/NewTiddler/Caption}}",
            "description": "{{$:/language/Buttons/NewTiddler/Hint}}",
            "text": "<$button message=\"tm-new-tiddler\" tooltip={{$:/language/Buttons/NewTiddler/Hint}} aria-label={{$:/language/Buttons/NewTiddler/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewTiddler/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/palette": {
            "title": "$:/core/ui/Buttons/palette",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/palette}} {{$:/language/Buttons/Palette/Caption}}",
            "description": "{{$:/language/Buttons/Palette/Hint}}",
            "text": "<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/palette\">> tooltip={{$:/language/Buttons/Palette/Hint}} aria-label={{$:/language/Buttons/Palette/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/palette}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Palette/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/palette\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\" style=\"font-size:0.7em;\">\n{{$:/snippets/paletteswitcher}}\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/refresh": {
            "title": "$:/core/ui/Buttons/refresh",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/refresh-button}} {{$:/language/Buttons/Refresh/Caption}}",
            "description": "{{$:/language/Buttons/Refresh/Hint}}",
            "text": "<$button message=\"tm-browser-refresh\" tooltip={{$:/language/Buttons/Refresh/Hint}} aria-label={{$:/language/Buttons/Refresh/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/refresh-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Refresh/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/save-wiki": {
            "title": "$:/core/ui/Buttons/save-wiki",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/save-button}} {{$:/language/Buttons/SaveWiki/Caption}}",
            "description": "{{$:/language/Buttons/SaveWiki/Hint}}",
            "text": "<$button message=\"tm-save-wiki\" param={{$:/config/SaveWikiButton/Template}} tooltip={{$:/language/Buttons/SaveWiki/Hint}} aria-label={{$:/language/Buttons/SaveWiki/Caption}} class=<<tv-config-toolbar-class>>>\n<span class=\"tc-dirty-indicator\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/save-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/SaveWiki/Caption}}/></span>\n</$list>\n</span>\n</$button>"
        },
        "$:/core/ui/Buttons/storyview": {
            "title": "$:/core/ui/Buttons/storyview",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/storyview-classic}} {{$:/language/Buttons/StoryView/Caption}}",
            "description": "{{$:/language/Buttons/StoryView/Hint}}",
            "text": "\\define icon()\n$:/core/images/storyview-$(storyview)$\n\\end\n<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/storyview\">> tooltip={{$:/language/Buttons/StoryView/Hint}} aria-label={{$:/language/Buttons/StoryView/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n<$set name=\"storyview\" value={{$:/view}}>\n<$transclude tiddler=<<icon>>/>\n</$set>\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/StoryView/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/storyview\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$linkcatcher to=\"$:/view\">\n<$list filter=\"[storyviews[]]\" variable=\"storyview\">\n<$link to=<<storyview>>>\n<span class=\"tc-drop-down-bullet\">\n<$reveal type=\"match\" state=\"$:/view\" text=<<storyview>>>\n&bull;\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/view\" text=<<storyview>>>\n&nbsp;\n</$reveal>\n</span>\n<$transclude tiddler=<<icon>>/>\n<$text text=<<storyview>>/></$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/tag-manager": {
            "title": "$:/core/ui/Buttons/tag-manager",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/tag-button}} {{$:/language/Buttons/TagManager/Caption}}",
            "description": "{{$:/language/Buttons/TagManager/Hint}}",
            "text": "\\define control-panel-button(class)\n<$button to=\"$:/TagManager\" tooltip={{$:/language/Buttons/TagManager/Hint}} aria-label={{$:/language/Buttons/TagManager/Caption}} class=\"\"\"$(tv-config-toolbar-class)$ $class$\"\"\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/tag-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/TagManager/Caption}}/></span>\n</$list>\n</$button>\n\\end\n\n<$list filter=\"[list[$:/StoryList]] +[field:title[$:/TagManager]]\" emptyMessage=<<control-panel-button>>>\n<<control-panel-button \"tc-selected\">>\n</$list>\n"
        },
        "$:/core/ui/Buttons/theme": {
            "title": "$:/core/ui/Buttons/theme",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/theme-button}} {{$:/language/Buttons/Theme/Caption}}",
            "description": "{{$:/language/Buttons/Theme/Hint}}",
            "text": "<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/theme\">> tooltip={{$:/language/Buttons/Theme/Hint}} aria-label={{$:/language/Buttons/Theme/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/theme-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Theme/Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/theme\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$linkcatcher to=\"$:/theme\">\n<$list filter=\"[plugin-type[theme]sort[title]]\" variable=\"themeTitle\">\n<$link to=<<themeTitle>>>\n<span class=\"tc-drop-down-bullet\">\n<$reveal type=\"match\" state=\"$:/theme\" text=<<themeTitle>>>\n&bull;\n</$reveal>\n<$reveal type=\"nomatch\" state=\"$:/theme\" text=<<themeTitle>>>\n&nbsp;\n</$reveal>\n</span>\n<$view tiddler=<<themeTitle>> field=\"name\"/>\n</$link>\n</$list>\n</$linkcatcher>\n</div>\n</$reveal>"
        },
        "$:/core/ui/Buttons/unfold-all": {
            "title": "$:/core/ui/Buttons/unfold-all",
            "tags": "$:/tags/PageControls",
            "caption": "{{$:/core/images/unfold-all-button}} {{$:/language/Buttons/UnfoldAll/Caption}}",
            "description": "{{$:/language/Buttons/UnfoldAll/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/UnfoldAll/Hint}} aria-label={{$:/language/Buttons/UnfoldAll/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-unfold-all-tiddlers\" $param=<<currentTiddler>> foldedStatePrefix=\"$:/state/folded/\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/unfold-all-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/UnfoldAll/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/PageTemplate/pagecontrols": {
            "title": "$:/core/ui/PageTemplate/pagecontrols",
            "text": "\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n<div class=\"tc-page-controls\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]\" variable=\"listItem\">\n<$reveal type=\"nomatch\" state=<<config-title>> text=\"hide\">\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n</$reveal>\n</$list>\n</div>\n\n"
        },
        "$:/core/ui/PageStylesheet": {
            "title": "$:/core/ui/PageStylesheet",
            "text": "<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<$set name=\"currentTiddler\" value={{$:/language}}>\n\n<$set name=\"languageTitle\" value={{!!name}}>\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Stylesheet]!has[draft.of]]\">\n<$transclude mode=\"block\"/>\n</$list>\n\n</$set>\n\n</$set>\n\n</$importvariables>\n"
        },
        "$:/core/ui/PageTemplate/alerts": {
            "title": "$:/core/ui/PageTemplate/alerts",
            "tags": "$:/tags/PageTemplate",
            "text": "<div class=\"tc-alerts\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Alert]!has[draft.of]]\" template=\"$:/core/ui/AlertTemplate\" storyview=\"pop\"/>\n\n</div>\n"
        },
        "$:/core/ui/PageTemplate/pluginreloadwarning": {
            "title": "$:/core/ui/PageTemplate/pluginreloadwarning",
            "tags": "$:/tags/PageTemplate",
            "text": "\\define lingo-base() $:/language/\n\n<$list filter=\"[has[plugin-type]haschanged[]!plugin-type[import]limit[1]]\">\n\n<$reveal type=\"nomatch\" state=\"$:/temp/HidePluginWarning\" text=\"yes\">\n\n<div class=\"tc-plugin-reload-warning\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n<<lingo PluginReloadWarning>> <$button set=\"$:/temp/HidePluginWarning\" setTo=\"yes\" class=\"tc-btn-invisible\">{{$:/core/images/close-button}}</$button>\n\n</$set>\n\n</div>\n\n</$reveal>\n\n</$list>\n"
        },
        "$:/core/ui/PageTemplate/sidebar": {
            "title": "$:/core/ui/PageTemplate/sidebar",
            "tags": "$:/tags/PageTemplate",
            "text": "<$scrollable fallthrough=\"no\" class=\"tc-sidebar-scrollable\">\n\n<div class=\"tc-sidebar-header\">\n\n<$reveal state=\"$:/state/sidebar\" type=\"match\" text=\"yes\" default=\"yes\" retain=\"yes\" animate=\"yes\">\n\n<h1 class=\"tc-site-title\">\n\n<$transclude tiddler=\"$:/SiteTitle\" mode=\"inline\"/>\n\n</h1>\n\n<div class=\"tc-site-subtitle\">\n\n<$transclude tiddler=\"$:/SiteSubtitle\" mode=\"inline\"/>\n\n</div>\n\n{{||$:/core/ui/PageTemplate/pagecontrols}}\n\n<$transclude tiddler=\"$:/core/ui/SideBarLists\" mode=\"inline\"/>\n\n</$reveal>\n\n</div>\n\n</$scrollable>"
        },
        "$:/core/ui/PageTemplate/story": {
            "title": "$:/core/ui/PageTemplate/story",
            "tags": "$:/tags/PageTemplate",
            "text": "<section class=\"tc-story-river\">\n\n<section class=\"story-backdrop\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/AboveStory]!has[draft.of]]\">\n\n<$transclude/>\n\n</$list>\n\n</section>\n\n<$list filter=\"[list[$:/StoryList]]\" history=\"$:/HistoryList\" template=\"$:/core/ui/ViewTemplate\" editTemplate=\"$:/core/ui/EditTemplate\" storyview={{$:/view}} emptyMessage={{$:/config/EmptyStoryMessage}}/>\n\n<section class=\"story-frontdrop\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/BelowStory]!has[draft.of]]\">\n\n<$transclude/>\n\n</$list>\n\n</section>\n\n</section>\n"
        },
        "$:/core/ui/PageTemplate/topleftbar": {
            "title": "$:/core/ui/PageTemplate/topleftbar",
            "tags": "$:/tags/PageTemplate",
            "text": "<span class=\"tc-topbar tc-topbar-left\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TopLeftBar]!has[draft.of]]\" variable=\"listItem\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$list>\n\n</span>\n"
        },
        "$:/core/ui/PageTemplate/toprightbar": {
            "title": "$:/core/ui/PageTemplate/toprightbar",
            "tags": "$:/tags/PageTemplate",
            "text": "<span class=\"tc-topbar tc-topbar-right\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TopRightBar]!has[draft.of]]\" variable=\"listItem\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$list>\n\n</span>\n"
        },
        "$:/core/ui/PageTemplate": {
            "title": "$:/core/ui/PageTemplate",
            "text": "\\define containerClasses()\ntc-page-container tc-page-view-$(themeTitle)$ tc-language-$(languageTitle)$\n\\end\n\n<$importvariables filter=\"[[$:/core/ui/PageMacros]] [all[shadows+tiddlers]tag[$:/tags/Macro]!has[draft.of]]\">\n\n<$set name=\"tv-config-toolbar-icons\" value={{$:/config/Toolbar/Icons}}>\n\n<$set name=\"tv-config-toolbar-text\" value={{$:/config/Toolbar/Text}}>\n\n<$set name=\"tv-config-toolbar-class\" value={{$:/config/Toolbar/ButtonClass}}>\n\n<$set name=\"themeTitle\" value={{$:/view}}>\n\n<$set name=\"currentTiddler\" value={{$:/language}}>\n\n<$set name=\"languageTitle\" value={{!!name}}>\n\n<$set name=\"currentTiddler\" value=\"\">\n\n<div class=<<containerClasses>>>\n\n<$navigator story=\"$:/StoryList\" history=\"$:/HistoryList\" openLinkFromInsideRiver={{$:/config/Navigation/openLinkFromInsideRiver}} openLinkFromOutsideRiver={{$:/config/Navigation/openLinkFromOutsideRiver}}>\n\n<$dropzone>\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageTemplate]!has[draft.of]]\" variable=\"listItem\">\n\n<$transclude tiddler=<<listItem>>/>\n\n</$list>\n\n</$dropzone>\n\n</$navigator>\n\n</div>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</$importvariables>\n"
        },
        "$:/core/ui/PluginInfo": {
            "title": "$:/core/ui/PluginInfo",
            "text": "\\define localised-info-tiddler-title()\n$(currentTiddler)$/$(languageTitle)$/$(currentTab)$\n\\end\n\\define info-tiddler-title()\n$(currentTiddler)$/$(currentTab)$\n\\end\n<$transclude tiddler=<<localised-info-tiddler-title>> mode=\"block\">\n<$transclude tiddler=<<currentTiddler>> subtiddler=<<localised-info-tiddler-title>> mode=\"block\">\n<$transclude tiddler=<<currentTiddler>> subtiddler=<<info-tiddler-title>> mode=\"block\">\n{{$:/language/ControlPanel/Plugin/NoInfoFound/Hint}}\n</$transclude>\n</$transclude>\n</$transclude>\n"
        },
        "$:/core/ui/SearchResults": {
            "title": "$:/core/ui/SearchResults",
            "text": "<div class=\"tc-search-results\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]butfirst[]limit[1]]\" emptyMessage=\"\"\"\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\">\n<$transclude mode=\"block\"/>\n</$list>\n\"\"\">\n\n<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/SearchResults]!has[draft.of]]\" default={{$:/config/SearchResults/Default}}/>\n\n</$list>\n\n</div>\n"
        },
        "$:/core/ui/SideBar/More": {
            "title": "$:/core/ui/SideBar/More",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/More/Caption}}",
            "text": "<div class=\"tc-more-sidebar\">\n<<tabs \"[all[shadows+tiddlers]tag[$:/tags/MoreSideBar]!has[draft.of]]\" \"$:/core/ui/MoreSideBar/Tags\" \"$:/state/tab/moresidebar\" \"tc-vertical\">>\n</div>\n"
        },
        "$:/core/ui/SideBar/Open": {
            "title": "$:/core/ui/SideBar/Open",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/Open/Caption}}",
            "text": "\\define lingo-base() $:/language/CloseAll/\n<$list filter=\"[list[$:/StoryList]]\" history=\"$:/HistoryList\" storyview=\"pop\">\n\n<$button message=\"tm-close-tiddler\" tooltip={{$:/language/Buttons/Close/Hint}} aria-label={{$:/language/Buttons/Close/Caption}} class=\"tc-btn-invisible tc-btn-mini\">&times;</$button> <$link to={{!!title}}><$view field=\"title\"/></$link>\n\n</$list>\n\n<$button message=\"tm-close-all-tiddlers\" class=\"tc-btn-invisible tc-btn-mini\"><<lingo Button>></$button>\n"
        },
        "$:/core/ui/SideBar/Recent": {
            "title": "$:/core/ui/SideBar/Recent",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/Recent/Caption}}",
            "text": "<$macrocall $name=\"timeline\" format={{$:/language/RecentChanges/DateFormat}}/>\n"
        },
        "$:/core/ui/SideBar/Tools": {
            "title": "$:/core/ui/SideBar/Tools",
            "tags": "$:/tags/SideBar",
            "caption": "{{$:/language/SideBar/Tools/Caption}}",
            "text": "\\define lingo-base() $:/language/ControlPanel/\n\\define config-title()\n$:/config/PageControlButtons/Visibility/$(listItem)$\n\\end\n\n<<lingo Basics/Version/Prompt>> <<version>>\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]\" variable=\"listItem\">\n\n<div style=\"position:relative;\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>>/> <i class=\"tc-muted\"><$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</div>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/SideBarLists": {
            "title": "$:/core/ui/SideBarLists",
            "text": "<div class=\"tc-sidebar-lists\">\n\n<$set name=\"searchTiddler\" value=\"$:/temp/search\">\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/search\" type=\"search\" tag=\"input\" focus={{$:/config/Search/AutoFocus}} focusPopup=<<qualify \"$:/state/popup/search-dropdown\">> class=\"tc-popup-handle\"/>\n<$reveal state=\"$:/temp/search\" type=\"nomatch\" text=\"\">\n<$button tooltip={{$:/language/Buttons/AdvancedSearch/Hint}} aria-label={{$:/language/Buttons/AdvancedSearch/Caption}} class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" text={{$:/temp/search}}/>\n<$action-setfield $tiddler=\"$:/temp/search\" text=\"\"/>\n<$action-navigate $to=\"$:/AdvancedSearch\"/>\n{{$:/core/images/advanced-search-button}}\n</$button>\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/search\" text=\"\" />\n{{$:/core/images/close-button}}\n</$button>\n<$button popup=<<qualify \"$:/state/popup/search-dropdown\">> class=\"tc-btn-invisible\">\n<$set name=\"resultCount\" value=\"\"\"<$count filter=\"[!is[system]search{$(searchTiddler)$}]\"/>\"\"\">\n{{$:/core/images/down-arrow}} {{$:/language/Search/Matches}}\n</$set>\n</$button>\n</$reveal>\n<$reveal state=\"$:/temp/search\" type=\"match\" text=\"\">\n<$button to=\"$:/AdvancedSearch\" tooltip={{$:/language/Buttons/AdvancedSearch/Hint}} aria-label={{$:/language/Buttons/AdvancedSearch/Caption}} class=\"tc-btn-invisible\">\n{{$:/core/images/advanced-search-button}}\n</$button>\n</$reveal>\n</div>\n\n<$reveal tag=\"div\" class=\"tc-block-dropdown-wrapper\" state=\"$:/temp/search\" type=\"nomatch\" text=\"\">\n\n<$reveal tag=\"div\" class=\"tc-block-dropdown tc-search-drop-down tc-popup-handle\" state=<<qualify \"$:/state/popup/search-dropdown\">> type=\"nomatch\" text=\"\" default=\"\">\n\n{{$:/core/ui/SearchResults}}\n\n</$reveal>\n\n</$reveal>\n\n</$set>\n\n<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/SideBar]!has[draft.of]]\" default={{$:/config/DefaultSidebarTab}} state=\"$:/state/tab/sidebar\" />\n\n</div>\n"
        },
        "$:/TagManager": {
            "title": "$:/TagManager",
            "icon": "$:/core/images/tag-button",
            "color": "#bbb",
            "text": "\\define lingo-base() $:/language/TagManager/\n\\define iconEditorTab(type)\n<$list filter=\"[all[shadows+tiddlers]is[image]] [all[shadows+tiddlers]tag[$:/tags/Image]] -[type[application/pdf]] +[sort[title]] +[$type$is[system]]\">\n<$link to={{!!title}}>\n<$transclude/> <$view field=\"title\"/>\n</$link>\n</$list>\n\\end\n\\define iconEditor(title)\n<div class=\"tc-drop-down-wrapper\">\n<$button popup=<<qualify \"$:/state/popup/icon/$title$\">> class=\"tc-btn-invisible tc-btn-dropdown\">{{$:/core/images/down-arrow}}</$button>\n<$reveal state=<<qualify \"$:/state/popup/icon/$title$\">> type=\"popup\" position=\"belowleft\" text=\"\" default=\"\">\n<div class=\"tc-drop-down\">\n<$linkcatcher to=\"$title$!!icon\">\n<<iconEditorTab type:\"!\">>\n<hr/>\n<<iconEditorTab type:\"\">>\n</$linkcatcher>\n</div>\n</$reveal>\n</div>\n\\end\n\\define qualifyTitle(title)\n$title$$(currentTiddler)$\n\\end\n\\define toggleButton(state)\n<$reveal state=\"$state$\" type=\"match\" text=\"closed\" default=\"closed\">\n<$button set=\"$state$\" setTo=\"open\" class=\"tc-btn-invisible tc-btn-dropdown\" selectedClass=\"tc-selected\">\n{{$:/core/images/info-button}}\n</$button>\n</$reveal>\n<$reveal state=\"$state$\" type=\"match\" text=\"open\" default=\"closed\">\n<$button set=\"$state$\" setTo=\"closed\" class=\"tc-btn-invisible tc-btn-dropdown\" selectedClass=\"tc-selected\">\n{{$:/core/images/info-button}}\n</$button>\n</$reveal>\n\\end\n<table class=\"tc-tag-manager-table\">\n<tbody>\n<tr>\n<th><<lingo Colour/Heading>></th>\n<th class=\"tc-tag-manager-tag\"><<lingo Tag/Heading>></th>\n<th><<lingo Count/Heading>></th>\n<th><<lingo Icon/Heading>></th>\n<th><<lingo Info/Heading>></th>\n</tr>\n<$list filter=\"[tags[]!is[system]sort[title]]\">\n<tr>\n<td><$edit-text field=\"color\" tag=\"input\" type=\"color\"/></td>\n<td><$transclude tiddler=\"$:/core/ui/TagTemplate\"/></td>\n<td><$count filter=\"[all[current]tagging[]]\"/></td>\n<td>\n<$macrocall $name=\"iconEditor\" title={{!!title}}/>\n</td>\n<td>\n<$macrocall $name=\"toggleButton\" state=<<qualifyTitle \"$:/state/tag-manager/\">> /> \n</td>\n</tr>\n<tr>\n<td></td>\n<td colspan=\"4\">\n<$reveal state=<<qualifyTitle \"$:/state/tag-manager/\">> type=\"match\" text=\"open\" default=\"\">\n<table>\n<tbody>\n<tr><td><<lingo Colour/Heading>></td><td><$edit-text field=\"color\" tag=\"input\" type=\"text\" size=\"9\"/></td></tr>\n<tr><td><<lingo Icon/Heading>></td><td><$edit-text field=\"icon\" tag=\"input\" size=\"45\"/></td></tr>\n</tbody>\n</table>\n</$reveal>\n</td>\n</tr>\n</$list>\n<tr>\n<td></td>\n<td>\n{{$:/core/ui/UntaggedTemplate}}\n</td>\n<td>\n<small class=\"tc-menu-list-count\"><$count filter=\"[untagged[]!is[system]] -[tags[]]\"/></small>\n</td>\n<td></td>\n<td></td>\n</tr>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/TagTemplate": {
            "title": "$:/core/ui/TagTemplate",
            "text": "\\define tag-styles()\nbackground-color:$(backgroundColor)$;\nfill:$(foregroundColor)$;\ncolor:$(foregroundColor)$;\n\\end\n\n\\define tag-body-inner(colour,fallbackTarget,colourA,colourB)\n<$vars foregroundColor=<<contrastcolour target:\"\"\"$colour$\"\"\" fallbackTarget:\"\"\"$fallbackTarget$\"\"\" colourA:\"\"\"$colourA$\"\"\" colourB:\"\"\"$colourB$\"\"\">> backgroundColor=\"\"\"$colour$\"\"\">\n<$button popup=<<qualify \"$:/state/popup/tag\">> class=\"tc-btn-invisible tc-tag-label\" style=<<tag-styles>>>\n<$transclude tiddler={{!!icon}}/> <$view field=\"title\" format=\"text\" />\n</$button>\n<$reveal state=<<qualify \"$:/state/popup/tag\">> type=\"popup\" position=\"below\" animate=\"yes\" class=\"tc-drop-down\"><$transclude tiddler=\"$:/core/ui/ListItemTemplate\"/>\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TagDropdown]!has[draft.of]]\" variable=\"listItem\"> \n<$transclude tiddler=<<listItem>>/> \n</$list> \n<hr>\n<$list filter=\"[all[current]tagging[]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n</$reveal>\n</$vars>\n\\end\n\n\\define tag-body(colour,palette)\n<span class=\"tc-tag-list-item\">\n<$macrocall $name=\"tag-body-inner\" colour=\"\"\"$colour$\"\"\" fallbackTarget={{$palette$##tag-background}} colourA={{$palette$##foreground}} colourB={{$palette$##background}}/>\n</span>\n\\end\n\n<$macrocall $name=\"tag-body\" colour={{!!color}} palette={{$:/palette}}/>\n"
        },
        "$:/core/ui/TiddlerFields": {
            "title": "$:/core/ui/TiddlerFields",
            "text": "<table class=\"tc-view-field-table\">\n<tbody>\n<$list filter=\"[all[current]fields[]sort[title]] -text\" template=\"$:/core/ui/TiddlerFieldTemplate\" variable=\"listItem\"/>\n</tbody>\n</table>\n"
        },
        "$:/core/ui/TiddlerFieldTemplate": {
            "title": "$:/core/ui/TiddlerFieldTemplate",
            "text": "<tr class=\"tc-view-field\">\n<td class=\"tc-view-field-name\">\n<$text text=<<listItem>>/>\n</td>\n<td class=\"tc-view-field-value\">\n<$view field=<<listItem>>/>\n</td>\n</tr>"
        },
        "$:/core/ui/TiddlerInfo/Advanced/PluginInfo": {
            "title": "$:/core/ui/TiddlerInfo/Advanced/PluginInfo",
            "tags": "$:/tags/TiddlerInfo/Advanced",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/Advanced/PluginInfo/\n<$list filter=\"[all[current]has[plugin-type]]\">\n\n! <<lingo Heading>>\n\n<<lingo Hint>>\n<ul>\n<$list filter=\"[all[current]plugintiddlers[]sort[title]]\" emptyMessage=<<lingo Empty/Hint>>>\n<li>\n<$link to={{!!title}}>\n<$view field=\"title\"/>\n</$link>\n</li>\n</$list>\n</ul>\n\n</$list>\n"
        },
        "$:/core/ui/TiddlerInfo/Advanced/ShadowInfo": {
            "title": "$:/core/ui/TiddlerInfo/Advanced/ShadowInfo",
            "tags": "$:/tags/TiddlerInfo/Advanced",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/Advanced/ShadowInfo/\n<$set name=\"infoTiddler\" value=<<currentTiddler>>>\n\n''<<lingo Heading>>''\n\n<$list filter=\"[all[current]!is[shadow]]\">\n\n<<lingo NotShadow/Hint>>\n\n</$list>\n\n<$list filter=\"[all[current]is[shadow]]\">\n\n<<lingo Shadow/Hint>>\n\n<$list filter=\"[all[current]shadowsource[]]\">\n\n<$set name=\"pluginTiddler\" value=<<currentTiddler>>>\n<<lingo Shadow/Source>>\n</$set>\n\n</$list>\n\n<$list filter=\"[all[current]is[shadow]is[tiddler]]\">\n\n<<lingo OverriddenShadow/Hint>>\n\n</$list>\n\n\n</$list>\n</$set>\n"
        },
        "$:/core/ui/TiddlerInfo/Advanced": {
            "title": "$:/core/ui/TiddlerInfo/Advanced",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Advanced/Caption}}",
            "text": "<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/TiddlerInfo/Advanced]!has[draft.of]]\" variable=\"listItem\">\n<$transclude tiddler=<<listItem>>/>\n\n</$list>\n"
        },
        "$:/core/ui/TiddlerInfo/Fields": {
            "title": "$:/core/ui/TiddlerInfo/Fields",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Fields/Caption}}",
            "text": "<$transclude tiddler=\"$:/core/ui/TiddlerFields\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/List": {
            "title": "$:/core/ui/TiddlerInfo/List",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/List/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[list{!!title}]\" emptyMessage=<<lingo List/Empty>> template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/Listed": {
            "title": "$:/core/ui/TiddlerInfo/Listed",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Listed/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[all[current]listed[]!is[system]]\" emptyMessage=<<lingo Listed/Empty>> template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/References": {
            "title": "$:/core/ui/TiddlerInfo/References",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/References/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[all[current]backlinks[]sort[title]]\" emptyMessage=<<lingo References/Empty>> template=\"$:/core/ui/ListItemTemplate\">\n</$list>\n"
        },
        "$:/core/ui/TiddlerInfo/Tagging": {
            "title": "$:/core/ui/TiddlerInfo/Tagging",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Tagging/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n<$list filter=\"[all[current]tagging[]]\" emptyMessage=<<lingo Tagging/Empty>> template=\"$:/core/ui/ListItemTemplate\"/>\n"
        },
        "$:/core/ui/TiddlerInfo/Tools": {
            "title": "$:/core/ui/TiddlerInfo/Tools",
            "tags": "$:/tags/TiddlerInfo",
            "caption": "{{$:/language/TiddlerInfo/Tools/Caption}}",
            "text": "\\define lingo-base() $:/language/TiddlerInfo/\n\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]\" variable=\"listItem\">\n\n<$checkbox tiddler=<<config-title>> field=\"text\" checked=\"show\" unchecked=\"hide\" default=\"show\"/> <$transclude tiddler=<<listItem>>/> <i class=\"tc-muted\"><$transclude tiddler=<<listItem>> field=\"description\"/></i>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n"
        },
        "$:/core/ui/TiddlerInfo": {
            "title": "$:/core/ui/TiddlerInfo",
            "text": "<$macrocall $name=\"tabs\" tabsList=\"[all[shadows+tiddlers]tag[$:/tags/TiddlerInfo]!has[draft.of]]\" default={{$:/config/TiddlerInfo/Default}}/>"
        },
        "$:/core/ui/TopBar/menu": {
            "title": "$:/core/ui/TopBar/menu",
            "tags": "$:/tags/TopRightBar",
            "text": "<$reveal state=\"$:/state/sidebar\" type=\"nomatch\" text=\"no\">\n<$button set=\"$:/state/sidebar\" setTo=\"no\" tooltip={{$:/language/Buttons/HideSideBar/Hint}} aria-label={{$:/language/Buttons/HideSideBar/Caption}} class=\"tc-btn-invisible\">{{$:/core/images/chevron-right}}</$button>\n</$reveal>\n<$reveal state=\"$:/state/sidebar\" type=\"match\" text=\"no\">\n<$button set=\"$:/state/sidebar\" setTo=\"yes\" tooltip={{$:/language/Buttons/ShowSideBar/Hint}} aria-label={{$:/language/Buttons/ShowSideBar/Caption}} class=\"tc-btn-invisible\">{{$:/core/images/chevron-left}}</$button>\n</$reveal>\n"
        },
        "$:/core/ui/UntaggedTemplate": {
            "title": "$:/core/ui/UntaggedTemplate",
            "text": "\\define lingo-base() $:/language/SideBar/\n<$button popup=<<qualify \"$:/state/popup/tag\">> class=\"tc-btn-invisible tc-untagged-label tc-tag-label\">\n<<lingo Tags/Untagged/Caption>>\n</$button>\n<$reveal state=<<qualify \"$:/state/popup/tag\">> type=\"popup\" position=\"below\">\n<div class=\"tc-drop-down\">\n<$list filter=\"[untagged[]!is[system]] -[tags[]] +[sort[title]]\" template=\"$:/core/ui/ListItemTemplate\"/>\n</div>\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate/body": {
            "title": "$:/core/ui/ViewTemplate/body",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal tag=\"div\" class=\"tc-tiddler-body\" type=\"nomatch\" state=<<folded-state>> text=\"hide\" retain=\"yes\" animate=\"yes\">\n\n<$list filter=\"[all[current]!has[plugin-type]!field:hide-body[yes]]\">\n\n<$transclude>\n\n<$transclude tiddler=\"$:/language/MissingTiddler/Hint\"/>\n\n</$transclude>\n\n</$list>\n\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate/classic": {
            "title": "$:/core/ui/ViewTemplate/classic",
            "tags": "$:/tags/ViewTemplate $:/tags/EditTemplate",
            "text": "\\define lingo-base() $:/language/ClassicWarning/\n<$list filter=\"[all[current]type[text/x-tiddlywiki]]\">\n<div class=\"tc-message-box\">\n\n<<lingo Hint>>\n\n<$button set=\"!!type\" setTo=\"text/vnd.tiddlywiki\"><<lingo Upgrade/Caption>></$button>\n\n</div>\n</$list>\n"
        },
        "$:/core/ui/ViewTemplate/import": {
            "title": "$:/core/ui/ViewTemplate/import",
            "tags": "$:/tags/ViewTemplate",
            "text": "\\define lingo-base() $:/language/Import/\n\n<$list filter=\"[all[current]field:plugin-type[import]]\">\n\n<div class=\"tc-import\">\n\n<<lingo Listing/Hint>>\n\n<$button message=\"tm-delete-tiddler\" param=<<currentTiddler>>><<lingo Listing/Cancel/Caption>></$button>\n<$button message=\"tm-perform-import\" param=<<currentTiddler>>><<lingo Listing/Import/Caption>></$button>\n\n{{||$:/core/ui/ImportListing}}\n\n<$button message=\"tm-delete-tiddler\" param=<<currentTiddler>>><<lingo Listing/Cancel/Caption>></$button>\n<$button message=\"tm-perform-import\" param=<<currentTiddler>>><<lingo Listing/Import/Caption>></$button>\n\n</div>\n\n</$list>\n"
        },
        "$:/core/ui/ViewTemplate/plugin": {
            "title": "$:/core/ui/ViewTemplate/plugin",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$list filter=\"[all[current]has[plugin-type]] -[all[current]field:plugin-type[import]]\">\n\n{{||$:/core/ui/TiddlerInfo/Advanced/PluginInfo}}\n\n</$list>\n"
        },
        "$:/core/ui/ViewTemplate/subtitle": {
            "title": "$:/core/ui/ViewTemplate/subtitle",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal type=\"nomatch\" state=<<folded-state>> text=\"hide\" tag=\"div\" retain=\"yes\" animate=\"yes\">\n<div class=\"tc-subtitle\">\n<$link to={{!!modifier}}>\n<$view field=\"modifier\"/>\n</$link> <$view field=\"modified\" format=\"date\" template={{$:/language/Tiddler/DateFormat}}/>\n</div>\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate/tags": {
            "title": "$:/core/ui/ViewTemplate/tags",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal type=\"nomatch\" state=<<folded-state>> text=\"hide\" tag=\"div\" retain=\"yes\" animate=\"yes\">\n<div class=\"tc-tags-wrapper\"><$list filter=\"[all[current]tags[]sort[title]]\" template=\"$:/core/ui/TagTemplate\" storyview=\"pop\"/></div>\n</$reveal>"
        },
        "$:/core/ui/ViewTemplate/title": {
            "title": "$:/core/ui/ViewTemplate/title",
            "tags": "$:/tags/ViewTemplate",
            "text": "\\define title-styles()\nfill:$(foregroundColor)$;\n\\end\n\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n<div class=\"tc-tiddler-title\">\n<div class=\"tc-titlebar\">\n<span class=\"tc-tiddler-controls\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]\" variable=\"listItem\"><$reveal type=\"nomatch\" state=<<config-title>> text=\"hide\"><$transclude tiddler=<<listItem>>/></$reveal></$list>\n</span>\n<$set name=\"tv-wikilinks\" value={{$:/config/Tiddlers/TitleLinks}}>\n<$link>\n<$set name=\"foregroundColor\" value={{!!color}}>\n<span class=\"tc-tiddler-title-icon\" style=<<title-styles>>>\n<$transclude tiddler={{!!icon}}/>\n</span>\n</$set>\n<$list filter=\"[all[current]removeprefix[$:/]]\">\n<h2 class=\"tc-title\" title={{$:/language/SystemTiddler/Tooltip}}>\n<span class=\"tc-system-title-prefix\">$:/</span><$text text=<<currentTiddler>>/>\n</h2>\n</$list>\n<$list filter=\"[all[current]!prefix[$:/]]\">\n<h2 class=\"tc-title\">\n<$view field=\"title\"/>\n</h2>\n</$list>\n</$link>\n</$set>\n</div>\n\n<$reveal type=\"nomatch\" text=\"\" default=\"\" state=<<tiddlerInfoState>> class=\"tc-tiddler-info tc-popup-handle\" animate=\"yes\" retain=\"yes\">\n\n<$transclude tiddler=\"$:/core/ui/TiddlerInfo\"/>\n\n</$reveal>\n</div>"
        },
        "$:/core/ui/ViewTemplate/unfold": {
            "title": "$:/core/ui/ViewTemplate/unfold",
            "tags": "$:/tags/ViewTemplate",
            "text": "<$reveal tag=\"div\" type=\"nomatch\" state=\"$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-bar\" text=\"hide\">\n<$reveal tag=\"div\" type=\"nomatch\" state=<<folded-state>> text=\"hide\" default=\"show\" retain=\"yes\" animate=\"yes\">\n<$button tooltip={{$:/language/Buttons/Fold/Hint}} aria-label={{$:/language/Buttons/Fold/Caption}} class=\"tc-fold-banner\">\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n{{$:/core/images/chevron-up}}\n</$button>\n</$reveal>\n<$reveal tag=\"div\" type=\"nomatch\" state=<<folded-state>> text=\"show\" default=\"show\" retain=\"yes\" animate=\"yes\">\n<$button tooltip={{$:/language/Buttons/Unfold/Hint}} aria-label={{$:/language/Buttons/Unfold/Caption}} class=\"tc-unfold-banner\">\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n{{$:/core/images/chevron-down}}\n</$button>\n</$reveal>\n</$reveal>\n"
        },
        "$:/core/ui/ViewTemplate": {
            "title": "$:/core/ui/ViewTemplate",
            "text": "\\define frame-classes()\ntc-tiddler-frame tc-tiddler-view-frame $(missingTiddlerClass)$ $(shadowTiddlerClass)$ $(systemTiddlerClass)$ $(tiddlerTagClasses)$\n\\end\n\\define folded-state()\n$:/state/folded/$(currentTiddler)$\n\\end\n<$set name=\"storyTiddler\" value=<<currentTiddler>>><$set name=\"tiddlerInfoState\" value=<<qualify \"$:/state/popup/tiddler-info\">>><$tiddler tiddler=<<currentTiddler>>><div class=<<frame-classes>>><$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewTemplate]!has[draft.of]]\" variable=\"listItem\"><$transclude tiddler=<<listItem>>/></$list>\n</div>\n</$tiddler></$set></$set>\n"
        },
        "$:/core/ui/Buttons/clone": {
            "title": "$:/core/ui/Buttons/clone",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/clone-button}} {{$:/language/Buttons/Clone/Caption}}",
            "description": "{{$:/language/Buttons/Clone/Hint}}",
            "text": "<$button message=\"tm-new-tiddler\" param=<<currentTiddler>> tooltip={{$:/language/Buttons/Clone/Hint}} aria-label={{$:/language/Buttons/Clone/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/clone-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Clone/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/close-others": {
            "title": "$:/core/ui/Buttons/close-others",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/close-others-button}} {{$:/language/Buttons/CloseOthers/Caption}}",
            "description": "{{$:/language/Buttons/CloseOthers/Hint}}",
            "text": "<$button message=\"tm-close-other-tiddlers\" param=<<currentTiddler>> tooltip={{$:/language/Buttons/CloseOthers/Hint}} aria-label={{$:/language/Buttons/CloseOthers/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/close-others-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/CloseOthers/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/close": {
            "title": "$:/core/ui/Buttons/close",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/close-button}} {{$:/language/Buttons/Close/Caption}}",
            "description": "{{$:/language/Buttons/Close/Hint}}",
            "text": "<$button message=\"tm-close-tiddler\" tooltip={{$:/language/Buttons/Close/Hint}} aria-label={{$:/language/Buttons/Close/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/close-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Close/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/edit": {
            "title": "$:/core/ui/Buttons/edit",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/edit-button}} {{$:/language/Buttons/Edit/Caption}}",
            "description": "{{$:/language/Buttons/Edit/Hint}}",
            "text": "<$button message=\"tm-edit-tiddler\" tooltip={{$:/language/Buttons/Edit/Hint}} aria-label={{$:/language/Buttons/Edit/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/edit-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Edit/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/export-tiddler": {
            "title": "$:/core/ui/Buttons/export-tiddler",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/export-button}} {{$:/language/Buttons/ExportTiddler/Caption}}",
            "description": "{{$:/language/Buttons/ExportTiddler/Hint}}",
            "text": "\\define makeExportFilter()\n[[$(currentTiddler)$]]\n\\end\n<$macrocall $name=\"exportButton\" exportFilter=<<makeExportFilter>> lingoBase=\"$:/language/Buttons/ExportTiddler/\" baseFilename=<<currentTiddler>>/>"
        },
        "$:/core/ui/Buttons/fold-bar": {
            "title": "$:/core/ui/Buttons/fold-bar",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/language/Buttons/Fold/FoldBar/Caption}}",
            "description": "{{$:/language/Buttons/Fold/FoldBar/Hint}}",
            "text": "<!-- This dummy toolbar button is here to allow visibility of the fold-bar to be controlled as if it were a toolbar button -->"
        },
        "$:/core/ui/Buttons/fold-others": {
            "title": "$:/core/ui/Buttons/fold-others",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/fold-others-button}} {{$:/language/Buttons/FoldOthers/Caption}}",
            "description": "{{$:/language/Buttons/FoldOthers/Hint}}",
            "text": "<$button tooltip={{$:/language/Buttons/FoldOthers/Hint}} aria-label={{$:/language/Buttons/FoldOthers/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-other-tiddlers\" $param=<<currentTiddler>> foldedStatePrefix=\"$:/state/folded/\"/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/fold-others-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/FoldOthers/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/fold": {
            "title": "$:/core/ui/Buttons/fold",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/fold-button}} {{$:/language/Buttons/Fold/Caption}}",
            "description": "{{$:/language/Buttons/Fold/Hint}}",
            "text": "<$reveal type=\"nomatch\" state=<<folded-state>> text=\"hide\" default=\"show\"><$button tooltip={{$:/language/Buttons/Fold/Hint}} aria-label={{$:/language/Buttons/Fold/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/fold-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\">\n<$text text={{$:/language/Buttons/Fold/Caption}}/>\n</span>\n</$list>\n</$button></$reveal><$reveal type=\"match\" state=<<folded-state>> text=\"hide\" default=\"show\"><$button tooltip={{$:/language/Buttons/Unfold/Hint}} aria-label={{$:/language/Buttons/Unfold/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-fold-tiddler\" $param=<<currentTiddler>> foldedState=<<folded-state>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\" variable=\"listItem\">\n{{$:/core/images/unfold-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\">\n<$text text={{$:/language/Buttons/Unfold/Caption}}/>\n</span>\n</$list>\n</$button></$reveal>"
        },
        "$:/core/ui/Buttons/info": {
            "title": "$:/core/ui/Buttons/info",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/info-button}} {{$:/language/Buttons/Info/Caption}}",
            "description": "{{$:/language/Buttons/Info/Hint}}",
            "text": "<$button popup=<<tiddlerInfoState>> tooltip={{$:/language/Buttons/Info/Hint}} aria-label={{$:/language/Buttons/Info/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/info-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Info/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/more-tiddler-actions": {
            "title": "$:/core/ui/Buttons/more-tiddler-actions",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/down-arrow}} {{$:/language/Buttons/More/Caption}}",
            "description": "{{$:/language/Buttons/More/Hint}}",
            "text": "\\define config-title()\n$:/config/ViewToolbarButtons/Visibility/$(listItem)$\n\\end\n<$button popup=<<qualify \"$:/state/popup/more\">> tooltip={{$:/language/Buttons/More/Hint}} aria-label={{$:/language/Buttons/More/Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/down-arrow}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/More/Caption}}/></span>\n</$list>\n</$button><$reveal state=<<qualify \"$:/state/popup/more\">> type=\"popup\" position=\"below\" animate=\"yes\">\n\n<div class=\"tc-drop-down\">\n\n<$set name=\"tv-config-toolbar-icons\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-text\" value=\"yes\">\n\n<$set name=\"tv-config-toolbar-class\" value=\"tc-btn-invisible\">\n\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]] -[[$:/core/ui/Buttons/more-tiddler-actions]]\" variable=\"listItem\">\n\n<$reveal type=\"match\" state=<<config-title>> text=\"hide\">\n\n<$transclude tiddler=<<listItem>> mode=\"inline\"/>\n\n</$reveal>\n\n</$list>\n\n</$set>\n\n</$set>\n\n</$set>\n\n</div>\n\n</$reveal>"
        },
        "$:/core/ui/Buttons/new-here": {
            "title": "$:/core/ui/Buttons/new-here",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/new-here-button}} {{$:/language/Buttons/NewHere/Caption}}",
            "description": "{{$:/language/Buttons/NewHere/Hint}}",
            "text": "\\define newHereButtonTags()\n[[$(currentTiddler)$]]\n\\end\n\\define newHereButton()\n<$button tooltip={{$:/language/Buttons/NewHere/Hint}} aria-label={{$:/language/Buttons/NewHere/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" tags=<<newHereButtonTags>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-here-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewHere/Caption}}/></span>\n</$list>\n</$button>\n\\end\n<<newHereButton>>"
        },
        "$:/core/ui/Buttons/new-journal-here": {
            "title": "$:/core/ui/Buttons/new-journal-here",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/new-journal-button}} {{$:/language/Buttons/NewJournalHere/Caption}}",
            "description": "{{$:/language/Buttons/NewJournalHere/Hint}}",
            "text": "\\define journalButtonTags()\n[[$(currentTiddlerTag)$]] $(journalTags)$\n\\end\n\\define journalButton()\n<$button tooltip={{$:/language/Buttons/NewJournalHere/Hint}} aria-label={{$:/language/Buttons/NewJournalHere/Caption}} class=<<tv-config-toolbar-class>>>\n<$action-sendmessage $message=\"tm-new-tiddler\" title=<<now \"$(journalTitleTemplate)$\">> tags=<<journalButtonTags>>/>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/new-journal-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/NewJournalHere/Caption}}/></span>\n</$list>\n</$button>\n\\end\n<$set name=\"journalTitleTemplate\" value={{$:/config/NewJournal/Title}}>\n<$set name=\"journalTags\" value={{$:/config/NewJournal/Tags}}>\n<$set name=\"currentTiddlerTag\" value=<<currentTiddler>>>\n<<journalButton>>\n</$set></$set></$set>"
        },
        "$:/core/ui/Buttons/open-window": {
            "title": "$:/core/ui/Buttons/open-window",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/open-window}} {{$:/language/Buttons/OpenWindow/Caption}}",
            "description": "{{$:/language/Buttons/OpenWindow/Hint}}",
            "text": "<$button message=\"tm-open-window\" tooltip={{$:/language/Buttons/OpenWindow/Hint}} aria-label={{$:/language/Buttons/OpenWindow/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/open-window}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/OpenWindow/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/permalink": {
            "title": "$:/core/ui/Buttons/permalink",
            "tags": "$:/tags/ViewToolbar",
            "caption": "{{$:/core/images/permalink-button}} {{$:/language/Buttons/Permalink/Caption}}",
            "description": "{{$:/language/Buttons/Permalink/Hint}}",
            "text": "<$button message=\"tm-permalink\" tooltip={{$:/language/Buttons/Permalink/Hint}} aria-label={{$:/language/Buttons/Permalink/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/permalink-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Permalink/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/core/ui/Buttons/permaview": {
            "title": "$:/core/ui/Buttons/permaview",
            "tags": "$:/tags/ViewToolbar $:/tags/PageControls",
            "caption": "{{$:/core/images/permaview-button}} {{$:/language/Buttons/Permaview/Caption}}",
            "description": "{{$:/language/Buttons/Permaview/Hint}}",
            "text": "<$button message=\"tm-permaview\" tooltip={{$:/language/Buttons/Permaview/Hint}} aria-label={{$:/language/Buttons/Permaview/Caption}} class=<<tv-config-toolbar-class>>>\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/permaview-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$:/language/Buttons/Permaview/Caption}}/></span>\n</$list>\n</$button>"
        },
        "$:/temp/advancedsearch": {
            "title": "$:/temp/advancedsearch",
            "text": ""
        },
        "$:/snippets/allfields": {
            "title": "$:/snippets/allfields",
            "text": "\\define renderfield(title)\n<tr class=\"tc-view-field\"><td class=\"tc-view-field-name\">''$title$'':</td><td class=\"tc-view-field-value\">//{{$:/language/Docs/Fields/$title$}}//</td></tr>\n\\end\n<table class=\"tc-view-field-table\"><tbody><$list filter=\"[fields[]sort[title]]\" variable=\"listItem\"><$macrocall $name=\"renderfield\" title=<<listItem>>/></$list>\n</tbody></table>\n"
        },
        "$:/config/AnimationDuration": {
            "title": "$:/config/AnimationDuration",
            "text": "400"
        },
        "$:/config/AutoSave": {
            "title": "$:/config/AutoSave",
            "text": "yes"
        },
        "$:/config/BitmapEditor/Colour": {
            "title": "$:/config/BitmapEditor/Colour",
            "text": "#444"
        },
        "$:/config/BitmapEditor/ImageSizes": {
            "title": "$:/config/BitmapEditor/ImageSizes",
            "text": "[[62px 100px]] [[100px 62px]] [[124px 200px]] [[200px 124px]] [[248px 400px]] [[371px 600px]] [[400px 248px]] [[556px 900px]] [[600px 371px]] [[742px 1200px]] [[900px 556px]] [[1200px 742px]]"
        },
        "$:/config/BitmapEditor/LineWidth": {
            "title": "$:/config/BitmapEditor/LineWidth",
            "text": "3px"
        },
        "$:/config/BitmapEditor/LineWidths": {
            "title": "$:/config/BitmapEditor/LineWidths",
            "text": "0.25px 0.5px 1px 2px 3px 4px 6px 8px 10px 16px 20px 28px 40px 56px 80px"
        },
        "$:/config/BitmapEditor/Opacities": {
            "title": "$:/config/BitmapEditor/Opacities",
            "text": "0.01 0.025 0.05 0.075 0.1 0.15 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0"
        },
        "$:/config/BitmapEditor/Opacity": {
            "title": "$:/config/BitmapEditor/Opacity",
            "text": "1.0"
        },
        "$:/config/DefaultSidebarTab": {
            "title": "$:/config/DefaultSidebarTab",
            "text": "$:/core/ui/SideBar/Open"
        },
        "$:/config/Drafts/TypingTimeout": {
            "title": "$:/config/Drafts/TypingTimeout",
            "text": "400"
        },
        "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-4": {
            "title": "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-4",
            "text": "hide"
        },
        "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-5": {
            "title": "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-5",
            "text": "hide"
        },
        "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-6": {
            "title": "$:/config/EditorToolbarButtons/Visibility/$:/core/ui/EditorToolbar/heading-6",
            "text": "hide"
        },
        "$:/config/EditorTypeMappings/image/gif": {
            "title": "$:/config/EditorTypeMappings/image/gif",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/jpeg": {
            "title": "$:/config/EditorTypeMappings/image/jpeg",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/jpg": {
            "title": "$:/config/EditorTypeMappings/image/jpg",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/png": {
            "title": "$:/config/EditorTypeMappings/image/png",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/image/x-icon": {
            "title": "$:/config/EditorTypeMappings/image/x-icon",
            "text": "bitmap"
        },
        "$:/config/EditorTypeMappings/text/vnd.tiddlywiki": {
            "title": "$:/config/EditorTypeMappings/text/vnd.tiddlywiki",
            "text": "text"
        },
        "$:/config/EditTemplateFields/Visibility/title": {
            "title": "$:/config/EditTemplateFields/Visibility/title",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/tags": {
            "title": "$:/config/EditTemplateFields/Visibility/tags",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/text": {
            "title": "$:/config/EditTemplateFields/Visibility/text",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/creator": {
            "title": "$:/config/EditTemplateFields/Visibility/creator",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/created": {
            "title": "$:/config/EditTemplateFields/Visibility/created",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/modified": {
            "title": "$:/config/EditTemplateFields/Visibility/modified",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/modifier": {
            "title": "$:/config/EditTemplateFields/Visibility/modifier",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/type": {
            "title": "$:/config/EditTemplateFields/Visibility/type",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/draft.title": {
            "title": "$:/config/EditTemplateFields/Visibility/draft.title",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/draft.of": {
            "title": "$:/config/EditTemplateFields/Visibility/draft.of",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/revision": {
            "title": "$:/config/EditTemplateFields/Visibility/revision",
            "text": "hide"
        },
        "$:/config/EditTemplateFields/Visibility/bag": {
            "title": "$:/config/EditTemplateFields/Visibility/bag",
            "text": "hide"
        },
        "$:/config/MissingLinks": {
            "title": "$:/config/MissingLinks",
            "text": "yes"
        },
        "$:/config/Navigation/UpdateAddressBar": {
            "title": "$:/config/Navigation/UpdateAddressBar",
            "text": "no"
        },
        "$:/config/Navigation/UpdateHistory": {
            "title": "$:/config/Navigation/UpdateHistory",
            "text": "no"
        },
        "$:/config/OfficialPluginLibrary": {
            "title": "$:/config/OfficialPluginLibrary",
            "tags": "$:/tags/PluginLibrary",
            "url": "http://tiddlywiki.com/library/v5.1.13/index.html",
            "caption": "{{$:/language/OfficialPluginLibrary}}",
            "text": "{{$:/language/OfficialPluginLibrary/Hint}}\n"
        },
        "$:/config/Navigation/openLinkFromInsideRiver": {
            "title": "$:/config/Navigation/openLinkFromInsideRiver",
            "text": "below"
        },
        "$:/config/Navigation/openLinkFromOutsideRiver": {
            "title": "$:/config/Navigation/openLinkFromOutsideRiver",
            "text": "top"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/advanced-search": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/advanced-search",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/close-all": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/close-all",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/encryption": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/encryption",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/export-page": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/export-page",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/fold-all": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/fold-all",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/full-screen": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/full-screen",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/home",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/refresh": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/refresh",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/import": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/import",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/language": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/language",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/tag-manager": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/tag-manager",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/more-page-actions": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/more-page-actions",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-journal": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-journal",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-image": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/new-image",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/palette": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/palette",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/permaview": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/permaview",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/storyview": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/storyview",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/theme": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/theme",
            "text": "hide"
        },
        "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/unfold-all": {
            "title": "$:/config/PageControlButtons/Visibility/$:/core/ui/Buttons/unfold-all",
            "text": "hide"
        },
        "$:/config/Performance/Instrumentation": {
            "title": "$:/config/Performance/Instrumentation",
            "text": "no"
        },
        "$:/config/SaverFilter": {
            "title": "$:/config/SaverFilter",
            "text": "[all[]] -[[$:/HistoryList]] -[[$:/StoryList]] -[[$:/Import]] -[[$:/isEncrypted]] -[[$:/UploadName]] -[prefix[$:/state/]] -[prefix[$:/temp/]]"
        },
        "$:/config/SaveWikiButton/Template": {
            "title": "$:/config/SaveWikiButton/Template",
            "text": "$:/core/save/all"
        },
        "$:/config/Search/AutoFocus": {
            "title": "$:/config/Search/AutoFocus",
            "text": "true"
        },
        "$:/config/SearchResults/Default": {
            "title": "$:/config/SearchResults/Default",
            "text": "$:/core/ui/DefaultSearchResultList"
        },
        "$:/config/ShortcutInfo/bold": {
            "title": "$:/config/ShortcutInfo/bold",
            "text": "{{$:/language/Buttons/Bold/Hint}}"
        },
        "$:/config/ShortcutInfo/cancel-edit-tiddler": {
            "title": "$:/config/ShortcutInfo/cancel-edit-tiddler",
            "text": "{{$:/language/Buttons/Cancel/Hint}}"
        },
        "$:/config/ShortcutInfo/excise": {
            "title": "$:/config/ShortcutInfo/excise",
            "text": "{{$:/language/Buttons/Excise/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-1": {
            "title": "$:/config/ShortcutInfo/heading-1",
            "text": "{{$:/language/Buttons/Heading1/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-2": {
            "title": "$:/config/ShortcutInfo/heading-2",
            "text": "{{$:/language/Buttons/Heading2/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-3": {
            "title": "$:/config/ShortcutInfo/heading-3",
            "text": "{{$:/language/Buttons/Heading3/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-4": {
            "title": "$:/config/ShortcutInfo/heading-4",
            "text": "{{$:/language/Buttons/Heading4/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-5": {
            "title": "$:/config/ShortcutInfo/heading-5",
            "text": "{{$:/language/Buttons/Heading5/Hint}}"
        },
        "$:/config/ShortcutInfo/heading-6": {
            "title": "$:/config/ShortcutInfo/heading-6",
            "text": "{{$:/language/Buttons/Heading6/Hint}}"
        },
        "$:/config/ShortcutInfo/italic": {
            "title": "$:/config/ShortcutInfo/italic",
            "text": "{{$:/language/Buttons/Italic/Hint}}"
        },
        "$:/config/ShortcutInfo/link": {
            "title": "$:/config/ShortcutInfo/link",
            "text": "{{$:/language/Buttons/Link/Hint}}"
        },
        "$:/config/ShortcutInfo/list-bullet": {
            "title": "$:/config/ShortcutInfo/list-bullet",
            "text": "{{$:/language/Buttons/ListBullet/Hint}}"
        },
        "$:/config/ShortcutInfo/list-number": {
            "title": "$:/config/ShortcutInfo/list-number",
            "text": "{{$:/language/Buttons/ListNumber/Hint}}"
        },
        "$:/config/ShortcutInfo/mono-block": {
            "title": "$:/config/ShortcutInfo/mono-block",
            "text": "{{$:/language/Buttons/MonoBlock/Hint}}"
        },
        "$:/config/ShortcutInfo/mono-line": {
            "title": "$:/config/ShortcutInfo/mono-line",
            "text": "{{$:/language/Buttons/MonoLine/Hint}}"
        },
        "$:/config/ShortcutInfo/picture": {
            "title": "$:/config/ShortcutInfo/picture",
            "text": "{{$:/language/Buttons/Picture/Hint}}"
        },
        "$:/config/ShortcutInfo/preview": {
            "title": "$:/config/ShortcutInfo/preview",
            "text": "{{$:/language/Buttons/Preview/Hint}}"
        },
        "$:/config/ShortcutInfo/quote": {
            "title": "$:/config/ShortcutInfo/quote",
            "text": "{{$:/language/Buttons/Quote/Hint}}"
        },
        "$:/config/ShortcutInfo/save-tiddler": {
            "title": "$:/config/ShortcutInfo/save-tiddler",
            "text": "{{$:/language/Buttons/Save/Hint}}"
        },
        "$:/config/ShortcutInfo/stamp": {
            "title": "$:/config/ShortcutInfo/stamp",
            "text": "{{$:/language/Buttons/Stamp/Hint}}"
        },
        "$:/config/ShortcutInfo/strikethrough": {
            "title": "$:/config/ShortcutInfo/strikethrough",
            "text": "{{$:/language/Buttons/Strikethrough/Hint}}"
        },
        "$:/config/ShortcutInfo/subscript": {
            "title": "$:/config/ShortcutInfo/subscript",
            "text": "{{$:/language/Buttons/Subscript/Hint}}"
        },
        "$:/config/ShortcutInfo/superscript": {
            "title": "$:/config/ShortcutInfo/superscript",
            "text": "{{$:/language/Buttons/Superscript/Hint}}"
        },
        "$:/config/ShortcutInfo/underline": {
            "title": "$:/config/ShortcutInfo/underline",
            "text": "{{$:/language/Buttons/Underline/Hint}}"
        },
        "$:/config/shortcuts-mac/bold": {
            "title": "$:/config/shortcuts-mac/bold",
            "text": "meta-B"
        },
        "$:/config/shortcuts-mac/italic": {
            "title": "$:/config/shortcuts-mac/italic",
            "text": "meta-I"
        },
        "$:/config/shortcuts-mac/underline": {
            "title": "$:/config/shortcuts-mac/underline",
            "text": "meta-U"
        },
        "$:/config/shortcuts-not-mac/bold": {
            "title": "$:/config/shortcuts-not-mac/bold",
            "text": "ctrl-B"
        },
        "$:/config/shortcuts-not-mac/italic": {
            "title": "$:/config/shortcuts-not-mac/italic",
            "text": "ctrl-I"
        },
        "$:/config/shortcuts-not-mac/underline": {
            "title": "$:/config/shortcuts-not-mac/underline",
            "text": "ctrl-U"
        },
        "$:/config/shortcuts/cancel-edit-tiddler": {
            "title": "$:/config/shortcuts/cancel-edit-tiddler",
            "text": "escape"
        },
        "$:/config/shortcuts/excise": {
            "title": "$:/config/shortcuts/excise",
            "text": "ctrl-E"
        },
        "$:/config/shortcuts/heading-1": {
            "title": "$:/config/shortcuts/heading-1",
            "text": "ctrl-1"
        },
        "$:/config/shortcuts/heading-2": {
            "title": "$:/config/shortcuts/heading-2",
            "text": "ctrl-2"
        },
        "$:/config/shortcuts/heading-3": {
            "title": "$:/config/shortcuts/heading-3",
            "text": "ctrl-3"
        },
        "$:/config/shortcuts/heading-4": {
            "title": "$:/config/shortcuts/heading-4",
            "text": "ctrl-4"
        },
        "$:/config/shortcuts/heading-5": {
            "title": "$:/config/shortcuts/heading-5",
            "text": "ctrl-5"
        },
        "$:/config/shortcuts/heading-6": {
            "title": "$:/config/shortcuts/heading-6",
            "text": "ctrl-6"
        },
        "$:/config/shortcuts/link": {
            "title": "$:/config/shortcuts/link",
            "text": "ctrl-L"
        },
        "$:/config/shortcuts/list-bullet": {
            "title": "$:/config/shortcuts/list-bullet",
            "text": "ctrl-shift-L"
        },
        "$:/config/shortcuts/list-number": {
            "title": "$:/config/shortcuts/list-number",
            "text": "ctrl-shift-N"
        },
        "$:/config/shortcuts/mono-block": {
            "title": "$:/config/shortcuts/mono-block",
            "text": "ctrl-shift-M"
        },
        "$:/config/shortcuts/mono-line": {
            "title": "$:/config/shortcuts/mono-line",
            "text": "ctrl-M"
        },
        "$:/config/shortcuts/picture": {
            "title": "$:/config/shortcuts/picture",
            "text": "ctrl-shift-I"
        },
        "$:/config/shortcuts/preview": {
            "title": "$:/config/shortcuts/preview",
            "text": "alt-P"
        },
        "$:/config/shortcuts/quote": {
            "title": "$:/config/shortcuts/quote",
            "text": "ctrl-Q"
        },
        "$:/config/shortcuts/save-tiddler": {
            "title": "$:/config/shortcuts/save-tiddler",
            "text": "ctrl+enter"
        },
        "$:/config/shortcuts/stamp": {
            "title": "$:/config/shortcuts/stamp",
            "text": "ctrl-S"
        },
        "$:/config/shortcuts/strikethrough": {
            "title": "$:/config/shortcuts/strikethrough",
            "text": "ctrl-T"
        },
        "$:/config/shortcuts/subscript": {
            "title": "$:/config/shortcuts/subscript",
            "text": "ctrl-shift-B"
        },
        "$:/config/shortcuts/superscript": {
            "title": "$:/config/shortcuts/superscript",
            "text": "ctrl-shift-P"
        },
        "$:/config/SyncFilter": {
            "title": "$:/config/SyncFilter",
            "text": "[is[tiddler]] -[[$:/HistoryList]] -[[$:/Import]] -[[$:/isEncrypted]] -[prefix[$:/status/]] -[prefix[$:/state/]] -[prefix[$:/temp/]]"
        },
        "$:/config/TextEditor/EditorHeight/Height": {
            "title": "$:/config/TextEditor/EditorHeight/Height",
            "text": "400px"
        },
        "$:/config/TextEditor/EditorHeight/Mode": {
            "title": "$:/config/TextEditor/EditorHeight/Mode",
            "text": "auto"
        },
        "$:/config/TiddlerInfo/Default": {
            "title": "$:/config/TiddlerInfo/Default",
            "text": "$:/core/ui/TiddlerInfo/Fields"
        },
        "$:/config/Tiddlers/TitleLinks": {
            "title": "$:/config/Tiddlers/TitleLinks",
            "text": "no"
        },
        "$:/config/Toolbar/ButtonClass": {
            "title": "$:/config/Toolbar/ButtonClass",
            "text": "tc-btn-invisible"
        },
        "$:/config/Toolbar/Icons": {
            "title": "$:/config/Toolbar/Icons",
            "text": "yes"
        },
        "$:/config/Toolbar/Text": {
            "title": "$:/config/Toolbar/Text",
            "text": "no"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/clone": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/clone",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/close-others": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/close-others",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/export-tiddler": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/export-tiddler",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/info": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/info",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/more-tiddler-actions",
            "text": "show"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-here": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-here",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-journal-here": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/new-journal-here",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/open-window": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/open-window",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permalink": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permalink",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permaview": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/permaview",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/delete": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/delete",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-bar": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-bar",
            "text": "hide"
        },
        "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-others": {
            "title": "$:/config/ViewToolbarButtons/Visibility/$:/core/ui/Buttons/fold-others",
            "text": "hide"
        },
        "$:/config/WikiParserRules/Inline/wikilink": {
            "title": "$:/config/WikiParserRules/Inline/wikilink",
            "text": "enable"
        },
        "$:/snippets/currpalettepreview": {
            "title": "$:/snippets/currpalettepreview",
            "text": "\\define swatchStyle()\nbackground-color: $(swatchColour)$;\n\\end\n\\define swatch(colour)\n<$set name=\"swatchColour\" value={{##$colour$}}>\n<div class=\"tc-swatch\" style=<<swatchStyle>>/>\n</$set>\n\\end\n<div class=\"tc-swatches-horiz\">\n<<swatch foreground>>\n<<swatch background>>\n<<swatch muted-foreground>>\n<<swatch primary>>\n<<swatch page-background>>\n<<swatch tab-background>>\n<<swatch tiddler-info-background>>\n</div>\n"
        },
        "$:/DefaultTiddlers": {
            "title": "$:/DefaultTiddlers",
            "text": "GettingStarted\n"
        },
        "$:/snippets/download-wiki-button": {
            "title": "$:/snippets/download-wiki-button",
            "text": "\\define lingo-base() $:/language/ControlPanel/Tools/Download/\n<$button class=\"tc-btn-big-green\">\n<$action-sendmessage $message=\"tm-download-file\" $param=\"$:/core/save/all\" filename=\"index.html\"/>\n<<lingo Full/Caption>> {{$:/core/images/save-button}}\n</$button>"
        },
        "$:/language": {
            "title": "$:/language",
            "text": "$:/languages/en-GB"
        },
        "$:/snippets/languageswitcher": {
            "title": "$:/snippets/languageswitcher",
            "text": "{{$:/language/ControlPanel/Basics/Language/Prompt}} <$select tiddler=\"$:/language\">\n<$list filter=\"[[$:/languages/en-GB]] [plugin-type[language]sort[description]]\">\n<option value=<<currentTiddler>>><$view field=\"description\"><$view field=\"name\"><$view field=\"title\"/></$view></$view></option>\n</$list>\n</$select>"
        },
        "$:/core/macros/colour-picker": {
            "title": "$:/core/macros/colour-picker",
            "tags": "$:/tags/Macro",
            "text": "\\define colour-picker-update-recent()\n<$action-listops\n\t$tiddler=\"$:/config/ColourPicker/Recent\"\n\t$subfilter=\"$(colour-picker-value)$ [list[$:/config/ColourPicker/Recent]remove[$(colour-picker-value)$]] +[limit[8]]\"\n/>\n\\end\n\n\\define colour-picker-inner(actions)\n<$button tag=\"a\" tooltip=\"\"\"$(colour-picker-value)$\"\"\">\n\n$(colour-picker-update-recent)$\n\n$actions$\n\n<div style=\"background-color: $(colour-picker-value)$; width: 100%; height: 100%; border-radius: 50%;\"/>\n\n</$button>\n\\end\n\n\\define colour-picker-recent-inner(actions)\n<$set name=\"colour-picker-value\" value=\"$(recentColour)$\">\n<$macrocall $name=\"colour-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n</$set>\n\\end\n\n\\define colour-picker-recent(actions)\n{{$:/language/ColourPicker/Recent}} <$list filter=\"[list[$:/config/ColourPicker/Recent]]\" variable=\"recentColour\">\n<$macrocall $name=\"colour-picker-recent-inner\" actions=\"\"\"$actions$\"\"\"/></$list>\n\\end\n\n\\define colour-picker(actions)\n<div class=\"tc-colour-chooser\">\n\n<$macrocall $name=\"colour-picker-recent\" actions=\"\"\"$actions$\"\"\"/>\n\n---\n\n<$list filter=\"LightPink Pink Crimson LavenderBlush PaleVioletRed HotPink DeepPink MediumVioletRed Orchid Thistle Plum Violet Magenta Fuchsia DarkMagenta Purple MediumOrchid DarkViolet DarkOrchid Indigo BlueViolet MediumPurple MediumSlateBlue SlateBlue DarkSlateBlue Lavender GhostWhite Blue MediumBlue MidnightBlue DarkBlue Navy RoyalBlue CornflowerBlue LightSteelBlue LightSlateGrey SlateGrey DodgerBlue AliceBlue SteelBlue LightSkyBlue SkyBlue DeepSkyBlue LightBlue PowderBlue CadetBlue Azure LightCyan PaleTurquoise Cyan Aqua DarkTurquoise DarkSlateGrey DarkCyan Teal MediumTurquoise LightSeaGreen Turquoise Aquamarine MediumAquamarine MediumSpringGreen MintCream SpringGreen MediumSeaGreen SeaGreen Honeydew LightGreen PaleGreen DarkSeaGreen LimeGreen Lime ForestGreen Green DarkGreen Chartreuse LawnGreen GreenYellow DarkOliveGreen YellowGreen OliveDrab Beige LightGoldenrodYellow Ivory LightYellow Yellow Olive DarkKhaki LemonChiffon PaleGoldenrod Khaki Gold Cornsilk Goldenrod DarkGoldenrod FloralWhite OldLace Wheat Moccasin Orange PapayaWhip BlanchedAlmond NavajoWhite AntiqueWhite Tan BurlyWood Bisque DarkOrange Linen Peru PeachPuff SandyBrown Chocolate SaddleBrown Seashell Sienna LightSalmon Coral OrangeRed DarkSalmon Tomato MistyRose Salmon Snow LightCoral RosyBrown IndianRed Red Brown FireBrick DarkRed Maroon White WhiteSmoke Gainsboro LightGrey Silver DarkGrey Grey DimGrey Black\" variable=\"colour-picker-value\">\n<$macrocall $name=\"colour-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n</$list>\n\n---\n\n<$edit-text tiddler=\"$:/config/ColourPicker/New\" tag=\"input\" default=\"\" placeholder=\"\"/> \n<$edit-text tiddler=\"$:/config/ColourPicker/New\" type=\"color\" tag=\"input\"/>\n<$set name=\"colour-picker-value\" value={{$:/config/ColourPicker/New}}>\n<$macrocall $name=\"colour-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n</$set>\n\n</div>\n\n\\end\n"
        },
        "$:/core/macros/CSS": {
            "title": "$:/core/macros/CSS",
            "tags": "$:/tags/Macro",
            "text": "\\define colour(name)\n<$transclude tiddler={{$:/palette}} index=\"$name$\"><$transclude tiddler=\"$:/palettes/Vanilla\" index=\"$name$\"/></$transclude>\n\\end\n\n\\define color(name)\n<<colour $name$>>\n\\end\n\n\\define box-shadow(shadow)\n``\n  -webkit-box-shadow: $shadow$;\n     -moz-box-shadow: $shadow$;\n          box-shadow: $shadow$;\n``\n\\end\n\n\\define filter(filter)\n``\n  -webkit-filter: $filter$;\n     -moz-filter: $filter$;\n          filter: $filter$;\n``\n\\end\n\n\\define transition(transition)\n``\n  -webkit-transition: $transition$;\n     -moz-transition: $transition$;\n          transition: $transition$;\n``\n\\end\n\n\\define transform-origin(origin)\n``\n  -webkit-transform-origin: $origin$;\n     -moz-transform-origin: $origin$;\n          transform-origin: $origin$;\n``\n\\end\n\n\\define background-linear-gradient(gradient)\n``\nbackground-image: linear-gradient($gradient$);\nbackground-image: -o-linear-gradient($gradient$);\nbackground-image: -moz-linear-gradient($gradient$);\nbackground-image: -webkit-linear-gradient($gradient$);\nbackground-image: -ms-linear-gradient($gradient$);\n``\n\\end\n\n\\define datauri(title)\n<$macrocall $name=\"makedatauri\" type={{$title$!!type}} text={{$title$}}/>\n\\end\n\n\\define if-sidebar(text)\n<$reveal state=\"$:/state/sidebar\" type=\"match\" text=\"yes\" default=\"yes\">$text$</$reveal>\n\\end\n\n\\define if-no-sidebar(text)\n<$reveal state=\"$:/state/sidebar\" type=\"nomatch\" text=\"yes\" default=\"yes\">$text$</$reveal>\n\\end\n"
        },
        "$:/core/macros/export": {
            "title": "$:/core/macros/export",
            "tags": "$:/tags/Macro",
            "text": "\\define exportButtonFilename(baseFilename)\n$baseFilename$$(extension)$\n\\end\n\n\\define exportButton(exportFilter:\"[!is[system]sort[title]]\",lingoBase,baseFilename:\"tiddlers\")\n<span class=\"tc-popup-keep\">\n<$button popup=<<qualify \"$:/state/popup/export\">> tooltip={{$lingoBase$Hint}} aria-label={{$lingoBase$Caption}} class=<<tv-config-toolbar-class>> selectedClass=\"tc-selected\">\n<$list filter=\"[<tv-config-toolbar-icons>prefix[yes]]\">\n{{$:/core/images/export-button}}\n</$list>\n<$list filter=\"[<tv-config-toolbar-text>prefix[yes]]\">\n<span class=\"tc-btn-text\"><$text text={{$lingoBase$Caption}}/></span>\n</$list>\n</$button>\n</span>\n<$reveal state=<<qualify \"$:/state/popup/export\">> type=\"popup\" position=\"below\" animate=\"yes\">\n<div class=\"tc-drop-down\">\n<$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Exporter]]\">\n<$set name=\"extension\" value={{!!extension}}>\n<$button class=\"tc-btn-invisible\">\n<$action-sendmessage $message=\"tm-download-file\" $param=<<currentTiddler>> exportFilter=\"\"\"$exportFilter$\"\"\" filename=<<exportButtonFilename \"\"\"$baseFilename$\"\"\">>/>\n<$action-deletetiddler $tiddler=<<qualify \"$:/state/popup/export\">>/>\n<$transclude field=\"description\"/>\n</$button>\n</$set>\n</$list>\n</div>\n</$reveal>\n\\end\n"
        },
        "$:/core/macros/image-picker": {
            "title": "$:/core/macros/image-picker",
            "tags": "$:/tags/Macro",
            "text": "\\define image-picker-inner(actions)\n<$button tag=\"a\" tooltip=\"\"\"$(imageTitle)$\"\"\">\n\n$actions$\n\n<$transclude tiddler=<<imageTitle>>/>\n\n</$button>\n\\end\n\n\\define image-picker(actions,subfilter:\"\")\n<div class=\"tc-image-chooser\">\n\n<$list filter=\"[all[shadows+tiddlers]is[image]$subfilter$!has[draft.of]] -[type[application/pdf]] +[sort[title]]\" variable=\"imageTitle\">\n\n<$macrocall $name=\"image-picker-inner\" actions=\"\"\"$actions$\"\"\"/>\n\n</$list>\n\n</div>\n\n\\end\n\n"
        },
        "$:/core/macros/lingo": {
            "title": "$:/core/macros/lingo",
            "tags": "$:/tags/Macro",
            "text": "\\define lingo-base()\n$:/language/\n\\end\n\n\\define lingo(title)\n{{$(lingo-base)$$title$}}\n\\end\n"
        },
        "$:/core/macros/list": {
            "title": "$:/core/macros/list",
            "tags": "$:/tags/Macro",
            "text": "\\define list-links(filter,type:\"ul\",subtype:\"li\",class:\"\")\n<$type$ class=\"$class$\">\n<$list filter=\"$filter$\">\n<$subtype$>\n<$link to={{!!title}}>\n<$transclude field=\"caption\">\n<$view field=\"title\"/>\n</$transclude>\n</$link>\n</$subtype$>\n</$list>\n</$type$>\n\\end\n"
        },
        "$:/core/macros/tabs": {
            "title": "$:/core/macros/tabs",
            "tags": "$:/tags/Macro",
            "text": "\\define tabs(tabsList,default,state:\"$:/state/tab\",class,template)\n<div class=\"tc-tab-set $class$\">\n<div class=\"tc-tab-buttons $class$\">\n<$list filter=\"$tabsList$\" variable=\"currentTab\"><$set name=\"save-currentTiddler\" value=<<currentTiddler>>><$tiddler tiddler=<<currentTab>>><$button set=<<qualify \"$state$\">> setTo=<<currentTab>> default=\"$default$\" selectedClass=\"tc-tab-selected\" tooltip={{!!tooltip}}>\n<$tiddler tiddler=<<save-currentTiddler>>>\n<$set name=\"tv-wikilinks\" value=\"no\">\n<$transclude tiddler=<<currentTab>> field=\"caption\">\n<$macrocall $name=\"currentTab\" $type=\"text/plain\" $output=\"text/plain\"/>\n</$transclude>\n</$set></$tiddler></$button></$tiddler></$set></$list>\n</div>\n<div class=\"tc-tab-divider $class$\"/>\n<div class=\"tc-tab-content $class$\">\n<$list filter=\"$tabsList$\" variable=\"currentTab\">\n\n<$reveal type=\"match\" state=<<qualify \"$state$\">> text=<<currentTab>> default=\"$default$\">\n\n<$transclude tiddler=\"$template$\" mode=\"block\">\n\n<$transclude tiddler=<<currentTab>> mode=\"block\"/>\n\n</$transclude>\n\n</$reveal>\n\n</$list>\n</div>\n</div>\n\\end\n"
        },
        "$:/core/macros/tag": {
            "title": "$:/core/macros/tag",
            "tags": "$:/tags/Macro",
            "text": "\\define tag(tag)\n{{$tag$||$:/core/ui/TagTemplate}}\n\\end\n"
        },
        "$:/core/macros/thumbnails": {
            "title": "$:/core/macros/thumbnails",
            "tags": "$:/tags/Macro",
            "text": "\\define thumbnail(link,icon,color,background-color,image,caption,width:\"280\",height:\"157\")\n<$link to=\"\"\"$link$\"\"\"><div class=\"tc-thumbnail-wrapper\">\n<div class=\"tc-thumbnail-image\" style=\"width:$width$px;height:$height$px;\"><$reveal type=\"nomatch\" text=\"\" default=\"\"\"$image$\"\"\" tag=\"div\" style=\"width:$width$px;height:$height$px;\">\n[img[$image$]]\n</$reveal><$reveal type=\"match\" text=\"\" default=\"\"\"$image$\"\"\" tag=\"div\" class=\"tc-thumbnail-background\" style=\"width:$width$px;height:$height$px;background-color:$background-color$;\"></$reveal></div><div class=\"tc-thumbnail-icon\" style=\"fill:$color$;color:$color$;\">\n$icon$\n</div><div class=\"tc-thumbnail-caption\">\n$caption$\n</div>\n</div></$link>\n\\end\n\n\\define thumbnail-right(link,icon,color,background-color,image,caption,width:\"280\",height:\"157\")\n<div class=\"tc-thumbnail-right-wrapper\"><<thumbnail \"\"\"$link$\"\"\" \"\"\"$icon$\"\"\" \"\"\"$color$\"\"\" \"\"\"$background-color$\"\"\" \"\"\"$image$\"\"\" \"\"\"$caption$\"\"\" \"\"\"$width$\"\"\" \"\"\"$height$\"\"\">></div>\n\\end\n\n\\define list-thumbnails(filter,width:\"280\",height:\"157\")\n<$list filter=\"\"\"$filter$\"\"\"><$macrocall $name=\"thumbnail\" link={{!!link}} icon={{!!icon}} color={{!!color}} background-color={{!!background-color}} image={{!!image}} caption={{!!caption}} width=\"\"\"$width$\"\"\" height=\"\"\"$height$\"\"\"/></$list>\n\\end\n"
        },
        "$:/core/macros/timeline": {
            "created": "20141212105914482",
            "modified": "20141212110330815",
            "tags": "$:/tags/Macro",
            "title": "$:/core/macros/timeline",
            "type": "text/vnd.tiddlywiki",
            "text": "\\define timeline-title()\n<!-- Override this macro with a global macro \n     of the same name if you need to change \n     how titles are displayed on the timeline \n     -->\n<$view field=\"title\"/>\n\\end\n\\define timeline(limit:\"100\",format:\"DDth MMM YYYY\",subfilter:\"\",dateField:\"modified\")\n<div class=\"tc-timeline\">\n<$list filter=\"[!is[system]$subfilter$has[$dateField$]!sort[$dateField$]limit[$limit$]eachday[$dateField$]]\">\n<div class=\"tc-menu-list-item\">\n<$view field=\"$dateField$\" format=\"date\" template=\"$format$\"/>\n<$list filter=\"[sameday:$dateField${!!$dateField$}!is[system]$subfilter$!sort[$dateField$]]\">\n<div class=\"tc-menu-list-subitem\">\n<$link to={{!!title}}>\n<<timeline-title>>\n</$link>\n</div>\n</$list>\n</div>\n</$list>\n</div>\n\\end\n"
        },
        "$:/core/macros/toc": {
            "title": "$:/core/macros/toc",
            "tags": "$:/tags/Macro",
            "text": "\\define toc-caption()\n<$set name=\"tv-wikilinks\" value=\"no\">\n<$transclude field=\"caption\">\n<$view field=\"title\"/>\n</$transclude>\n</$set>\n\\end\n\n\\define toc-body(rootTag,tag,sort:\"\",itemClassFilter)\n<ol class=\"tc-toc\">\n<$list filter=\"\"\"[all[shadows+tiddlers]tag[$tag$]!has[draft.of]$sort$]\"\"\">\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$list filter=\"[all[current]toc-link[no]]\" emptyMessage=\"<$link><$view field='caption'><$view field='title'/></$view></$link>\">\n<<toc-caption>>\n</$list>\n<$list filter=\"\"\"[all[current]] -[[$rootTag$]]\"\"\">\n<$macrocall $name=\"toc-body\" rootTag=\"\"\"$rootTag$\"\"\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$list>\n</li>\n</$set>\n</$list>\n</ol>\n\\end\n\n\\define toc(tag,sort:\"\",itemClassFilter)\n<<toc-body rootTag:\"\"\"$tag$\"\"\" tag:\"\"\"$tag$\"\"\" sort:\"\"\"$sort$\"\"\" itemClassFilter:\"\"\"itemClassFilter\"\"\">>\n\\end\n\n\\define toc-linked-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$link>\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n<<toc-caption>>\n</$link>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"toc-expandable\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-unlinked-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"toc-expandable\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-expandable-empty-message()\n<<toc-linked-expandable-body tag:\"\"\"$(tag)$\"\"\" sort:\"\"\"$(sort)$\"\"\" itemClassFilter:\"\"\"$(itemClassFilter)$\"\"\">>\n\\end\n\n\\define toc-expandable(tag,sort:\"\",itemClassFilter)\n<$vars tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\">\n<ol class=\"tc-toc toc-expandable\">\n<$list filter=\"[all[shadows+tiddlers]tag[$tag$]!has[draft.of]$sort$]\">\n<$list filter=\"[all[current]toc-link[no]]\" 
emptyMessage=<<toc-expandable-empty-message>>>\n<<toc-unlinked-expandable-body tag:\"\"\"$tag$\"\"\" sort:\"\"\"$sort$\"\"\" itemClassFilter:\"\"\"itemClassFilter\"\"\">>\n</$list>\n</$list>\n</ol>\n</$vars>\n\\end\n\n\\define toc-linked-selective-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$link>\n<$list filter=\"[all[current]tagging[]limit[1]]\" variable=\"ignore\" emptyMessage=\"<$button class='tc-btn-invisible'>{{$:/core/images/blank}}</$button>\">\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n</$button>\n</$reveal>\n</$list>\n<<toc-caption>>\n</$link>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"toc-selective-expandable\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-unlinked-selective-expandable-body(tag,sort:\"\",itemClassFilter)\n<$set name=\"toc-state\" value=<<qualify \"\"\"$:/state/toc/$tag$-$(currentTiddler)$\"\"\">>>\n<$set name=\"toc-item-class\" filter=\"\"\"$itemClassFilter$\"\"\" value=\"toc-item-selected\" emptyValue=\"toc-item\">\n<li class=<<toc-item-class>>>\n<$list filter=\"[all[current]tagging[]limit[1]]\" variable=\"ignore\" emptyMessage=\"<$button class='tc-btn-invisible'>{{$:/core/images/blank}}</$button> <$view field='caption'><$view field='title'/></$view>\">\n<$reveal type=\"nomatch\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"open\" class=\"tc-btn-invisible\">\n{{$:/core/images/right-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$button set=<<toc-state>> setTo=\"close\" class=\"tc-btn-invisible\">\n{{$:/core/images/down-arrow}}\n<<toc-caption>>\n</$button>\n</$reveal>\n</$list>\n<$reveal type=\"match\" state=<<toc-state>> text=\"open\">\n<$macrocall $name=\"\"\"toc-selective-expandable\"\"\" tag=<<currentTiddler>> sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\"/>\n</$reveal>\n</li>\n</$set>\n</$set>\n\\end\n\n\\define toc-selective-expandable-empty-message()\n<<toc-linked-selective-expandable-body tag:\"\"\"$(tag)$\"\"\" sort:\"\"\"$(sort)$\"\"\" itemClassFilter:\"\"\"$(itemClassFilter)$\"\"\">>\n\\end\n\n\\define toc-selective-expandable(tag,sort:\"\",itemClassFilter)\n<$vars tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" itemClassFilter=\"\"\"$itemClassFilter$\"\"\">\n<ol class=\"tc-toc toc-selective-expandable\">\n<$list filter=\"[all[shadows+tiddlers]tag[$tag$]!has[draft.of]$sort$]\">\n<$list filter=\"[all[current]toc-link[no]]\" variable=\"ignore\" emptyMessage=<<toc-selective-expandable-empty-message>>>\n<<toc-unlinked-selective-expandable-body tag:\"\"\"$tag$\"\"\" sort:\"\"\"$sort$\"\"\" itemClassFilter:\"\"\"$itemClassFilter$\"\"\">>\n</$list>\n</$list>\n</ol>\n</$vars>\n\\end\n\n\\define toc-tabbed-selected-item-filter(selectedTiddler)\n[all[current]field:title{$selectedTiddler$}]\n\\end\n\n\\define 
toc-tabbed-external-nav(tag,sort:\"\",selectedTiddler:\"$:/temp/toc/selectedTiddler\",unselectedText,missingText,template:\"\")\n<$tiddler tiddler={{$selectedTiddler$}}>\n<div class=\"tc-tabbed-table-of-contents\">\n<$linkcatcher to=\"$selectedTiddler$\">\n<div class=\"tc-table-of-contents\">\n<$macrocall $name=\"toc-selective-expandable\" tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" itemClassFilter=<<toc-tabbed-selected-item-filter selectedTiddler:\"\"\"$selectedTiddler$\"\"\">>/>\n</div>\n</$linkcatcher>\n<div class=\"tc-tabbed-table-of-contents-content\">\n<$reveal state=\"\"\"$selectedTiddler$\"\"\" type=\"nomatch\" text=\"\">\n<$transclude mode=\"block\" tiddler=\"$template$\">\n<h1><<toc-caption>></h1>\n<$transclude mode=\"block\">$missingText$</$transclude>\n</$transclude>\n</$reveal>\n<$reveal state=\"\"\"$selectedTiddler$\"\"\" type=\"match\" text=\"\">\n$unselectedText$\n</$reveal>\n</div>\n</div>\n</$tiddler>\n\\end\n\n\\define toc-tabbed-internal-nav(tag,sort:\"\",selectedTiddler:\"$:/temp/toc/selectedTiddler\",unselectedText,missingText,template:\"\")\n<$linkcatcher to=\"\"\"$selectedTiddler$\"\"\">\n<$macrocall $name=\"toc-tabbed-external-nav\" tag=\"\"\"$tag$\"\"\" sort=\"\"\"$sort$\"\"\" selectedTiddler=\"\"\"$selectedTiddler$\"\"\" unselectedText=\"\"\"$unselectedText$\"\"\" missingText=\"\"\"$missingText$\"\"\" template=\"\"\"$template$\"\"\"/>\n</$linkcatcher>\n\\end\n\n"
        },
        "$:/core/macros/translink": {
            "title": "$:/core/macros/translink",
            "tags": "$:/tags/Macro",
            "text": "\\define translink(title,mode:\"block\")\n<div style=\"border:1px solid #ccc; padding: 0.5em; background: black; foreground; white;\">\n<$link to=\"\"\"$title$\"\"\">\n<$text text=\"\"\"$title$\"\"\"/>\n</$link>\n<div style=\"border:1px solid #ccc; padding: 0.5em; background: white; foreground; black;\">\n<$transclude tiddler=\"\"\"$title$\"\"\" mode=\"$mode$\">\n\"<$text text=\"\"\"$title$\"\"\"/>\" is missing\n</$transclude>\n</div>\n</div>\n\\end\n"
        },
        "$:/snippets/minilanguageswitcher": {
            "title": "$:/snippets/minilanguageswitcher",
            "text": "<$select tiddler=\"$:/language\">\n<$list filter=\"[[$:/languages/en-GB]] [plugin-type[language]sort[title]]\">\n<option value=<<currentTiddler>>><$view field=\"description\"><$view field=\"name\"><$view field=\"title\"/></$view></$view></option>\n</$list>\n</$select>"
        },
        "$:/snippets/minithemeswitcher": {
            "title": "$:/snippets/minithemeswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/Theme/\n<<lingo Prompt>> <$select tiddler=\"$:/theme\">\n<$list filter=\"[plugin-type[theme]sort[title]]\">\n<option value=<<currentTiddler>>><$view field=\"name\"><$view field=\"title\"/></$view></option>\n</$list>\n</$select>"
        },
        "$:/snippets/modules": {
            "title": "$:/snippets/modules",
            "text": "\\define describeModuleType(type)\n{{$:/language/Docs/ModuleTypes/$type$}}\n\\end\n<$list filter=\"[moduletypes[]]\">\n\n!! <$macrocall $name=\"currentTiddler\" $type=\"text/plain\" $output=\"text/plain\"/>\n\n<$macrocall $name=\"describeModuleType\" type=<<currentTiddler>>/>\n\n<ul><$list filter=\"[all[current]modules[]]\"><li><$link><<currentTiddler>></$link>\n</li>\n</$list>\n</ul>\n</$list>\n"
        },
        "$:/palette": {
            "title": "$:/palette",
            "text": "$:/palettes/Vanilla"
        },
        "$:/snippets/paletteeditor": {
            "title": "$:/snippets/paletteeditor",
            "text": "\\define lingo-base() $:/language/ControlPanel/Palette/Editor/\n\\define describePaletteColour(colour)\n<$transclude tiddler=\"$:/language/Docs/PaletteColours/$colour$\"><$text text=\"$colour$\"/></$transclude>\n\\end\n<$set name=\"currentTiddler\" value={{$:/palette}}>\n\n<<lingo Prompt>> <$link to={{$:/palette}}><$macrocall $name=\"currentTiddler\" $output=\"text/plain\"/></$link>\n\n<$list filter=\"[all[current]is[shadow]is[tiddler]]\" variable=\"listItem\">\n<<lingo Prompt/Modified>>\n<$button message=\"tm-delete-tiddler\" param={{$:/palette}}><<lingo Reset/Caption>></$button>\n</$list>\n\n<$list filter=\"[all[current]is[shadow]!is[tiddler]]\" variable=\"listItem\">\n<<lingo Clone/Prompt>>\n</$list>\n\n<$button message=\"tm-new-tiddler\" param={{$:/palette}}><<lingo Clone/Caption>></$button>\n\n<table>\n<tbody>\n<$list filter=\"[all[current]indexes[]]\" variable=\"colourName\">\n<tr>\n<td>\n''<$macrocall $name=\"describePaletteColour\" colour=<<colourName>>/>''<br/>\n<$macrocall $name=\"colourName\" $output=\"text/plain\"/>\n</td>\n<td>\n<$edit-text index=<<colourName>> tag=\"input\"/>\n<br>\n<$edit-text index=<<colourName>> type=\"color\" tag=\"input\"/>\n</td>\n</tr>\n</$list>\n</tbody>\n</table>\n</$set>\n"
        },
        "$:/snippets/palettepreview": {
            "title": "$:/snippets/palettepreview",
            "text": "<$set name=\"currentTiddler\" value={{$:/palette}}>\n<$transclude tiddler=\"$:/snippets/currpalettepreview\"/>\n</$set>\n"
        },
        "$:/snippets/paletteswitcher": {
            "title": "$:/snippets/paletteswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/Palette/\n<div class=\"tc-prompt\">\n<<lingo Prompt>> <$view tiddler={{$:/palette}} field=\"name\"/>\n</div>\n\n<$linkcatcher to=\"$:/palette\">\n<div class=\"tc-chooser\"><$list filter=\"[all[shadows+tiddlers]tag[$:/tags/Palette]sort[description]]\"><div class=\"tc-chooser-item\"><$link to={{!!title}}><div><$reveal state=\"$:/palette\" type=\"match\" text={{!!title}}>&bull;</$reveal><$reveal state=\"$:/palette\" type=\"nomatch\" text={{!!title}}>&nbsp;</$reveal> ''<$view field=\"name\" format=\"text\"/>'' - <$view field=\"description\" format=\"text\"/></div><$transclude tiddler=\"$:/snippets/currpalettepreview\"/></$link></div>\n</$list>\n</div>\n</$linkcatcher>"
        },
        "$:/temp/search": {
            "title": "$:/temp/search",
            "text": ""
        },
        "$:/tags/AdvancedSearch": {
            "title": "$:/tags/AdvancedSearch",
            "list": "[[$:/core/ui/AdvancedSearch/Standard]] [[$:/core/ui/AdvancedSearch/System]] [[$:/core/ui/AdvancedSearch/Shadows]] [[$:/core/ui/AdvancedSearch/Filter]]"
        },
        "$:/tags/AdvancedSearch/FilterButton": {
            "title": "$:/tags/AdvancedSearch/FilterButton",
            "list": "$:/core/ui/AdvancedSearch/Filter/FilterButtons/dropdown $:/core/ui/AdvancedSearch/Filter/FilterButtons/clear $:/core/ui/AdvancedSearch/Filter/FilterButtons/export $:/core/ui/AdvancedSearch/Filter/FilterButtons/delete"
        },
        "$:/tags/ControlPanel": {
            "title": "$:/tags/ControlPanel",
            "list": "$:/core/ui/ControlPanel/Info $:/core/ui/ControlPanel/Appearance $:/core/ui/ControlPanel/Settings $:/core/ui/ControlPanel/Saving $:/core/ui/ControlPanel/Plugins $:/core/ui/ControlPanel/Tools $:/core/ui/ControlPanel/Internals"
        },
        "$:/tags/ControlPanel/Info": {
            "title": "$:/tags/ControlPanel/Info",
            "list": "$:/core/ui/ControlPanel/Basics $:/core/ui/ControlPanel/Advanced"
        },
        "$:/tags/ControlPanel/Plugins": {
            "title": "$:/tags/ControlPanel/Plugins",
            "list": "[[$:/core/ui/ControlPanel/Plugins/Installed]] [[$:/core/ui/ControlPanel/Plugins/Add]]"
        },
        "$:/tags/EditorToolbar": {
            "title": "$:/tags/EditorToolbar",
            "list": "$:/core/ui/EditorToolbar/paint $:/core/ui/EditorToolbar/opacity $:/core/ui/EditorToolbar/line-width $:/core/ui/EditorToolbar/clear $:/core/ui/EditorToolbar/bold $:/core/ui/EditorToolbar/italic $:/core/ui/EditorToolbar/strikethrough $:/core/ui/EditorToolbar/underline $:/core/ui/EditorToolbar/superscript $:/core/ui/EditorToolbar/subscript $:/core/ui/EditorToolbar/mono-line $:/core/ui/EditorToolbar/mono-block $:/core/ui/EditorToolbar/quote $:/core/ui/EditorToolbar/list-bullet $:/core/ui/EditorToolbar/list-number $:/core/ui/EditorToolbar/heading-1 $:/core/ui/EditorToolbar/heading-2 $:/core/ui/EditorToolbar/heading-3 $:/core/ui/EditorToolbar/heading-4 $:/core/ui/EditorToolbar/heading-5 $:/core/ui/EditorToolbar/heading-6 $:/core/ui/EditorToolbar/link $:/core/ui/EditorToolbar/excise $:/core/ui/EditorToolbar/picture $:/core/ui/EditorToolbar/stamp $:/core/ui/EditorToolbar/size $:/core/ui/EditorToolbar/editor-height $:/core/ui/EditorToolbar/more $:/core/ui/EditorToolbar/preview $:/core/ui/EditorToolbar/preview-type"
        },
        "$:/tags/EditTemplate": {
            "title": "$:/tags/EditTemplate",
            "list": "[[$:/core/ui/EditTemplate/controls]] [[$:/core/ui/EditTemplate/title]] [[$:/core/ui/EditTemplate/tags]] [[$:/core/ui/EditTemplate/shadow]] [[$:/core/ui/ViewTemplate/classic]] [[$:/core/ui/EditTemplate/body]] [[$:/core/ui/EditTemplate/type]] [[$:/core/ui/EditTemplate/fields]]"
        },
        "$:/tags/EditToolbar": {
            "title": "$:/tags/EditToolbar",
            "list": "[[$:/core/ui/Buttons/delete]] [[$:/core/ui/Buttons/cancel]] [[$:/core/ui/Buttons/save]]"
        },
        "$:/tags/MoreSideBar": {
            "title": "$:/tags/MoreSideBar",
            "list": "[[$:/core/ui/MoreSideBar/All]] [[$:/core/ui/MoreSideBar/Recent]] [[$:/core/ui/MoreSideBar/Tags]] [[$:/core/ui/MoreSideBar/Missing]] [[$:/core/ui/MoreSideBar/Drafts]] [[$:/core/ui/MoreSideBar/Orphans]] [[$:/core/ui/MoreSideBar/Types]] [[$:/core/ui/MoreSideBar/System]] [[$:/core/ui/MoreSideBar/Shadows]]",
            "text": ""
        },
        "$:/tags/PageControls": {
            "title": "$:/tags/PageControls",
            "list": "[[$:/core/ui/Buttons/home]] [[$:/core/ui/Buttons/close-all]] [[$:/core/ui/Buttons/fold-all]] [[$:/core/ui/Buttons/unfold-all]] [[$:/core/ui/Buttons/permaview]] [[$:/core/ui/Buttons/new-tiddler]] [[$:/core/ui/Buttons/new-journal]] [[$:/core/ui/Buttons/new-image]] [[$:/core/ui/Buttons/import]] [[$:/core/ui/Buttons/export-page]] [[$:/core/ui/Buttons/control-panel]] [[$:/core/ui/Buttons/advanced-search]] [[$:/core/ui/Buttons/tag-manager]] [[$:/core/ui/Buttons/language]] [[$:/core/ui/Buttons/palette]] [[$:/core/ui/Buttons/theme]] [[$:/core/ui/Buttons/storyview]] [[$:/core/ui/Buttons/encryption]] [[$:/core/ui/Buttons/full-screen]] [[$:/core/ui/Buttons/save-wiki]] [[$:/core/ui/Buttons/refresh]] [[$:/core/ui/Buttons/more-page-actions]]"
        },
        "$:/tags/PageTemplate": {
            "title": "$:/tags/PageTemplate",
            "list": "[[$:/core/ui/PageTemplate/topleftbar]] [[$:/core/ui/PageTemplate/toprightbar]] [[$:/core/ui/PageTemplate/sidebar]] [[$:/core/ui/PageTemplate/story]] [[$:/core/ui/PageTemplate/alerts]]",
            "text": ""
        },
        "$:/tags/SideBar": {
            "title": "$:/tags/SideBar",
            "list": "[[$:/core/ui/SideBar/Open]] [[$:/core/ui/SideBar/Recent]] [[$:/core/ui/SideBar/Tools]] [[$:/core/ui/SideBar/More]]",
            "text": ""
        },
        "$:/tags/TiddlerInfo": {
            "title": "$:/tags/TiddlerInfo",
            "list": "[[$:/core/ui/TiddlerInfo/Tools]] [[$:/core/ui/TiddlerInfo/References]] [[$:/core/ui/TiddlerInfo/Tagging]] [[$:/core/ui/TiddlerInfo/List]] [[$:/core/ui/TiddlerInfo/Listed]] [[$:/core/ui/TiddlerInfo/Fields]]",
            "text": ""
        },
        "$:/tags/TiddlerInfo/Advanced": {
            "title": "$:/tags/TiddlerInfo/Advanced",
            "list": "[[$:/core/ui/TiddlerInfo/Advanced/ShadowInfo]] [[$:/core/ui/TiddlerInfo/Advanced/PluginInfo]]"
        },
        "$:/tags/ViewTemplate": {
            "title": "$:/tags/ViewTemplate",
            "list": "[[$:/core/ui/ViewTemplate/title]] [[$:/core/ui/ViewTemplate/unfold]] [[$:/core/ui/ViewTemplate/subtitle]] [[$:/core/ui/ViewTemplate/tags]] [[$:/core/ui/ViewTemplate/classic]] [[$:/core/ui/ViewTemplate/body]]"
        },
        "$:/tags/ViewToolbar": {
            "title": "$:/tags/ViewToolbar",
            "list": "[[$:/core/ui/Buttons/more-tiddler-actions]] [[$:/core/ui/Buttons/info]] [[$:/core/ui/Buttons/new-here]] [[$:/core/ui/Buttons/new-journal-here]] [[$:/core/ui/Buttons/clone]] [[$:/core/ui/Buttons/export-tiddler]] [[$:/core/ui/Buttons/edit]] [[$:/core/ui/Buttons/delete]] [[$:/core/ui/Buttons/permalink]] [[$:/core/ui/Buttons/permaview]] [[$:/core/ui/Buttons/open-window]] [[$:/core/ui/Buttons/close-others]] [[$:/core/ui/Buttons/close]] [[$:/core/ui/Buttons/fold-others]] [[$:/core/ui/Buttons/fold]]"
        },
        "$:/snippets/themeswitcher": {
            "title": "$:/snippets/themeswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/Theme/\n<<lingo Prompt>> <$view tiddler={{$:/theme}} field=\"name\"/>\n\n<$linkcatcher to=\"$:/theme\">\n<$list filter=\"[plugin-type[theme]sort[title]]\"><div><$reveal state=\"$:/theme\" type=\"match\" text={{!!title}}>&bull;</$reveal><$reveal state=\"$:/theme\" type=\"nomatch\" text={{!!title}}>&nbsp;</$reveal> <$link to={{!!title}}>''<$view field=\"name\" format=\"text\"/>'' <$view field=\"description\" format=\"text\"/></$link></div>\n</$list>\n</$linkcatcher>"
        },
        "$:/core/wiki/title": {
            "title": "$:/core/wiki/title",
            "type": "text/vnd.tiddlywiki",
            "text": "{{$:/SiteTitle}} --- {{$:/SiteSubtitle}}"
        },
        "$:/view": {
            "title": "$:/view",
            "text": "classic"
        },
        "$:/snippets/viewswitcher": {
            "title": "$:/snippets/viewswitcher",
            "text": "\\define lingo-base() $:/language/ControlPanel/StoryView/\n<<lingo Prompt>> <$select tiddler=\"$:/view\">\n<$list filter=\"[storyviews[]]\">\n<option><$view field=\"title\"/></option>\n</$list>\n</$select>"
        }
    }
}
<div class="tc-more-sidebar">
<<tabs "[all[shadows+tiddlers]tag[$:/tags/MoreSideBar]!has[draft.of]]" "$:/core/ui/MoreSideBar/Tags" "$:/state/tab/moresidebar" "tc-vertical">>
</div>
<$macrocall $name="timeline" format={{$:/language/RecentChanges/DateFormat}}/>
\define lingo-base() $:/language/ControlPanel/
\define config-title()
$:/config/PageControlButtons/Visibility/$(listItem)$
\end

<<lingo Basics/Version/Prompt>> <<version>>

<$set name="tv-config-toolbar-icons" value="yes">

<$set name="tv-config-toolbar-text" value="yes">

<$set name="tv-config-toolbar-class" value="">

<$list filter="[all[shadows+tiddlers]tag[$:/tags/PageControls]!has[draft.of]]" variable="listItem">

<div style="position:relative;">

<$checkbox tiddler=<<config-title>> field="text" checked="show" unchecked="hide" default="show"/> <$transclude tiddler=<<listItem>>/> <i class="tc-muted"><$transclude tiddler=<<listItem>> field="description"/></i>

</div>

</$list>

</$set>

</$set>

</$set>


\define title-styles()
fill:$(foregroundColor)$;
\end
\define config-title()
$:/config/ViewToolbarButtons/Visibility/$(listItem)$
\end
<div class="tc-tiddler-title">
<div class="tc-titlebar">
<span class="tc-tiddler-controls">
<$list filter="[all[shadows+tiddlers]tag[$:/tags/ViewToolbar]!has[draft.of]]" variable="listItem"><$reveal type="nomatch" state=<<config-title>> text="hide"><$transclude tiddler=<<listItem>>/></$reveal></$list>
</span>
<$set name="tv-wikilinks" value={{$:/config/Tiddlers/TitleLinks}}>
<$link>
<$set name="foregroundColor" value={{!!color}}>
<span class="tc-tiddler-title-icon" style=<<title-styles>>>
<$transclude tiddler={{!!icon}}/>
</span>
</$set>
<$list filter="[all[current]removeprefix[$:/]]">
<h2 class="tc-title" title={{$:/language/SystemTiddler/Tooltip}}>
<span class="tc-system-title-prefix">$:/</span><$text text=<<currentTiddler>>/>
</h2>
</$list>
<$list filter="[all[current]!prefix[$:/]]">
<h2 class="tc-title">
<$transclude field="caption"><$view field="title"/></$transclude>
</h2>
</$list>
</$link>
</$set>
</div>

<$reveal type="nomatch" text="" default="" state=<<tiddlerInfoState>> class="tc-tiddler-info tc-popup-handle" animate="yes" retain="yes">

<$transclude tiddler="$:/core/ui/TiddlerInfo"/>

</$reveal>
</div>
[[Welcome Page]]
no
{
    "tiddlers": {
        "$:/plugins/tiddlywiki/highlight/highlight.js": {
            "type": "application/javascript",
            "title": "$:/plugins/tiddlywiki/highlight/highlight.js",
            "module-type": "library",
            "text": "var hljs = require(\"$:/plugins/tiddlywiki/highlight/highlight.js\");\n!function(e){\"undefined\"!=typeof exports?e(exports):(window.hljs=e({}),\"function\"==typeof define&&define.amd&&define(\"hljs\",[],function(){return window.hljs}))}(function(e){function n(e){return e.replace(/&/gm,\"&amp;\").replace(/</gm,\"&lt;\").replace(/>/gm,\"&gt;\")}function t(e){return e.nodeName.toLowerCase()}function r(e,n){var t=e&&e.exec(n);return t&&0==t.index}function a(e){return/^(no-?highlight|plain|text)$/i.test(e)}function i(e){var n,t,r,i=e.className+\" \";if(i+=e.parentNode?e.parentNode.className:\"\",t=/\\blang(?:uage)?-([\\w-]+)\\b/i.exec(i))return w(t[1])?t[1]:\"no-highlight\";for(i=i.split(/\\s+/),n=0,r=i.length;r>n;n++)if(w(i[n])||a(i[n]))return i[n]}function o(e,n){var t,r={};for(t in e)r[t]=e[t];if(n)for(t in n)r[t]=n[t];return r}function u(e){var n=[];return function r(e,a){for(var i=e.firstChild;i;i=i.nextSibling)3==i.nodeType?a+=i.nodeValue.length:1==i.nodeType&&(n.push({event:\"start\",offset:a,node:i}),a=r(i,a),t(i).match(/br|hr|img|input/)||n.push({event:\"stop\",offset:a,node:i}));return a}(e,0),n}function c(e,r,a){function i(){return e.length&&r.length?e[0].offset!=r[0].offset?e[0].offset<r[0].offset?e:r:\"start\"==r[0].event?e:r:e.length?e:r}function o(e){function r(e){return\" \"+e.nodeName+'=\"'+n(e.value)+'\"'}f+=\"<\"+t(e)+Array.prototype.map.call(e.attributes,r).join(\"\")+\">\"}function u(e){f+=\"</\"+t(e)+\">\"}function c(e){(\"start\"==e.event?o:u)(e.node)}for(var s=0,f=\"\",l=[];e.length||r.length;){var g=i();if(f+=n(a.substr(s,g[0].offset-s)),s=g[0].offset,g==e){l.reverse().forEach(u);do c(g.splice(0,1)[0]),g=i();while(g==e&&g.length&&g[0].offset==s);l.reverse().forEach(o)}else\"start\"==g[0].event?l.push(g[0].node):l.pop(),c(g.splice(0,1)[0])}return f+n(a.substr(s))}function s(e){function n(e){return e&&e.source||e}function t(t,r){return new RegExp(n(t),\"m\"+(e.cI?\"i\":\"\")+(r?\"g\":\"\"))}function r(a,i){if(!a.compiled){if(a.compiled=!0,a.k=a.k||a.bK,a.k){var u={},c=function(n,t){e.cI&&(t=t.toLowerCase()),t.split(\" \").forEach(function(e){var t=e.split(\"|\");u[t[0]]=[n,t[1]?Number(t[1]):1]})};\"string\"==typeof a.k?c(\"keyword\",a.k):Object.keys(a.k).forEach(function(e){c(e,a.k[e])}),a.k=u}a.lR=t(a.l||/\\b\\w+\\b/,!0),i&&(a.bK&&(a.b=\"\\\\b(\"+a.bK.split(\" \").join(\"|\")+\")\\\\b\"),a.b||(a.b=/\\B|\\b/),a.bR=t(a.b),a.e||a.eW||(a.e=/\\B|\\b/),a.e&&(a.eR=t(a.e)),a.tE=n(a.e)||\"\",a.eW&&i.tE&&(a.tE+=(a.e?\"|\":\"\")+i.tE)),a.i&&(a.iR=t(a.i)),void 0===a.r&&(a.r=1),a.c||(a.c=[]);var s=[];a.c.forEach(function(e){e.v?e.v.forEach(function(n){s.push(o(e,n))}):s.push(\"self\"==e?a:e)}),a.c=s,a.c.forEach(function(e){r(e,a)}),a.starts&&r(a.starts,i);var f=a.c.map(function(e){return e.bK?\"\\\\.?(\"+e.b+\")\\\\.?\":e.b}).concat([a.tE,a.i]).map(n).filter(Boolean);a.t=f.length?t(f.join(\"|\"),!0):{exec:function(){return null}}}}r(e)}function f(e,t,a,i){function o(e,n){for(var t=0;t<n.c.length;t++)if(r(n.c[t].bR,e))return n.c[t]}function u(e,n){if(r(e.eR,n)){for(;e.endsParent&&e.parent;)e=e.parent;return e}return e.eW?u(e.parent,n):void 0}function c(e,n){return!a&&r(n.iR,e)}function g(e,n){var t=N.cI?n[0].toLowerCase():n[0];return e.k.hasOwnProperty(t)&&e.k[t]}function h(e,n,t,r){var a=r?\"\":E.classPrefix,i='<span class=\"'+a,o=t?\"\":\"</span>\";return i+=e+'\">',i+n+o}function p(){if(!L.k)return n(y);var e=\"\",t=0;L.lR.lastIndex=0;for(var r=L.lR.exec(y);r;){e+=n(y.substr(t,r.index-t));var 
a=g(L,r);a?(B+=a[1],e+=h(a[0],n(r[0]))):e+=n(r[0]),t=L.lR.lastIndex,r=L.lR.exec(y)}return e+n(y.substr(t))}function d(){var e=\"string\"==typeof L.sL;if(e&&!x[L.sL])return n(y);var t=e?f(L.sL,y,!0,M[L.sL]):l(y,L.sL.length?L.sL:void 0);return L.r>0&&(B+=t.r),e&&(M[L.sL]=t.top),h(t.language,t.value,!1,!0)}function b(){return void 0!==L.sL?d():p()}function v(e,t){var r=e.cN?h(e.cN,\"\",!0):\"\";e.rB?(k+=r,y=\"\"):e.eB?(k+=n(t)+r,y=\"\"):(k+=r,y=t),L=Object.create(e,{parent:{value:L}})}function m(e,t){if(y+=e,void 0===t)return k+=b(),0;var r=o(t,L);if(r)return k+=b(),v(r,t),r.rB?0:t.length;var a=u(L,t);if(a){var i=L;i.rE||i.eE||(y+=t),k+=b();do L.cN&&(k+=\"</span>\"),B+=L.r,L=L.parent;while(L!=a.parent);return i.eE&&(k+=n(t)),y=\"\",a.starts&&v(a.starts,\"\"),i.rE?0:t.length}if(c(t,L))throw new Error('Illegal lexeme \"'+t+'\" for mode \"'+(L.cN||\"<unnamed>\")+'\"');return y+=t,t.length||1}var N=w(e);if(!N)throw new Error('Unknown language: \"'+e+'\"');s(N);var R,L=i||N,M={},k=\"\";for(R=L;R!=N;R=R.parent)R.cN&&(k=h(R.cN,\"\",!0)+k);var y=\"\",B=0;try{for(var C,j,I=0;;){if(L.t.lastIndex=I,C=L.t.exec(t),!C)break;j=m(t.substr(I,C.index-I),C[0]),I=C.index+j}for(m(t.substr(I)),R=L;R.parent;R=R.parent)R.cN&&(k+=\"</span>\");return{r:B,value:k,language:e,top:L}}catch(O){if(-1!=O.message.indexOf(\"Illegal\"))return{r:0,value:n(t)};throw O}}function l(e,t){t=t||E.languages||Object.keys(x);var r={r:0,value:n(e)},a=r;return t.forEach(function(n){if(w(n)){var t=f(n,e,!1);t.language=n,t.r>a.r&&(a=t),t.r>r.r&&(a=r,r=t)}}),a.language&&(r.second_best=a),r}function g(e){return E.tabReplace&&(e=e.replace(/^((<[^>]+>|\\t)+)/gm,function(e,n){return n.replace(/\\t/g,E.tabReplace)})),E.useBR&&(e=e.replace(/\\n/g,\"<br>\")),e}function h(e,n,t){var r=n?R[n]:t,a=[e.trim()];return e.match(/\\bhljs\\b/)||a.push(\"hljs\"),-1===e.indexOf(r)&&a.push(r),a.join(\" \").trim()}function p(e){var n=i(e);if(!a(n)){var t;E.useBR?(t=document.createElementNS(\"http://www.w3.org/1999/xhtml\",\"div\"),t.innerHTML=e.innerHTML.replace(/\\n/g,\"\").replace(/<br[ \\/]*>/g,\"\\n\")):t=e;var r=t.textContent,o=n?f(n,r,!0):l(r),s=u(t);if(s.length){var p=document.createElementNS(\"http://www.w3.org/1999/xhtml\",\"div\");p.innerHTML=o.value,o.value=c(s,u(p),r)}o.value=g(o.value),e.innerHTML=o.value,e.className=h(e.className,n,o.language),e.result={language:o.language,re:o.r},o.second_best&&(e.second_best={language:o.second_best.language,re:o.second_best.r})}}function d(e){E=o(E,e)}function b(){if(!b.called){b.called=!0;var e=document.querySelectorAll(\"pre code\");Array.prototype.forEach.call(e,p)}}function v(){addEventListener(\"DOMContentLoaded\",b,!1),addEventListener(\"load\",b,!1)}function m(n,t){var r=x[n]=t(e);r.aliases&&r.aliases.forEach(function(e){R[e]=n})}function N(){return Object.keys(x)}function w(e){return e=e.toLowerCase(),x[e]||x[R[e]]}var E={classPrefix:\"hljs-\",tabReplace:null,useBR:!1,languages:void 0},x={},R={};return 
e.highlight=f,e.highlightAuto=l,e.fixMarkup=g,e.highlightBlock=p,e.configure=d,e.initHighlighting=b,e.initHighlightingOnLoad=v,e.registerLanguage=m,e.listLanguages=N,e.getLanguage=w,e.inherit=o,e.IR=\"[a-zA-Z]\\\\w*\",e.UIR=\"[a-zA-Z_]\\\\w*\",e.NR=\"\\\\b\\\\d+(\\\\.\\\\d+)?\",e.CNR=\"(\\\\b0[xX][a-fA-F0-9]+|(\\\\b\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+)([eE][-+]?\\\\d+)?)\",e.BNR=\"\\\\b(0b[01]+)\",e.RSR=\"!|!=|!==|%|%=|&|&&|&=|\\\\*|\\\\*=|\\\\+|\\\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\\\?|\\\\[|\\\\{|\\\\(|\\\\^|\\\\^=|\\\\||\\\\|=|\\\\|\\\\||~\",e.BE={b:\"\\\\\\\\[\\\\s\\\\S]\",r:0},e.ASM={cN:\"string\",b:\"'\",e:\"'\",i:\"\\\\n\",c:[e.BE]},e.QSM={cN:\"string\",b:'\"',e:'\"',i:\"\\\\n\",c:[e.BE]},e.PWM={b:/\\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such)\\b/},e.C=function(n,t,r){var a=e.inherit({cN:\"comment\",b:n,e:t,c:[]},r||{});return a.c.push(e.PWM),a.c.push({cN:\"doctag\",b:\"(?:TODO|FIXME|NOTE|BUG|XXX):\",r:0}),a},e.CLCM=e.C(\"//\",\"$\"),e.CBCM=e.C(\"/\\\\*\",\"\\\\*/\"),e.HCM=e.C(\"#\",\"$\"),e.NM={cN:\"number\",b:e.NR,r:0},e.CNM={cN:\"number\",b:e.CNR,r:0},e.BNM={cN:\"number\",b:e.BNR,r:0},e.CSSNM={cN:\"number\",b:e.NR+\"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?\",r:0},e.RM={cN:\"regexp\",b:/\\//,e:/\\/[gimuy]*/,i:/\\n/,c:[e.BE,{b:/\\[/,e:/\\]/,r:0,c:[e.BE]}]},e.TM={cN:\"title\",b:e.IR,r:0},e.UTM={cN:\"title\",b:e.UIR,r:0},e});hljs.registerLanguage(\"markdown\",function(e){return{aliases:[\"md\",\"mkdown\",\"mkd\"],c:[{cN:\"header\",v:[{b:\"^#{1,6}\",e:\"$\"},{b:\"^.+?\\\\n[=-]{2,}$\"}]},{b:\"<\",e:\">\",sL:\"xml\",r:0},{cN:\"bullet\",b:\"^([*+-]|(\\\\d+\\\\.))\\\\s+\"},{cN:\"strong\",b:\"[*_]{2}.+?[*_]{2}\"},{cN:\"emphasis\",v:[{b:\"\\\\*.+?\\\\*\"},{b:\"_.+?_\",r:0}]},{cN:\"blockquote\",b:\"^>\\\\s+\",e:\"$\"},{cN:\"code\",v:[{b:\"`.+?`\"},{b:\"^( {4}|\t)\",e:\"$\",r:0}]},{cN:\"horizontal_rule\",b:\"^[-\\\\*]{3,}\",e:\"$\"},{b:\"\\\\[.+?\\\\][\\\\(\\\\[].*?[\\\\)\\\\]]\",rB:!0,c:[{cN:\"link_label\",b:\"\\\\[\",e:\"\\\\]\",eB:!0,rE:!0,r:0},{cN:\"link_url\",b:\"\\\\]\\\\(\",e:\"\\\\)\",eB:!0,eE:!0},{cN:\"link_reference\",b:\"\\\\]\\\\[\",e:\"\\\\]\",eB:!0,eE:!0}],r:10},{b:\"^\\\\[.+\\\\]:\",rB:!0,c:[{cN:\"link_reference\",b:\"\\\\[\",e:\"\\\\]:\",eB:!0,eE:!0,starts:{cN:\"link_url\",e:\"$\"}}]}]}});hljs.registerLanguage(\"ruby\",function(e){var c=\"[a-zA-Z_]\\\\w*[!?=]?|[-+~]\\\\@|<<|>>|=~|===?|<=>|[<>]=?|\\\\*\\\\*|[-/+%^&*~`|]|\\\\[\\\\]=?\",r=\"and false then defined module in return redo if BEGIN retry end for true self when next until do begin unless END rescue nil else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor\",b={cN:\"doctag\",b:\"@[A-Za-z]+\"},a={cN:\"value\",b:\"#<\",e:\">\"},n=[e.C(\"#\",\"$\",{c:[b]}),e.C(\"^\\\\=begin\",\"^\\\\=end\",{c:[b],r:10}),e.C(\"^__END__\",\"\\\\n$\")],s={cN:\"subst\",b:\"#\\\\{\",e:\"}\",k:r},t={cN:\"string\",c:[e.BE,s],v:[{b:/'/,e:/'/},{b:/\"/,e:/\"/},{b:/`/,e:/`/},{b:\"%[qQwWx]?\\\\(\",e:\"\\\\)\"},{b:\"%[qQwWx]?\\\\[\",e:\"\\\\]\"},{b:\"%[qQwWx]?{\",e:\"}\"},{b:\"%[qQwWx]?<\",e:\">\"},{b:\"%[qQwWx]?/\",e:\"/\"},{b:\"%[qQwWx]?%\",e:\"%\"},{b:\"%[qQwWx]?-\",e:\"-\"},{b:\"%[qQwWx]?\\\\|\",e:\"\\\\|\"},{b:/\\B\\?(\\\\\\d{1,3}|\\\\x[A-Fa-f0-9]{1,2}|\\\\u[A-Fa-f0-9]{4}|\\\\?\\S)\\b/}]},i={cN:\"params\",b:\"\\\\(\",e:\"\\\\)\",k:r},d=[t,a,{cN:\"class\",bK:\"class 
module\",e:\"$|;\",i:/=/,c:[e.inherit(e.TM,{b:\"[A-Za-z_]\\\\w*(::\\\\w+)*(\\\\?|\\\\!)?\"}),{cN:\"inheritance\",b:\"<\\\\s*\",c:[{cN:\"parent\",b:\"(\"+e.IR+\"::)?\"+e.IR}]}].concat(n)},{cN:\"function\",bK:\"def\",e:\"$|;\",c:[e.inherit(e.TM,{b:c}),i].concat(n)},{cN:\"constant\",b:\"(::)?(\\\\b[A-Z]\\\\w*(::)?)+\",r:0},{cN:\"symbol\",b:e.UIR+\"(\\\\!|\\\\?)?:\",r:0},{cN:\"symbol\",b:\":\",c:[t,{b:c}],r:0},{cN:\"number\",b:\"(\\\\b0[0-7_]+)|(\\\\b0x[0-9a-fA-F_]+)|(\\\\b[1-9][0-9_]*(\\\\.[0-9_]+)?)|[0_]\\\\b\",r:0},{cN:\"variable\",b:\"(\\\\$\\\\W)|((\\\\$|\\\\@\\\\@?)(\\\\w+))\"},{b:\"(\"+e.RSR+\")\\\\s*\",c:[a,{cN:\"regexp\",c:[e.BE,s],i:/\\n/,v:[{b:\"/\",e:\"/[a-z]*\"},{b:\"%r{\",e:\"}[a-z]*\"},{b:\"%r\\\\(\",e:\"\\\\)[a-z]*\"},{b:\"%r!\",e:\"![a-z]*\"},{b:\"%r\\\\[\",e:\"\\\\][a-z]*\"}]}].concat(n),r:0}].concat(n);s.c=d,i.c=d;var o=\"[>?]>\",l=\"[\\\\w#]+\\\\(\\\\w+\\\\):\\\\d+:\\\\d+>\",u=\"(\\\\w+-)?\\\\d+\\\\.\\\\d+\\\\.\\\\d(p\\\\d+)?[^>]+>\",N=[{b:/^\\s*=>/,cN:\"status\",starts:{e:\"$\",c:d}},{cN:\"prompt\",b:\"^(\"+o+\"|\"+l+\"|\"+u+\")\",starts:{e:\"$\",c:d}}];return{aliases:[\"rb\",\"gemspec\",\"podspec\",\"thor\",\"irb\"],k:r,c:n.concat(N).concat(d)}});hljs.registerLanguage(\"makefile\",function(e){var a={cN:\"variable\",b:/\\$\\(/,e:/\\)/,c:[e.BE]};return{aliases:[\"mk\",\"mak\"],c:[e.HCM,{b:/^\\w+\\s*\\W*=/,rB:!0,r:0,starts:{cN:\"constant\",e:/\\s*\\W*=/,eE:!0,starts:{e:/$/,r:0,c:[a]}}},{cN:\"title\",b:/^[\\w]+:\\s*$/},{cN:\"phony\",b:/^\\.PHONY:/,e:/$/,k:\".PHONY\",l:/[\\.\\w]+/},{b:/^\\t+/,e:/$/,r:0,c:[e.QSM,a]}]}});hljs.registerLanguage(\"json\",function(e){var t={literal:\"true false null\"},i=[e.QSM,e.CNM],l={cN:\"value\",e:\",\",eW:!0,eE:!0,c:i,k:t},c={b:\"{\",e:\"}\",c:[{cN:\"attribute\",b:'\\\\s*\"',e:'\"\\\\s*:\\\\s*',eB:!0,eE:!0,c:[e.BE],i:\"\\\\n\",starts:l}],i:\"\\\\S\"},n={b:\"\\\\[\",e:\"\\\\]\",c:[e.inherit(l,{cN:null})],i:\"\\\\S\"};return i.splice(i.length,0,c,n),{c:i,k:t,i:\"\\\\S\"}});hljs.registerLanguage(\"xml\",function(t){var s=\"[A-Za-z0-9\\\\._:-]+\",c={b:/<\\?(php)?(?!\\w)/,e:/\\?>/,sL:\"php\"},e={eW:!0,i:/</,r:0,c:[c,{cN:\"attribute\",b:s,r:0},{b:\"=\",r:0,c:[{cN:\"value\",c:[c],v:[{b:/\"/,e:/\"/},{b:/'/,e:/'/},{b:/[^\\s\\/>]+/}]}]}]};return{aliases:[\"html\",\"xhtml\",\"rss\",\"atom\",\"xsl\",\"plist\"],cI:!0,c:[{cN:\"doctype\",b:\"<!DOCTYPE\",e:\">\",r:10,c:[{b:\"\\\\[\",e:\"\\\\]\"}]},t.C(\"<!--\",\"-->\",{r:10}),{cN:\"cdata\",b:\"<\\\\!\\\\[CDATA\\\\[\",e:\"\\\\]\\\\]>\",r:10},{cN:\"tag\",b:\"<style(?=\\\\s|>|$)\",e:\">\",k:{title:\"style\"},c:[e],starts:{e:\"</style>\",rE:!0,sL:\"css\"}},{cN:\"tag\",b:\"<script(?=\\\\s|>|$)\",e:\">\",k:{title:\"script\"},c:[e],starts:{e:\"</script>\",rE:!0,sL:[\"actionscript\",\"javascript\",\"handlebars\"]}},c,{cN:\"pi\",b:/<\\?\\w+/,e:/\\?>/,r:10},{cN:\"tag\",b:\"</?\",e:\"/?>\",c:[{cN:\"title\",b:/[^ \\/><\\n\\t]+/,r:0},e]}]}});hljs.registerLanguage(\"css\",function(e){var c=\"[a-zA-Z-][a-zA-Z0-9_-]*\",a={cN:\"function\",b:c+\"\\\\(\",rB:!0,eE:!0,e:\"\\\\(\"},r={cN:\"rule\",b:/[A-Z\\_\\.\\-]+\\s*:/,rB:!0,e:\";\",eW:!0,c:[{cN:\"attribute\",b:/\\S/,e:\":\",eE:!0,starts:{cN:\"value\",eW:!0,eE:!0,c:[a,e.CSSNM,e.QSM,e.ASM,e.CBCM,{cN:\"hexcolor\",b:\"#[0-9A-Fa-f]+\"},{cN:\"important\",b:\"!important\"}]}}]};return{cI:!0,i:/[=\\/|'\\$]/,c:[e.CBCM,r,{cN:\"id\",b:/\\#[A-Za-z0-9_-]+/},{cN:\"class\",b:/\\.[A-Za-z0-9_-]+/},{cN:\"attr_selector\",b:/\\[/,e:/\\]/,i:\"$\"},{cN:\"pseudo\",b:/:(:)?[a-zA-Z0-9\\_\\-\\+\\(\\)\"']+/},{cN:\"at_rule\",b:\"@(font-face|page)\",l:\"[a-z-]+\",k:\"font-face 
page\"},{cN:\"at_rule\",b:\"@\",e:\"[{;]\",c:[{cN:\"keyword\",b:/\\S+/},{b:/\\s/,eW:!0,eE:!0,r:0,c:[a,e.ASM,e.QSM,e.CSSNM]}]},{cN:\"tag\",b:c,r:0},{cN:\"rules\",b:\"{\",e:\"}\",i:/\\S/,c:[e.CBCM,r]}]}});hljs.registerLanguage(\"perl\",function(e){var t=\"getpwent getservent quotemeta msgrcv scalar kill dbmclose undef lc ma syswrite tr send umask sysopen shmwrite vec qx utime local oct semctl localtime readpipe do return format read sprintf dbmopen pop getpgrp not getpwnam rewinddir qqfileno qw endprotoent wait sethostent bless s|0 opendir continue each sleep endgrent shutdown dump chomp connect getsockname die socketpair close flock exists index shmgetsub for endpwent redo lstat msgctl setpgrp abs exit select print ref gethostbyaddr unshift fcntl syscall goto getnetbyaddr join gmtime symlink semget splice x|0 getpeername recv log setsockopt cos last reverse gethostbyname getgrnam study formline endhostent times chop length gethostent getnetent pack getprotoent getservbyname rand mkdir pos chmod y|0 substr endnetent printf next open msgsnd readdir use unlink getsockopt getpriority rindex wantarray hex system getservbyport endservent int chr untie rmdir prototype tell listen fork shmread ucfirst setprotoent else sysseek link getgrgid shmctl waitpid unpack getnetbyname reset chdir grep split require caller lcfirst until warn while values shift telldir getpwuid my getprotobynumber delete and sort uc defined srand accept package seekdir getprotobyname semop our rename seek if q|0 chroot sysread setpwent no crypt getc chown sqrt write setnetent setpriority foreach tie sin msgget map stat getlogin unless elsif truncate exec keys glob tied closedirioctl socket readlink eval xor readline binmode setservent eof ord bind alarm pipe atan2 getgrent exp time push setgrent gt lt or ne m|0 break given say state when\",r={cN:\"subst\",b:\"[$@]\\\\{\",e:\"\\\\}\",k:t},s={b:\"->{\",e:\"}\"},n={cN:\"variable\",v:[{b:/\\$\\d/},{b:/[\\$%@](\\^\\w\\b|#\\w+(::\\w+)*|{\\w+}|\\w+(::\\w*)*)/},{b:/[\\$%@][^\\s\\w{]/,r:0}]},o=[e.BE,r,n],i=[n,e.HCM,e.C(\"^\\\\=\\\\w\",\"\\\\=cut\",{eW:!0}),s,{cN:\"string\",c:o,v:[{b:\"q[qwxr]?\\\\s*\\\\(\",e:\"\\\\)\",r:5},{b:\"q[qwxr]?\\\\s*\\\\[\",e:\"\\\\]\",r:5},{b:\"q[qwxr]?\\\\s*\\\\{\",e:\"\\\\}\",r:5},{b:\"q[qwxr]?\\\\s*\\\\|\",e:\"\\\\|\",r:5},{b:\"q[qwxr]?\\\\s*\\\\<\",e:\"\\\\>\",r:5},{b:\"qw\\\\s+q\",e:\"q\",r:5},{b:\"'\",e:\"'\",c:[e.BE]},{b:'\"',e:'\"'},{b:\"`\",e:\"`\",c:[e.BE]},{b:\"{\\\\w+}\",c:[],r:0},{b:\"-?\\\\w+\\\\s*\\\\=\\\\>\",c:[],r:0}]},{cN:\"number\",b:\"(\\\\b0[0-7_]+)|(\\\\b0x[0-9a-fA-F_]+)|(\\\\b[1-9][0-9_]*(\\\\.[0-9_]+)?)|[0_]\\\\b\",r:0},{b:\"(\\\\/\\\\/|\"+e.RSR+\"|\\\\b(split|return|print|reverse|grep)\\\\b)\\\\s*\",k:\"split return print reverse grep\",r:0,c:[e.HCM,{cN:\"regexp\",b:\"(s|tr|y)/(\\\\\\\\.|[^/])*/(\\\\\\\\.|[^/])*/[a-z]*\",r:10},{cN:\"regexp\",b:\"(m|qr)?/\",e:\"/[a-z]*\",c:[e.BE],r:0}]},{cN:\"sub\",bK:\"sub\",e:\"(\\\\s*\\\\(.*?\\\\))?[;{]\",r:5},{cN:\"operator\",b:\"-\\\\w\\\\b\",r:0},{b:\"^__DATA__$\",e:\"^__END__$\",sL:\"mojolicious\",c:[{b:\"^@@.*\",e:\"$\",cN:\"comment\"}]}];return r.c=i,s.c=i,{aliases:[\"pl\"],k:t,c:i}});hljs.registerLanguage(\"cs\",function(e){var r=\"abstract as base bool break byte case catch char checked const continue decimal dynamic default delegate do double else enum event explicit extern false finally fixed float for foreach goto if implicit in int interface internal is lock long null when object operator out override params private protected public readonly ref sbyte sealed short sizeof stackalloc static 
string struct switch this true try typeof uint ulong unchecked unsafe ushort using virtual volatile void while async protected public private internal ascending descending from get group into join let orderby partial select set value var where yield\",t=e.IR+\"(<\"+e.IR+\">)?\";return{aliases:[\"csharp\"],k:r,i:/::/,c:[e.C(\"///\",\"$\",{rB:!0,c:[{cN:\"xmlDocTag\",v:[{b:\"///\",r:0},{b:\"<!--|-->\"},{b:\"</?\",e:\">\"}]}]}),e.CLCM,e.CBCM,{cN:\"preprocessor\",b:\"#\",e:\"$\",k:\"if else elif endif define undef warning error line region endregion pragma checksum\"},{cN:\"string\",b:'@\"',e:'\"',c:[{b:'\"\"'}]},e.ASM,e.QSM,e.CNM,{bK:\"class interface\",e:/[{;=]/,i:/[^\\s:]/,c:[e.TM,e.CLCM,e.CBCM]},{bK:\"namespace\",e:/[{;=]/,i:/[^\\s:]/,c:[{cN:\"title\",b:\"[a-zA-Z](\\\\.?\\\\w)*\",r:0},e.CLCM,e.CBCM]},{bK:\"new return throw await\",r:0},{cN:\"function\",b:\"(\"+t+\"\\\\s+)+\"+e.IR+\"\\\\s*\\\\(\",rB:!0,e:/[{;=]/,eE:!0,k:r,c:[{b:e.IR+\"\\\\s*\\\\(\",rB:!0,c:[e.TM],r:0},{cN:\"params\",b:/\\(/,e:/\\)/,eB:!0,eE:!0,k:r,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]}]}});hljs.registerLanguage(\"apache\",function(e){var r={cN:\"number\",b:\"[\\\\$%]\\\\d+\"};return{aliases:[\"apacheconf\"],cI:!0,c:[e.HCM,{cN:\"tag\",b:\"</?\",e:\">\"},{cN:\"keyword\",b:/\\w+/,r:0,k:{common:\"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername\"},starts:{e:/$/,r:0,k:{literal:\"on off all\"},c:[{cN:\"sqbracket\",b:\"\\\\s\\\\[\",e:\"\\\\]$\"},{cN:\"cbracket\",b:\"[\\\\$%]\\\\{\",e:\"\\\\}\",c:[\"self\",r]},r,e.QSM]}}],i:/\\S/}});hljs.registerLanguage(\"http\",function(t){return{aliases:[\"https\"],i:\"\\\\S\",c:[{cN:\"status\",b:\"^HTTP/[0-9\\\\.]+\",e:\"$\",c:[{cN:\"number\",b:\"\\\\b\\\\d{3}\\\\b\"}]},{cN:\"request\",b:\"^[A-Z]+ (.*?) HTTP/[0-9\\\\.]+$\",rB:!0,e:\"$\",c:[{cN:\"string\",b:\" \",e:\" \",eB:!0,eE:!0}]},{cN:\"attribute\",b:\"^\\\\w\",e:\": \",eE:!0,i:\"\\\\n|\\\\s|=\",starts:{cN:\"string\",e:\"$\"}},{b:\"\\\\n\\\\n\",starts:{sL:[],eW:!0}}]}});hljs.registerLanguage(\"objectivec\",function(e){var t={cN:\"built_in\",b:\"(AV|CA|CF|CG|CI|MK|MP|NS|UI)\\\\w+\"},i={keyword:\"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required\",literal:\"false true FALSE TRUE nil YES NO NULL\",built_in:\"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once\"},o=/[a-zA-Z@][a-zA-Z0-9_]*/,n=\"@interface @class @protocol @implementation\";return{aliases:[\"mm\",\"objc\",\"obj-c\"],k:i,l:o,i:\"</\",c:[t,e.CLCM,e.CBCM,e.CNM,e.QSM,{cN:\"string\",v:[{b:'@\"',e:'\"',i:\"\\\\n\",c:[e.BE]},{b:\"'\",e:\"[^\\\\\\\\]'\",i:\"[^\\\\\\\\][^']\"}]},{cN:\"preprocessor\",b:\"#\",e:\"$\",c:[{cN:\"title\",v:[{b:'\"',e:'\"'},{b:\"<\",e:\">\"}]}]},{cN:\"class\",b:\"(\"+n.split(\" \").join(\"|\")+\")\\\\b\",e:\"({|$)\",eE:!0,k:n,l:o,c:[e.UTM]},{cN:\"variable\",b:\"\\\\.\"+e.UIR,r:0}]}});hljs.registerLanguage(\"python\",function(e){var r={cN:\"prompt\",b:/^(>>>|\\.\\.\\.) 
/},b={cN:\"string\",c:[e.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[r],r:10},{b:/(u|b)?r?\"\"\"/,e:/\"\"\"/,c:[r],r:10},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)\"/,e:/\"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)\"/,e:/\"/},e.ASM,e.QSM]},a={cN:\"number\",r:0,v:[{b:e.BNR+\"[lLjJ]?\"},{b:\"\\\\b(0o[0-7]+)[lLjJ]?\"},{b:e.CNR+\"[lLjJ]?\"}]},l={cN:\"params\",b:/\\(/,e:/\\)/,c:[\"self\",r,a,b]};return{aliases:[\"py\",\"gyp\"],k:{keyword:\"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10 None True False\",built_in:\"Ellipsis NotImplemented\"},i:/(<\\/|->|\\?)/,c:[r,a,b,e.HCM,{v:[{cN:\"function\",bK:\"def\",r:10},{cN:\"class\",bK:\"class\"}],e:/:/,i:/[${=;\\n,]/,c:[e.UTM,l]},{cN:\"decorator\",b:/^[\\t ]*@/,e:/$/},{b:/\\b(print|exec)\\(/}]}});hljs.registerLanguage(\"java\",function(e){var a=e.UIR+\"(<\"+e.UIR+\">)?\",t=\"false synchronized int abstract float private char boolean static null if const for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private\",c=\"\\\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\\\d]+[\\\\d_]+[\\\\d]+|[\\\\d]+)(\\\\.([\\\\d]+[\\\\d_]+[\\\\d]+|[\\\\d]+))?|\\\\.([\\\\d]+[\\\\d_]+[\\\\d]+|[\\\\d]+))([eE][-+]?\\\\d+)?)[lLfF]?\",r={cN:\"number\",b:c,r:0};return{aliases:[\"jsp\"],k:t,i:/<\\/|#/,c:[e.C(\"/\\\\*\\\\*\",\"\\\\*/\",{r:0,c:[{cN:\"doctag\",b:\"@[A-Za-z]+\"}]}),e.CLCM,e.CBCM,e.ASM,e.QSM,{cN:\"class\",bK:\"class interface\",e:/[{;=]/,eE:!0,k:\"class interface\",i:/[:\"\\[\\]]/,c:[{bK:\"extends implements\"},e.UTM]},{bK:\"new throw return else\",r:0},{cN:\"function\",b:\"(\"+a+\"\\\\s+)+\"+e.UIR+\"\\\\s*\\\\(\",rB:!0,e:/[{;=]/,eE:!0,k:t,c:[{b:e.UIR+\"\\\\s*\\\\(\",rB:!0,r:0,c:[e.UTM]},{cN:\"params\",b:/\\(/,e:/\\)/,k:t,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]},r,{cN:\"annotation\",b:\"@[A-Za-z]+\"}]}});hljs.registerLanguage(\"bash\",function(e){var t={cN:\"variable\",v:[{b:/\\$[\\w\\d#@][\\w\\d_]*/},{b:/\\$\\{(.*?)}/}]},s={cN:\"string\",b:/\"/,e:/\"/,c:[e.BE,t,{cN:\"variable\",b:/\\$\\(/,e:/\\)/,c:[e.BE]}]},a={cN:\"string\",b:/'/,e:/'/};return{aliases:[\"sh\",\"zsh\"],l:/-?[a-z\\.]+/,k:{keyword:\"if then else elif fi for while in do done case esac function\",literal:\"true false\",built_in:\"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp\",operator:\"-ne -eq -lt -gt -f -d -e -s -l -a\"},c:[{cN:\"shebang\",b:/^#![^\\n]+sh\\s*$/,r:10},{cN:\"function\",b:/\\w[\\w\\d_]*\\s*\\(\\s*\\)\\s*\\{/,rB:!0,c:[e.inherit(e.TM,{b:/\\w[\\w\\d_]*/})],r:0},e.HCM,e.NM,s,a,t]}});hljs.registerLanguage(\"sql\",function(e){var 
t=e.C(\"--\",\"$\");return{cI:!0,i:/[<>{}*]/,c:[{cN:\"operator\",bK:\"begin end start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup revoke\",e:/;/,eW:!0,k:{keyword:\"abort abs absolute acc acce accep accept access accessed accessible account acos action activate add addtime admin administer advanced advise aes_decrypt aes_encrypt after agent aggregate ali alia alias allocate allow alter always analyze ancillary and any anydata anydataset anyschema anytype apply archive archived archivelog are as asc ascii asin assembly assertion associate asynchronous at atan atn2 attr attri attrib attribu attribut attribute attributes audit authenticated authentication authid authors auto autoallocate autodblink autoextend automatic availability avg backup badfile basicfile before begin beginning benchmark between bfile bfile_base big bigfile bin binary_double binary_float binlog bit_and bit_count bit_length bit_or bit_xor bitmap blob_base block blocksize body both bound buffer_cache buffer_pool build bulk by byte byteordermark bytes c cache caching call calling cancel capacity cascade cascaded case cast catalog category ceil ceiling chain change changed char_base char_length character_length characters characterset charindex charset charsetform charsetid check checksum checksum_agg child choose chr chunk class cleanup clear client clob clob_base clone close cluster_id cluster_probability cluster_set clustering coalesce coercibility col collate collation collect colu colum column column_value columns columns_updated comment commit compact compatibility compiled complete composite_limit compound compress compute concat concat_ws concurrent confirm conn connec connect connect_by_iscycle connect_by_isleaf connect_by_root connect_time connection consider consistent constant constraint constraints constructor container content contents context contributors controlfile conv convert convert_tz corr corr_k corr_s corresponding corruption cos cost count count_big counted covar_pop covar_samp cpu_per_call cpu_per_session crc32 create creation critical cross cube cume_dist curdate current current_date current_time current_timestamp current_user cursor curtime customdatum cycle d data database databases datafile datafiles datalength date_add date_cache date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts day day_to_second dayname dayofmonth dayofweek dayofyear days db_role_change dbtimezone ddl deallocate declare decode decompose decrement decrypt deduplicate def defa defau defaul default defaults deferred defi defin define degrees delayed delegate delete delete_all delimited demand dense_rank depth dequeue des_decrypt des_encrypt des_key_file desc descr descri describ describe descriptor deterministic diagnostics difference dimension direct_load directory disable disable_all disallow disassociate discardfile disconnect diskgroup distinct distinctrow distribute distributed div do document domain dotnet double downgrade drop dumpfile duplicate duration e each edition editionable editions element ellipsis else elsif elt empty enable enable_all enclosed encode encoding encrypt end end-exec endian enforced engine engines enqueue enterprise entityescaping eomonth error errors escaped evalname 
evaluate event eventdata events except exception exceptions exchange exclude excluding execu execut execute exempt exists exit exp expire explain export export_set extended extent external external_1 external_2 externally extract f failed failed_login_attempts failover failure far fast feature_set feature_value fetch field fields file file_name_convert filesystem_like_logging final finish first first_value fixed flash_cache flashback floor flush following follows for forall force form forma format found found_rows freelist freelists freepools fresh from from_base64 from_days ftp full function g general generated get get_format get_lock getdate getutcdate global global_name globally go goto grant grants greatest group group_concat group_id grouping grouping_id groups gtid_subtract guarantee guard handler hash hashkeys having hea head headi headin heading heap help hex hierarchy high high_priority hosts hour http i id ident_current ident_incr ident_seed identified identity idle_time if ifnull ignore iif ilike ilm immediate import in include including increment index indexes indexing indextype indicator indices inet6_aton inet6_ntoa inet_aton inet_ntoa infile initial initialized initially initrans inmemory inner innodb input insert install instance instantiable instr interface interleaved intersect into invalidate invisible is is_free_lock is_ipv4 is_ipv4_compat is_not is_not_null is_used_lock isdate isnull isolation iterate java join json json_exists k keep keep_duplicates key keys kill l language large last last_day last_insert_id last_value lax lcase lead leading least leaves left len lenght length less level levels library like like2 like4 likec limit lines link list listagg little ln load load_file lob lobs local localtime localtimestamp locate locator lock locked log log10 log2 logfile logfiles logging logical logical_reads_per_call logoff logon logs long loop low low_priority lower lpad lrtrim ltrim m main make_set makedate maketime managed management manual map mapping mask master master_pos_wait match matched materialized max maxextents maximize maxinstances maxlen maxlogfiles maxloghistory maxlogmembers maxsize maxtrans md5 measures median medium member memcompress memory merge microsecond mid migration min minextents minimum mining minus minute minvalue missing mod mode model modification modify module monitoring month months mount move movement multiset mutex n name name_const names nan national native natural nav nchar nclob nested never new newline next nextval no no_write_to_binlog noarchivelog noaudit nobadfile nocheck nocompress nocopy nocycle nodelay nodiscardfile noentityescaping noguarantee nokeep nologfile nomapping nomaxvalue nominimize nominvalue nomonitoring none noneditionable nonschema noorder nopr nopro noprom nopromp noprompt norely noresetlogs noreverse normal norowdependencies noschemacheck noswitch not nothing notice notrim novalidate now nowait nth_value nullif nulls num numb numbe nvarchar nvarchar2 object ocicoll ocidate ocidatetime ociduration ociinterval ociloblocator ocinumber ociref ocirefcursor ocirowid ocistring ocitype oct octet_length of off offline offset oid oidindex old on online only opaque open operations operator optimal optimize option optionally or oracle oracle_date oradata ord ordaudio orddicom orddoc order ordimage ordinality ordvideo organization orlany orlvary out outer outfile outline output over overflow overriding p package pad parallel parallel_enable parameters parent parse partial partition partitions pascal passing password 
password_grace_time password_lock_time password_reuse_max password_reuse_time password_verify_function patch path patindex pctincrease pctthreshold pctused pctversion percent percent_rank percentile_cont percentile_disc performance period period_add period_diff permanent physical pi pipe pipelined pivot pluggable plugin policy position post_transaction pow power pragma prebuilt precedes preceding precision prediction prediction_cost prediction_details prediction_probability prediction_set prepare present preserve prior priority private private_sga privileges procedural procedure procedure_analyze processlist profiles project prompt protection public publishingservername purge quarter query quick quiesce quota quotename radians raise rand range rank raw read reads readsize rebuild record records recover recovery recursive recycle redo reduced ref reference referenced references referencing refresh regexp_like register regr_avgx regr_avgy regr_count regr_intercept regr_r2 regr_slope regr_sxx regr_sxy reject rekey relational relative relaylog release release_lock relies_on relocate rely rem remainder rename repair repeat replace replicate replication required reset resetlogs resize resource respect restore restricted result result_cache resumable resume retention return returning returns reuse reverse revoke right rlike role roles rollback rolling rollup round row row_count rowdependencies rowid rownum rows rtrim rules safe salt sample save savepoint sb1 sb2 sb4 scan schema schemacheck scn scope scroll sdo_georaster sdo_topo_geometry search sec_to_time second section securefile security seed segment select self sequence sequential serializable server servererror session session_user sessions_per_user set sets settings sha sha1 sha2 share shared shared_pool short show shrink shutdown si_averagecolor si_colorhistogram si_featurelist si_positionalcolor si_stillimage si_texture siblings sid sign sin size size_t sizes skip slave sleep smalldatetimefromparts smallfile snapshot some soname sort soundex source space sparse spfile split sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_small_result sql_variant_property sqlcode sqldata sqlerror sqlname sqlstate sqrt square standalone standby start starting startup statement static statistics stats_binomial_test stats_crosstab stats_ks_test stats_mode stats_mw_test stats_one_way_anova stats_t_test_ stats_t_test_indep stats_t_test_one stats_t_test_paired stats_wsr_test status std stddev stddev_pop stddev_samp stdev stop storage store stored str str_to_date straight_join strcmp strict string struct stuff style subdate subpartition subpartitions substitutable substr substring subtime subtring_index subtype success sum suspend switch switchoffset switchover sync synchronous synonym sys sys_xmlagg sysasm sysaux sysdate sysdatetimeoffset sysdba sysoper system system_user sysutcdatetime t table tables tablespace tan tdo template temporary terminated tertiary_weights test than then thread through tier ties time time_format time_zone timediff timefromparts timeout timestamp timestampadd timestampdiff timezone_abbr timezone_minute timezone_region to to_base64 to_date to_days to_seconds todatetimeoffset trace tracking transaction transactional translate translation treat trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse type ub1 ub2 ub4 ucase unarchived unbounded uncompress under undo unhex unicode uniform uninstall union unique unix_timestamp unknown unlimited unlock unpivot unrecoverable unsafe unsigned until 
untrusted unusable unused update updated upgrade upped upper upsert url urowid usable usage use use_stored_outlines user user_data user_resources users using utc_date utc_timestamp uuid uuid_short validate validate_password_strength validation valist value values var var_samp varcharc vari varia variab variabl variable variables variance varp varraw varrawc varray verify version versions view virtual visible void wait wallet warning warnings week weekday weekofyear wellformed when whene whenev wheneve whenever where while whitespace with within without work wrapped xdb xml xmlagg xmlattributes xmlcast xmlcolattval xmlelement xmlexists xmlforest xmlindex xmlnamespaces xmlpi xmlquery xmlroot xmlschema xmlserialize xmltable xmltype xor year year_to_month years yearweek\",literal:\"true false null\",built_in:\"array bigint binary bit blob boolean char character date dec decimal float int int8 integer interval number numeric real record serial serial8 smallint text varchar varying void\"},c:[{cN:\"string\",b:\"'\",e:\"'\",c:[e.BE,{b:\"''\"}]},{cN:\"string\",b:'\"',e:'\"',c:[e.BE,{b:'\"\"'}]},{cN:\"string\",b:\"`\",e:\"`\",c:[e.BE]},e.CNM,e.CBCM,t]},e.CBCM,t]}});hljs.registerLanguage(\"nginx\",function(e){var r={cN:\"variable\",v:[{b:/\\$\\d+/},{b:/\\$\\{/,e:/}/},{b:\"[\\\\$\\\\@]\"+e.UIR}]},b={eW:!0,l:\"[a-z/_]+\",k:{built_in:\"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll /dev/poll\"},r:0,i:\"=>\",c:[e.HCM,{cN:\"string\",c:[e.BE,r],v:[{b:/\"/,e:/\"/},{b:/'/,e:/'/}]},{cN:\"url\",b:\"([a-z]+):/\",e:\"\\\\s\",eW:!0,eE:!0,c:[r]},{cN:\"regexp\",c:[e.BE,r],v:[{b:\"\\\\s\\\\^\",e:\"\\\\s|{|;\",rE:!0},{b:\"~\\\\*?\\\\s+\",e:\"\\\\s|{|;\",rE:!0},{b:\"\\\\*(\\\\.[a-z\\\\-]+)+\"},{b:\"([a-z\\\\-]+\\\\.)+\\\\*\"}]},{cN:\"number\",b:\"\\\\b\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}(:\\\\d{1,5})?\\\\b\"},{cN:\"number\",b:\"\\\\b\\\\d+[kKmMgGdshdwy]*\\\\b\",r:0},r]};return{aliases:[\"nginxconf\"],c:[e.HCM,{b:e.UIR+\"\\\\s\",e:\";|{\",rB:!0,c:[{cN:\"title\",b:e.UIR,starts:b}],r:0}],i:\"[^\\\\s\\\\}]\"}});hljs.registerLanguage(\"cpp\",function(t){var e={cN:\"keyword\",b:\"\\\\b[a-z\\\\d_]*_t\\\\b\"},r={cN:\"string\",v:[t.inherit(t.QSM,{b:'((u8?|U)|L)?\"'}),{b:'(u8?|U)?R\"',e:'\"',c:[t.BE]},{b:\"'\\\\\\\\?.\",e:\"'\",i:\".\"}]},s={cN:\"number\",v:[{b:\"\\\\b(\\\\d+(\\\\.\\\\d*)?|\\\\.\\\\d+)(u|U|l|L|ul|UL|f|F)\"},{b:t.CNR}]},i={cN:\"preprocessor\",b:\"#\",e:\"$\",k:\"if else elif endif define undef warning error line pragma ifdef ifndef\",c:[{b:/\\\\\\n/,r:0},{bK:\"include\",e:\"$\",c:[r,{cN:\"string\",b:\"<\",e:\">\",i:\"\\\\n\"}]},r,s,t.CLCM,t.CBCM]},a=t.IR+\"\\\\s*\\\\(\",c={keyword:\"int float while private char catch export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const struct for static_cast|10 union namespace unsigned long volatile static protected bool template mutable if public friend do goto auto void enum else break extern using class asm case typeid short reinterpret_cast|10 default double register explicit signed typename try this switch continue inline delete alignof constexpr decltype noexcept static_assert thread_local restrict _Bool complex _Complex _Imaginary atomic_bool atomic_char atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint atomic_long atomic_ulong atomic_llong atomic_ullong\",built_in:\"std string cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set bitset multiset 
multimap unordered_set unordered_map unordered_multiset unordered_multimap array shared_ptr abort abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf\",literal:\"true false nullptr NULL\"};return{aliases:[\"c\",\"cc\",\"h\",\"c++\",\"h++\",\"hpp\"],k:c,i:\"</\",c:[e,t.CLCM,t.CBCM,s,r,i,{b:\"\\\\b(deque|list|queue|stack|vector|map|set|bitset|multiset|multimap|unordered_map|unordered_set|unordered_multiset|unordered_multimap|array)\\\\s*<\",e:\">\",k:c,c:[\"self\",e]},{b:t.IR+\"::\",k:c},{bK:\"new throw return else\",r:0},{cN:\"function\",b:\"(\"+t.IR+\"[\\\\*&\\\\s]+)+\"+a,rB:!0,e:/[{;=]/,eE:!0,k:c,i:/[^\\w\\s\\*&]/,c:[{b:a,rB:!0,c:[t.TM],r:0},{cN:\"params\",b:/\\(/,e:/\\)/,k:c,r:0,c:[t.CLCM,t.CBCM,r,s]},t.CLCM,t.CBCM,i]}]}});hljs.registerLanguage(\"php\",function(e){var c={cN:\"variable\",b:\"\\\\$+[a-zA-Z_-ÿ][a-zA-Z0-9_-ÿ]*\"},a={cN:\"preprocessor\",b:/<\\?(php)?|\\?>/},i={cN:\"string\",c:[e.BE,a],v:[{b:'b\"',e:'\"'},{b:\"b'\",e:\"'\"},e.inherit(e.ASM,{i:null}),e.inherit(e.QSM,{i:null})]},t={v:[e.BNM,e.CNM]};return{aliases:[\"php3\",\"php4\",\"php5\",\"php6\"],cI:!0,k:\"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception default die require __FUNCTION__ enddeclare final try switch continue endfor endif declare unset true false trait goto instanceof insteadof __DIR__ __NAMESPACE__ yield finally\",c:[e.CLCM,e.HCM,e.C(\"/\\\\*\",\"\\\\*/\",{c:[{cN:\"doctag\",b:\"@[A-Za-z]+\"},a]}),e.C(\"__halt_compiler.+?;\",!1,{eW:!0,k:\"__halt_compiler\",l:e.UIR}),{cN:\"string\",b:/<<<['\"]?\\w+['\"]?$/,e:/^\\w+;?$/,c:[e.BE,{cN:\"subst\",v:[{b:/\\$\\w+/},{b:/\\{\\$/,e:/\\}/}]}]},a,c,{b:/(::|->)+[a-zA-Z_\\x7f-\\xff][a-zA-Z0-9_\\x7f-\\xff]*/},{cN:\"function\",bK:\"function\",e:/[;{]/,eE:!0,i:\"\\\\$|\\\\[|%\",c:[e.UTM,{cN:\"params\",b:\"\\\\(\",e:\"\\\\)\",c:[\"self\",c,e.CBCM,i,t]}]},{cN:\"class\",bK:\"class interface\",e:\"{\",eE:!0,i:/[:\\(\\$\"]/,c:[{bK:\"extends implements\"},e.UTM]},{bK:\"namespace\",e:\";\",i:/[\\.']/,c:[e.UTM]},{bK:\"use\",e:\";\",c:[e.UTM]},{b:\"=>\"},i,t]}});hljs.registerLanguage(\"coffeescript\",function(e){var c={keyword:\"in if for while finally new do return else break catch instanceof throw try this switch continue typeof delete debugger super then unless until loop of by when and or is isnt not\",literal:\"true false null undefined yes no on off\",built_in:\"npm require console print module global window document\"},n=\"[A-Za-z$_][0-9A-Za-z$_]*\",r={cN:\"subst\",b:/#\\{/,e:/}/,k:c},t=[e.BNM,e.inherit(e.CNM,{starts:{e:\"(\\\\s*/)?\",r:0}}),{cN:\"string\",v:[{b:/'''/,e:/'''/,c:[e.BE]},{b:/'/,e:/'/,c:[e.BE]},{b:/\"\"\"/,e:/\"\"\"/,c:[e.BE,r]},{b:/\"/,e:/\"/,c:[e.BE,r]}]},{cN:\"regexp\",v:[{b:\"///\",e:\"///\",c:[r,e.HCM]},{b:\"//[gim]*\",r:0},{b:/\\/(?![ *])(\\\\\\/|.)*?\\/[gim]*(?=\\W|$)/}]},{cN:\"property\",b:\"@\"+n},{b:\"`\",e:\"`\",eB:!0,eE:!0,sL:\"javascript\"}];r.c=t;var 
s=e.inherit(e.TM,{b:n}),i=\"(\\\\(.*\\\\))?\\\\s*\\\\B[-=]>\",o={cN:\"params\",b:\"\\\\([^\\\\(]\",rB:!0,c:[{b:/\\(/,e:/\\)/,k:c,c:[\"self\"].concat(t)}]};return{aliases:[\"coffee\",\"cson\",\"iced\"],k:c,i:/\\/\\*/,c:t.concat([e.C(\"###\",\"###\"),e.HCM,{cN:\"function\",b:\"^\\\\s*\"+n+\"\\\\s*=\\\\s*\"+i,e:\"[-=]>\",rB:!0,c:[s,o]},{b:/[:\\(,=]\\s*/,r:0,c:[{cN:\"function\",b:i,e:\"[-=]>\",rB:!0,c:[o]}]},{cN:\"class\",bK:\"class\",e:\"$\",i:/[:=\"\\[\\]]/,c:[{bK:\"extends\",eW:!0,i:/[:=\"\\[\\]]/,c:[s]},s]},{cN:\"attribute\",b:n+\":\",e:\":\",rB:!0,rE:!0,r:0}])}});hljs.registerLanguage(\"javascript\",function(e){return{aliases:[\"js\"],k:{keyword:\"in of if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const export super debugger as async await\",literal:\"true false null undefined NaN Infinity\",built_in:\"eval isFinite isNaN parseFloat parseInt decodeURI decodeURIComponent encodeURI encodeURIComponent escape unescape Object Function Boolean Error EvalError InternalError RangeError ReferenceError StopIteration SyntaxError TypeError URIError Number Math Date String RegExp Array Float32Array Float64Array Int16Array Int32Array Int8Array Uint16Array Uint32Array Uint8Array Uint8ClampedArray ArrayBuffer DataView JSON Intl arguments require module console window document Symbol Set Map WeakSet WeakMap Proxy Reflect Promise\"},c:[{cN:\"pi\",r:10,b:/^\\s*['\"]use (strict|asm)['\"]/},e.ASM,e.QSM,{cN:\"string\",b:\"`\",e:\"`\",c:[e.BE,{cN:\"subst\",b:\"\\\\$\\\\{\",e:\"\\\\}\"}]},e.CLCM,e.CBCM,{cN:\"number\",v:[{b:\"\\\\b(0[bB][01]+)\"},{b:\"\\\\b(0[oO][0-7]+)\"},{b:e.CNR}],r:0},{b:\"(\"+e.RSR+\"|\\\\b(case|return|throw)\\\\b)\\\\s*\",k:\"return throw case\",c:[e.CLCM,e.CBCM,e.RM,{b:/</,e:/>\\s*[);\\]]/,r:0,sL:\"xml\"}],r:0},{cN:\"function\",bK:\"function\",e:/\\{/,eE:!0,c:[e.inherit(e.TM,{b:/[A-Za-z$_][0-9A-Za-z$_]*/}),{cN:\"params\",b:/\\(/,e:/\\)/,eB:!0,eE:!0,c:[e.CLCM,e.CBCM]}],i:/\\[|%/},{b:/\\$[(.]/},{b:\"\\\\.\"+e.IR,r:0},{bK:\"import\",e:\"[;$]\",k:\"import from as\",c:[e.ASM,e.QSM]},{cN:\"class\",bK:\"class\",e:/[{;=]/,eE:!0,i:/[:\"\\[\\]]/,c:[{bK:\"extends\"},e.UTM]}],i:/#/}});hljs.registerLanguage(\"ini\",function(e){var c={cN:\"string\",c:[e.BE],v:[{b:\"'''\",e:\"'''\",r:10},{b:'\"\"\"',e:'\"\"\"',r:10},{b:'\"',e:'\"'},{b:\"'\",e:\"'\"}]};return{aliases:[\"toml\"],cI:!0,i:/\\S/,c:[e.C(\";\",\"$\"),e.HCM,{cN:\"title\",b:/^\\s*\\[+/,e:/\\]+/},{cN:\"setting\",b:/^[a-z0-9\\[\\]_-]+\\s*=\\s*/,e:\"$\",c:[{cN:\"value\",eW:!0,k:\"on off true false yes no\",c:[{cN:\"variable\",v:[{b:/\\$[\\w\\d\"][\\w\\d_]*/},{b:/\\$\\{(.*?)}/}]},c,{cN:\"number\",b:/([\\+\\-]+)?[\\d]+_[\\d_]+/},e.NM],r:0}]}]}});hljs.registerLanguage(\"diff\",function(e){return{aliases:[\"patch\"],c:[{cN:\"chunk\",r:10,v:[{b:/^@@ +\\-\\d+,\\d+ +\\+\\d+,\\d+ +@@$/},{b:/^\\*\\*\\* +\\d+,\\d+ +\\*\\*\\*\\*$/},{b:/^\\-\\-\\- +\\d+,\\d+ +\\-\\-\\-\\-$/}]},{cN:\"header\",v:[{b:/Index: /,e:/$/},{b:/=====/,e:/=====$/},{b:/^\\-\\-\\-/,e:/$/},{b:/^\\*{3} /,e:/$/},{b:/^\\+\\+\\+/,e:/$/},{b:/\\*{5}/,e:/\\*{5}$/}]},{cN:\"addition\",b:\"^\\\\+\",e:\"$\"},{cN:\"deletion\",b:\"^\\\\-\",e:\"$\"},{cN:\"change\",b:\"^\\\\!\",e:\"$\"}]}});\nexports.hljs = hljs;\n"
        },
        "$:/plugins/tiddlywiki/highlight/highlight.css": {
            "type": "text/css",
            "title": "$:/plugins/tiddlywiki/highlight/highlight.css",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": "/*\n\nOriginal style from softwaremaniacs.org (c) Ivan Sagalaev <Maniac@SoftwareManiacs.Org>\n\n*/\n\n.hljs {\n  display: block;\n  overflow-x: auto;\n  padding: 0.5em;\n  background: #f0f0f0;\n  -webkit-text-size-adjust: none;\n}\n\n.hljs,\n.hljs-subst,\n.hljs-tag .hljs-title,\n.nginx .hljs-title {\n  color: black;\n}\n\n.hljs-string,\n.hljs-title,\n.hljs-constant,\n.hljs-parent,\n.hljs-tag .hljs-value,\n.hljs-rule .hljs-value,\n.hljs-preprocessor,\n.hljs-pragma,\n.hljs-name,\n.haml .hljs-symbol,\n.ruby .hljs-symbol,\n.ruby .hljs-symbol .hljs-string,\n.hljs-template_tag,\n.django .hljs-variable,\n.smalltalk .hljs-class,\n.hljs-addition,\n.hljs-flow,\n.hljs-stream,\n.bash .hljs-variable,\n.pf .hljs-variable,\n.apache .hljs-tag,\n.apache .hljs-cbracket,\n.tex .hljs-command,\n.tex .hljs-special,\n.erlang_repl .hljs-function_or_atom,\n.asciidoc .hljs-header,\n.markdown .hljs-header,\n.coffeescript .hljs-attribute,\n.tp .hljs-variable {\n  color: #800;\n}\n\n.smartquote,\n.hljs-comment,\n.hljs-annotation,\n.diff .hljs-header,\n.hljs-chunk,\n.asciidoc .hljs-blockquote,\n.markdown .hljs-blockquote {\n  color: #888;\n}\n\n.hljs-number,\n.hljs-date,\n.hljs-regexp,\n.hljs-literal,\n.hljs-hexcolor,\n.smalltalk .hljs-symbol,\n.smalltalk .hljs-char,\n.go .hljs-constant,\n.hljs-change,\n.lasso .hljs-variable,\n.makefile .hljs-variable,\n.asciidoc .hljs-bullet,\n.markdown .hljs-bullet,\n.asciidoc .hljs-link_url,\n.markdown .hljs-link_url {\n  color: #080;\n}\n\n.hljs-label,\n.ruby .hljs-string,\n.hljs-decorator,\n.hljs-filter .hljs-argument,\n.hljs-localvars,\n.hljs-array,\n.hljs-attr_selector,\n.hljs-important,\n.hljs-pseudo,\n.hljs-pi,\n.haml .hljs-bullet,\n.hljs-doctype,\n.hljs-deletion,\n.hljs-envvar,\n.hljs-shebang,\n.apache .hljs-sqbracket,\n.nginx .hljs-built_in,\n.tex .hljs-formula,\n.erlang_repl .hljs-reserved,\n.hljs-prompt,\n.asciidoc .hljs-link_label,\n.markdown .hljs-link_label,\n.vhdl .hljs-attribute,\n.clojure .hljs-attribute,\n.asciidoc .hljs-attribute,\n.lasso .hljs-attribute,\n.coffeescript .hljs-property,\n.hljs-phony {\n  color: #88f;\n}\n\n.hljs-keyword,\n.hljs-id,\n.hljs-title,\n.hljs-built_in,\n.css .hljs-tag,\n.hljs-doctag,\n.smalltalk .hljs-class,\n.hljs-winutils,\n.bash .hljs-variable,\n.pf .hljs-variable,\n.apache .hljs-tag,\n.hljs-type,\n.hljs-typename,\n.tex .hljs-command,\n.asciidoc .hljs-strong,\n.markdown .hljs-strong,\n.hljs-request,\n.hljs-status,\n.tp .hljs-data,\n.tp .hljs-io {\n  font-weight: bold;\n}\n\n.asciidoc .hljs-emphasis,\n.markdown .hljs-emphasis,\n.tp .hljs-units {\n  font-style: italic;\n}\n\n.nginx .hljs-built_in {\n  font-weight: normal;\n}\n\n.coffeescript .javascript,\n.javascript .xml,\n.lasso .markup,\n.tex .hljs-formula,\n.xml .javascript,\n.xml .vbscript,\n.xml .css,\n.xml .hljs-cdata {\n  opacity: 0.5;\n}\n"
        },
        "$:/plugins/tiddlywiki/highlight/highlightblock.js": {
            "text": "/*\\\ntitle: $:/plugins/tiddlywiki/highlight/highlightblock.js\ntype: application/javascript\nmodule-type: widget\n\nWraps up the fenced code blocks parser for highlight and use in TiddlyWiki5\n\n\\*/\n(function() {\n\n/*jslint node: true, browser: true */\n/*global $tw: false */\n\"use strict\";\n\nvar CodeBlockWidget = require(\"$:/core/modules/widgets/codeblock.js\").codeblock;\n\nvar hljs = require(\"$:/plugins/tiddlywiki/highlight/highlight.js\");\n\nhljs.configure({tabReplace: \"    \"});\t\n\nCodeBlockWidget.prototype.postRender = function() {\n\tvar domNode = this.domNodes[0];\n\tif($tw.browser && this.document !== $tw.fakeDocument && this.language) {\n\t\tdomNode.className = this.language.toLowerCase();\n\t\thljs.highlightBlock(domNode);\n\t} else if(!$tw.browser && this.language && this.language.indexOf(\"/\") === -1 ){\n\t\ttry {\n\t\t\tdomNode.className = this.language.toLowerCase() + \" hljs\";\n\t\t\tdomNode.children[0].innerHTML = hljs.fixMarkup(hljs.highlight(this.language, this.getAttribute(\"code\")).value);\n\t\t}\n\t\tcatch(err) {\n\t\t\t// Can't easily tell if a language is registered or not in the packed version of hightlight.js,\n\t\t\t// so we silently fail and the codeblock remains unchanged\n\t\t}\n\t}\t\n};\n\n})();\n",
            "title": "$:/plugins/tiddlywiki/highlight/highlightblock.js",
            "type": "application/javascript",
            "module-type": "widget"
        },
        "$:/plugins/tiddlywiki/highlight/license": {
            "title": "$:/plugins/tiddlywiki/highlight/license",
            "type": "text/plain",
            "text": "Copyright (c) 2006, Ivan Sagalaev\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright\n      notice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of highlight.js nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
        },
        "$:/plugins/tiddlywiki/highlight/readme": {
            "title": "$:/plugins/tiddlywiki/highlight/readme",
            "text": "This plugin provides syntax highlighting of code blocks using v8.8.0 of [[highlight.js|https://github.com/isagalaev/highlight.js]] from Ivan Sagalaev.\n\n! Usage\n\nWhen the plugin is installed it automatically applies highlighting to all codeblocks defined with triple backticks or with the CodeBlockWidget.\n\nThe language can optionally be specified after the opening triple braces:\n\n<$codeblock code=\"\"\"```css\n * { margin: 0; padding: 0; } /* micro reset */\n\nhtml { font-size: 62.5%; }\nbody { font-size: 14px; font-size: 1.4rem; } /* =14px */\nh1   { font-size: 24px; font-size: 2.4rem; } /* =24px */\n```\"\"\"/>\n\nIf no language is specified highlight.js will attempt to automatically detect the language.\n\n! Built-in Language Brushes\n\nThe plugin includes support for the following languages (referred to as \"brushes\" by highlight.js):\n\n* apache\n* bash\n* coffeescript\n* cpp\n* cs\n* css\n* diff\n* http\n* ini\n* java\n* javascript\n* json\n* makefile\n* markdown\n* nginx\n* objectivec\n* perl\n* php\n* python\n* ruby\n* sql\n* xml\n\n"
        },
        "$:/plugins/tiddlywiki/highlight/styles": {
            "title": "$:/plugins/tiddlywiki/highlight/styles",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": ".hljs{display:block;overflow-x:auto;padding:.5em;color:#333;background:#f8f8f8;-webkit-text-size-adjust:none}.hljs-comment,.diff .hljs-header,.hljs-javadoc{color:#998;font-style:italic}.hljs-keyword,.css .rule .hljs-keyword,.hljs-winutils,.nginx .hljs-title,.hljs-subst,.hljs-request,.hljs-status{color:#333;font-weight:bold}.hljs-number,.hljs-hexcolor,.ruby .hljs-constant{color:teal}.hljs-string,.hljs-tag .hljs-value,.hljs-phpdoc,.hljs-dartdoc,.tex .hljs-formula{color:#d14}.hljs-title,.hljs-id,.scss .hljs-preprocessor{color:#900;font-weight:bold}.hljs-list .hljs-keyword,.hljs-subst{font-weight:normal}.hljs-class .hljs-title,.hljs-type,.vhdl .hljs-literal,.tex .hljs-command{color:#458;font-weight:bold}.hljs-tag,.hljs-tag .hljs-title,.hljs-rule .hljs-property,.django .hljs-tag .hljs-keyword{color:navy;font-weight:normal}.hljs-attribute,.hljs-variable,.lisp .hljs-body,.hljs-name{color:teal}.hljs-regexp{color:#009926}.hljs-symbol,.ruby .hljs-symbol .hljs-string,.lisp .hljs-keyword,.clojure .hljs-keyword,.scheme .hljs-keyword,.tex .hljs-special,.hljs-prompt{color:#990073}.hljs-built_in{color:#0086b3}.hljs-preprocessor,.hljs-pragma,.hljs-pi,.hljs-doctype,.hljs-shebang,.hljs-cdata{color:#999;font-weight:bold}.hljs-deletion{background:#fdd}.hljs-addition{background:#dfd}.diff .hljs-change{background:#0086b3}.hljs-chunk{color:#aaa}"
        },
        "$:/plugins/tiddlywiki/highlight/usage": {
            "title": "$:/plugins/tiddlywiki/highlight/usage",
            "text": "! Usage\n\nFenced code blocks can have a language specifier added to trigger highlighting in a specific language. Otherwise heuristics are used to detect the language.\n\n```\n ```js\n var a = b + c; // Highlighted as JavaScript\n ```\n```\n! Adding Themes\n\nYou can add themes from highlight.js by copying the CSS to a new tiddler and tagging it with [[$:/tags/Stylesheet]]. The available themes can be found on GitHub:\n\nhttps://github.com/isagalaev/highlight.js/tree/master/src/styles\n"
        }
    }
}
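The usage tiddler above notes that further highlight.js themes can be added by copying a theme's CSS into a new tiddler tagged [[$:/tags/Stylesheet]]. As a minimal sketch, mirroring the fields of the shipped $:/plugins/tiddlywiki/highlight/styles tiddler, such a user-created theme tiddler could carry the fields below; the title and the single CSS rule are illustrative placeholders, not part of the plugin:

{
    "title": "$:/user/highlight-theme",
    "tags": "[[$:/tags/Stylesheet]]",
    "type": "text/css",
    "text": ".hljs { display: block; overflow-x: auto; padding: 0.5em; background: #f8f8f8; } /* replace with CSS copied from a highlight.js theme stylesheet */"
}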
{
    "tiddlers": {
        "$:/plugins/wimmoermans/history/fhistory.js": {
            "created": "20160511174147745",
            "creator": "wjam",
            "text": "/*\\\ntitle: $:/plugins/wimmoermans/fhistory.js\ntype: application/javascript\nmodule-type: filteroperator\n\na filter to generate ALL tiddler titles from historylist, \nrepairs escaped characters \" \\\n\nassumptions format of historylist \n  \"title\":\\s\"(.*)\"  where .* is the title of the visited tiddler\n\n@preserve\n\\*/\n\n (function() {\n        \"use strict\";\n        exports.fullhistory = function(e, t, i) {\n           var    o = [],\n                    match=\"\",\n                    regexp= \"\",\n                    ttt=\"\",\n                    text=\"\";\n            regexp = new RegExp(\"\\\"title\\\": \\\"(.+)\\\"\", \"ig\");\n            text = $tw.wiki.getTiddlerText(\"$:/HistoryList\");\n            while (match = regexp.exec(text)) {\n                ttt=match[1].replace(/\\\\\\\"/g,\"\\\"\");\n                ttt=ttt.replace(/\\\\\\\\/g,\"\\\\\");\n                o.push(ttt); /* oldest first */\n            }; /*while*/\n            return o;\n        }; /* export */\n\n }   )();",
            "type": "application/javascript",
            "title": "$:/plugins/wimmoermans/history/fhistory.js",
            "tags": "historyplugin",
            "module-type": "filteroperator",
            "modifier": "wjam",
            "modified": "20160513184814825"
        },
        "$:/plugins/wimmoermans/history/history.js": {
            "created": "20160505064231013",
            "creator": "Wim Moermans",
            "text": "/*\\\ntitle: $:/plugins/wimmoermans/history.js\ntype: application/javascript\nmodule-type: filteroperator\n\na filter to generate tiddler titles from historylist, reverse order, no duplicates (only most recent), no drafts.\n\nassumptions\n  \"title\":\\s\"(.*)\"  where .* is the title\n  \"Draft of '\" is the prefix for tiddler in edit mode\n\n@preserve\n\\*/\n\n (function() {\n        \"use strict\";\n        exports.history = function(e, t, i) {\n           var results = new Array(),\n                    o = [],\n                    match=\"\",\n                    regexp= \"\",\n                    text=\"\",\nttt=\"\",\n                    i=0,\n                    j=0,\n                    entries=0,\n                    found=0;\n            regexp = new RegExp(\"\\\"title\\\": \\\"(.+)\\\"\", \"ig\");\n            text = $tw.wiki.getTiddlerText(\"$:/HistoryList\");\n            while (match = regexp.exec(text)) {\n                ttt=match[1].replace(/\\\\\\\"/g,\"\\\"\");\n                ttt=ttt.replace(/\\\\\\\\/g,\"\\\\\");\n                if (ttt.substr(0, 10) !== \"Draft of '\") {\n                    results.push(ttt); /* oldest first */\n                    entries = entries + 1;\n                }\n            }\n            i = entries-1;\n            while (i >= 0) {\n                j = i + 1;\n                found = 0;\n                while ((j <= entries) && (found === 0)) {\n                    if (results[i] === results[j]) {\n                        found = 1;\n                    }\n                    j = j + 1;\n                }\n                if (found === 0) {\n\n                    if( results[i] !== \"\"){\n                         o.push(results[i]);\n                    }\n                }\n                i = i - 1;\n            };\n            return o;\n        }\n\n }   )();",
            "type": "application/javascript",
            "title": "$:/plugins/wimmoermans/history/history.js",
            "tags": "historyplugin",
            "module-type": "filteroperator",
            "modifier": "wjam",
            "modified": "20160513175106215"
        },
        "$:/plugins/wimmoermans/history/HistoryTab": {
            "created": "20160504135142490",
            "creator": "Wim Moermans",
            "text": "<small>breadcrumbs:</small>\n\n{{{ [history[]] }}}\n\n\n",
            "title": "$:/plugins/wimmoermans/history/HistoryTab",
            "tags": "$:/tags/SideBar historyplugin",
            "modifier": "wjam",
            "modified": "20160507201121730",
            "caption": "History"
        },
        "$:/plugins/wimmoermans/history/HistoryTab2": {
            "text": "<$linkcatcher to=\"$:/temp/advancedsearch\">\n\n<<lingo Shadows/Hint>>\n\n<div class=\"tc-search\">\n<$edit-text tiddler=\"$:/temp/advancedsearch\" type=\"search\" tag=\"input\"/>\n<$reveal state=\"$:/temp/advancedsearch\" type=\"nomatch\" text=\"\">\n<$button class=\"tc-btn-invisible\">\n<$action-setfield $tiddler=\"$:/temp/advancedsearch\" $field=\"text\" $value=\"\"/>\n{{$:/core/images/close-button}}\n</$button>\n</$reveal>\n</div>\n\n</$linkcatcher>\n{{{ [history[]search{$:/temp/advancedsearch}limit[26]] }}}\n",
            "title": "$:/plugins/wimmoermans/history/HistoryTab2",
            "tags": "$:/tags/AdvancedSearch historyplugin",
            "modifier": "wjam",
            "modified": "20160507171948465",
            "creator": "Wim Moermans",
            "created": "20160505094007336",
            "caption": "History2"
        },
        "$:/plugins/wimmoermans/history/icon": {
            "created": "20160508110003253",
            "title": "$:/plugins/wimmoermans/history/icon",
            "type": "image/svg+xml",
            "text": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<!-- Created with Inkscape (http://www.inkscape.org/) -->\n\n<svg\n   xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n   xmlns:cc=\"http://creativecommons.org/ns#\"\n   xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\n   xmlns:svg=\"http://www.w3.org/2000/svg\"\n   xmlns=\"http://www.w3.org/2000/svg\"\n   xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n   xmlns:sodipodi=\"http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd\"\n   xmlns:inkscape=\"http://www.inkscape.org/namespaces/inkscape\"\n   width=\"22\"\n   height=\"21\"\n   id=\"svg4046\"\n   version=\"1.1\"\n   inkscape:version=\"0.47 r22583\"\n   sodipodi:docname=\"cat backtrack red sign 22x21.svg\">\n  <defs\n     id=\"defs4048\">\n    <linearGradient\n       inkscape:collect=\"always\"\n       id=\"linearGradient3600\">\n      <stop\n         style=\"stop-color:#ff0024;stop-opacity:1;\"\n         offset=\"0\"\n         id=\"stop3602\" />\n      <stop\n         style=\"stop-color:#ff0024;stop-opacity:0;\"\n         offset=\"1\"\n         id=\"stop3604\" />\n    </linearGradient>\n    <inkscape:perspective\n       sodipodi:type=\"inkscape:persp3d\"\n       inkscape:vp_x=\"0 : 526.18109 : 1\"\n       inkscape:vp_y=\"0 : 1000 : 0\"\n       inkscape:vp_z=\"744.09448 : 526.18109 : 1\"\n       inkscape:persp3d-origin=\"372.04724 : 350.78739 : 1\"\n       id=\"perspective4054\" />\n    <inkscape:perspective\n       id=\"perspective4064\"\n       inkscape:persp3d-origin=\"0.5 : 0.33333333 : 1\"\n       inkscape:vp_z=\"1 : 0.5 : 1\"\n       inkscape:vp_y=\"0 : 1000 : 0\"\n       inkscape:vp_x=\"0 : 0.5 : 1\"\n       sodipodi:type=\"inkscape:persp3d\" />\n    <linearGradient\n       inkscape:collect=\"always\"\n       xlink:href=\"#linearGradient3600\"\n       id=\"linearGradient3606\"\n       x1=\"-17.230652\"\n       y1=\"4.6165885\"\n       x2=\"-3.4143419\"\n       y2=\"4.6165885\"\n       gradientUnits=\"userSpaceOnUse\" />\n  </defs>\n  <sodipodi:namedview\n     id=\"base\"\n     pagecolor=\"#ffffff\"\n     bordercolor=\"#666666\"\n     borderopacity=\"1.0\"\n     inkscape:pageopacity=\"0.0\"\n     inkscape:pageshadow=\"2\"\n     inkscape:zoom=\"31.678384\"\n     inkscape:cx=\"9.633971\"\n     inkscape:cy=\"9.3724875\"\n     inkscape:document-units=\"px\"\n     inkscape:current-layer=\"layer1\"\n     showgrid=\"false\"\n     inkscape:window-width=\"1690\"\n     inkscape:window-height=\"1181\"\n     inkscape:window-x=\"-5\"\n     inkscape:window-y=\"-5\"\n     inkscape:window-maximized=\"1\" />\n  <metadata\n     id=\"metadata4051\">\n    <rdf:RDF>\n      <cc:Work\n         rdf:about=\"\">\n        <dc:format>image/svg+xml</dc:format>\n        <dc:type\n           rdf:resource=\"http://purl.org/dc/dcmitype/StillImage\" />\n        <dc:title></dc:title>\n      </cc:Work>\n    </rdf:RDF>\n  </metadata>\n  <g\n     inkscape:label=\"Layer 1\"\n     inkscape:groupmode=\"layer\"\n     id=\"layer1\"\n     transform=\"translate(-12.564828,-228.71506)\">\n    <path\n       sodipodi:type=\"arc\"\n       style=\"fill:#fcfcfc;fill-opacity:1;fill-rule:nonzero\"\n       id=\"path6042\"\n       sodipodi:cx=\"-1.4836615\"\n       sodipodi:cy=\"-1.6968651\"\n       sodipodi:rx=\"6.976366\"\n       sodipodi:ry=\"6.8500967\"\n       d=\"m 5.4927045,-1.6968651 a 6.976366,6.8500967 0 1 1 -13.9527321,0 6.976366,6.8500967 0 1 1 13.9527321,0 z\"\n       transform=\"matrix(1.2556561,0,0,1.2788018,25.334287,241.26263)\" />\n    <path\n       id=\"path6044\"\n       d=\"m 
30.4446,244.31397 c 0.310834,-0.28767 0.606444,-0.65004 0.656841,-0.80533 0.226308,-0.69733 -1.75153,-1.35182 -2.563323,-0.84824 -0.640438,0.39727 -1.154161,1.973 -0.807158,2.47583 0.257232,0.37275 0.420332,0.39322 1.137559,0.14288 0.460496,-0.16076 0.876334,-0.32601 0.924074,-0.36721 0.04729,-0.042 0.341159,-0.31027 0.65198,-0.59797 l 2.7e-5,4e-5 z m 0.597108,-2.74293 c 0.09612,-0.164 0.0099,-0.46244 -0.199577,-0.69068 -0.46117,-0.50252 -1.166755,-0.22586 -1.371622,0.53779 -0.138492,0.51626 -0.124003,0.53781 0.418457,0.62237 0.608375,0.0949 0.889192,-0.0195 1.152742,-0.46948 z m -3.686825,2.07878 c 0.168572,-0.62841 -0.06485,-0.93373 -0.745912,-0.97577 -0.770729,-0.0477 -1.241044,0.64384 -0.836604,1.22992 0.512291,0.74232 1.35136,0.60756 1.582532,-0.25415 l -1.6e-5,0 z m 1.462533,-2.12446 0.185272,-0.64054 -0.625699,-0.0677 c -0.706134,-0.0764 -0.924717,0.0207 -1.305369,0.57977 -0.335314,0.49243 -0.04392,0.93382 0.644496,0.97629 0.707662,0.0437 0.882331,-0.0906 1.101289,-0.84784 l 1.1e-5,-4e-5 z m -7.797022,1.15185 c 0.76937,-0.85185 0.741916,-1.28981 -0.106461,-1.69843 -0.998166,-0.48078 -1.914981,-0.37475 -2.454339,0.28389 -0.516439,0.63069 -0.583894,1.63687 -0.151704,2.26314 0.51397,0.74476 1.572512,0.41361 2.712495,-0.8486 z m -3.48099,-0.42697 c 0.0896,-0.69621 -0.04686,-0.87565 -0.696238,-0.91572 -1.139297,-0.0703 -1.566432,0.84984 -0.702808,1.51406 0.586816,0.4513 1.303444,0.14483 1.399073,-0.59834 l -2.7e-5,0 z m 3.354628,-2.52461 c 0.149115,-0.45951 -0.275478,-0.99883 -0.833864,-1.05921 -0.603977,-0.0653 -0.7421,0.0289 -0.89905,0.61367 -0.166828,0.62185 0.06374,0.9337 0.720441,0.97418 0.628634,0.0389 0.868921,-0.0867 1.012367,-0.52882 l 1.06e-4,1.8e-4 z m -2.408088,0.34458 c 0.112063,-0.75445 -0.0033,-0.89128 -0.721233,-0.85538 -0.828289,0.0413 -1.07332,0.23945 -1.137099,0.9192 -0.05268,0.56122 -0.02343,0.59189 0.653277,0.68515 0.878304,0.12109 1.095906,-0.0141 1.204881,-0.74921 l 1.74e-4,2.4e-4 z m 5.888163,-5.33851 c 0.142599,-0.43933 -0.245444,-0.96317 -1.034761,-1.39674 -0.659415,-0.36226 -1.526134,-0.27635 -1.956444,0.1939 -0.468183,0.51161 -0.852424,1.97658 -0.610417,2.32725 0.48829,0.70756 3.291025,-0.16736 3.601586,-1.12433 l 3.6e-5,-8e-5 z m 0.05327,-2.11052 c 0.567019,-0.52796 -0.337185,-1.89786 -1.117088,-1.69249 -0.480085,0.12648 -0.794832,1.02942 -0.505121,1.44923 0.309844,0.44897 1.249847,0.58994 1.622222,0.24325 l -1.3e-5,1e-5 z m -3.840095,1.12289 c 0.05032,-0.53627 0.0115,-0.59251 -0.526932,-0.76354 -0.319703,-0.10149 -0.703975,-0.10859 -0.853942,-0.0154 -0.412123,0.25566 -0.580704,0.98457 -0.316321,1.36768 0.511143,0.74066 1.608153,0.36021 1.697198,-0.58862 l -3e-6,-7e-5 z m 1.399399,-1.72835 c 0.13752,-0.4755 0.08353,-0.73271 -0.201357,-0.9592 -0.777497,-0.6182 -2.043348,0.0734 -1.830727,1.00011 0.08032,0.34992 1.408324,0.87902 1.720388,0.68544 0.06804,-0.0423 0.208269,-0.3691 0.311631,-0.72643 l 6.5e-5,8e-5 z\"\n       style=\"fill:#000000\"\n       sodipodi:nodetypes=\"cssssscccsssccsscccccsscccsssccsscccssscccssscccssscccsscccssscccssscc\" />\n  </g>\n</svg>\n",
            "modified": "20160508110047926"
        },
        "$:/plugins/wimmoermans/history/readme": {
            "created": "20160505113313287",
            "creator": "wjam",
            "text": "!!history filter\nTom Tumb (Dutch: Klein Duimpje).used breadcrumbs because he didn't want to get lost in the woods. \n\nWhen reading or editing a large ~TiddlyWiki you sometimes get lost and revisit tiddlers over and over.  This plugin ''automatically creates a list of all visited tiddlers'', and allows you to answer questions like \"Where did I read that?\", \"Did I update tiddler XXYY already?\", \"Which system tiddlers did I view/edit?\" \"Which tiddlers did I rename/delete?\". \n\n!!functionality \n\n*The ''plugin/filter'' generates the tiddlers which you visited since loading the ~TiddlyWiki. \n*Like  ~OpenTab all* tiddlers from the story river are shown in the history. When you ''close a tiddler'' it is removed from the ~OpenTab but is ''still present in the the history''. \n*Tiddler titles which were opened using tiddlers like $:/DefaultTiddlers are not present in the history.\n*Like  ~RecentTab, the history list includes the tiddlers you created or modified during this session. When you ''delete or rename'' a tiddler during your session the old tiddler title will be in ''//italics//''. \n\ncompare ''history[]  and ''fullhistory[]\n\n| |''history[]''|fullhistory[]|\n| most recent visited tiddlers|''most recent visited appear first''|most recent appear last|\n| Draft titles|''drafts not included ''|all drafts ^^dangerous[1]^^|\n| visited multiple times|''no duplicates, only most recent title''|includes all duplicates|\n| usage|normal use|advanced use only|\n\n!!examples\n\nTo display all visited tiddlers so far use\n\n ``{{{[history[]]}}}`` \n\nYou can sort the list alphabetically, ''search'' the tiddlers and ''limit'' the number of results to 25. e.g.\n\n``{{{[history[]search{$:/temp/search}limit[25]]}}}``\n\nif you want to exclude system tiddlers from the list:\n\n``{{{[history[]!is[system]]}}}``\n\nIf you want modified but possibly not yet saved tiddlers (incl renamed, deleted but excluding Draft. \n\n``{{{[history[]haschanged[]]}}}``\n\n''fullhistory[]'' is only included for //advanced users//. To generate the same list as history[] you would have to write \n``{{{[fullhistory[]!prefix[Draft]reverse[]unique[]]}}}``  ^^[2]^^\n\n!!how to install \n\n''Drag'' the link $:/plugins/wimmoermans/history to your wiki, ''import'' the tiddler and ''save'' your wiki, then ''LOAD'' the newly saved wiki.\nOr ''open'' the history tiddler in this wiki and use the top right icon ''V'', ''export tiddler'', ''JSON file'' to save the tiddler to disk, then in your wiki in the sidebar use ''Tools'',  ''import (paperclip)'' to import the JSON file you just saved, ''save'' your wiki, ''LOAD'' the saved wiki.\n\n# history filter <br>[[$:/plugins/wimmoermans/history/history.js]]\n\n#fullhistory filter <br>[[$:/plugins/wimmoermans/history/fhistory.js]]\n\n#History tab in the Sidebar.<br>[[$:/plugins/wimmoermans/history/HistoryTab]]<br><small>(to disable remove the ~$:/tags/SideBar tag)</small>\n# History2 tab for advanced seard tiddler <br>[[$:/plugins/wimmoermans/history/HistoryTab2]]<br><small>(to disable remove the ~$:/tags/AdvancedSearch tag)</small>\n#$:/plugins/wimmoermans/history/readme this tiddler\n# $:/plugins/wimmoermans/history/icon three cat paw prints (by Motovun ?)\n\n!!Google plus forum to discuss the history filters\nhttps://groups.google.com/forum/#!topic/tiddlywiki/u4lN-olqnPc\n\n\n!! ~TiddlyWiki version compatibility [3]\nhistory and fullhistory were tested on version 5.1.12 pre-release, 5.1.11, 5.1.9, 5.0.8-beta. 
For 5.0.8-beta the tab-example tiddlers require manually adding the field named 'caption' with value 'History' and 'History2' to present the tab captions.\n\n!!notes/warning\n[1] clicking on ''Draft'' titles in the history is ''dangerous'', especially when the tiddler is already open.\n\n[2] ''unique[]'' is an undocumented filter present in ~TiddlyWiki boot.js.\n\n[3] history scans the $:/HistoryList tiddler for \"title\"://single space//\"//tiddler title//\"  and displays the //tiddler title// value. It correctly handles double quotes and backslashes in tiddler titles.\n",
            "title": "$:/plugins/wimmoermans/history/readme",
            "tags": "historyplugin sh",
            "modifier": "wjam",
            "modified": "20160514063831746"
        }
    }
}
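The readme above uses the filter directly in wikitext (e.g. `{{{ [history[]] }}}` in the HistoryTab tiddler). A minimal sketch of a custom sidebar tab built from the readme's own `!is[system]` and `limit[]` examples, in the same JSON store format (the tiddler title and caption below are hypothetical placeholders):

```
{
    "tiddlers": {
        "$:/my/VisitedTab": {
            "title": "$:/my/VisitedTab",
            "tags": "$:/tags/SideBar",
            "caption": "Visited",
            "text": "<small>breadcrumbs (non-system, newest first):</small>\n\n{{{ [history[]!is[system]limit[25]] }}}\n"
        }
    }
}
```

Importing such a tiddler and saving the wiki, as described in the ''how to install'' steps of the readme, would add the extra sidebar tab.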
<small>breadcrumbs:</small>

{{{ [history[]] }}}


\rules except wikilink
Proceedings of INTERSPEECH 2019
\rules except wikilink
INTERSPEECH 2019
$:/core/ui/MoreSideBar/Missing
$:/causal/Causal Productions History View

{
    "tiddlers": {
        "$:/info/browser": {
            "title": "$:/info/browser",
            "text": "yes"
        },
        "$:/info/node": {
            "title": "$:/info/node",
            "text": "no"
        }
    }
}
{
    "tiddlers": {
        "$:/themes/tiddlywiki/snowwhite/base": {
            "title": "$:/themes/tiddlywiki/snowwhite/base",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": "\\rules only filteredtranscludeinline transcludeinline macrodef macrocallinline\n\n.tc-sidebar-header {\n\ttext-shadow: 0 1px 0 <<colour sidebar-foreground-shadow>>;\n}\n\n.tc-tiddler-info {\n\t<<box-shadow \"inset 1px 2px 3px rgba(0,0,0,0.1)\">>\n}\n\n@media screen {\n\t.tc-tiddler-frame {\n\t\t<<box-shadow \"1px 1px 5px rgba(0, 0, 0, 0.3)\">>\n\t}\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\t.tc-tiddler-frame {\n\t\t<<box-shadow none>>\n\t}\n}\n\n.tc-page-controls button svg, .tc-tiddler-controls button svg, .tc-topbar button svg {\n\t<<transition \"fill 150ms ease-in-out\">>\n}\n\n.tc-tiddler-controls button.tc-selected,\n.tc-page-controls button.tc-selected {\n\t<<filter \"drop-shadow(0px -1px 2px rgba(0,0,0,0.25))\">>\n}\n\n.tc-tiddler-frame input.tc-edit-texteditor {\n\t<<box-shadow \"inset 0 1px 8px rgba(0, 0, 0, 0.15)\">>\n}\n\n.tc-edit-tags {\n\t<<box-shadow \"inset 0 1px 8px rgba(0, 0, 0, 0.15)\">>\n}\n\n.tc-tiddler-frame .tc-edit-tags input.tc-edit-texteditor {\n\t<<box-shadow \"none\">>\n\tborder: none;\n\toutline: none;\n}\n\ncanvas.tc-edit-bitmapeditor  {\n\t<<box-shadow \"2px 2px 5px rgba(0, 0, 0, 0.5)\">>\n}\n\n.tc-drop-down {\n\tborder-radius: 4px;\n\t<<box-shadow \"2px 2px 10px rgba(0, 0, 0, 0.5)\">>\n}\n\n.tc-block-dropdown {\n\tborder-radius: 4px;\n\t<<box-shadow \"2px 2px 10px rgba(0, 0, 0, 0.5)\">>\n}\n\n.tc-modal {\n\tborder-radius: 6px;\n\t<<box-shadow \"0 3px 7px rgba(0,0,0,0.3)\">>\n}\n\n.tc-modal-footer {\n\tborder-radius: 0 0 6px 6px;\n\t<<box-shadow \"inset 0 1px 0 #fff\">>;\n}\n\n\n.tc-alert {\n\tborder-radius: 6px;\n\t<<box-shadow \"0 3px 7px rgba(0,0,0,0.6)\">>\n}\n\n.tc-notification {\n\tborder-radius: 6px;\n\t<<box-shadow \"0 3px 7px rgba(0,0,0,0.3)\">>\n\ttext-shadow: 0 1px 0 rgba(255,255,255, 0.8);\n}\n\n.tc-sidebar-lists .tc-tab-set .tc-tab-divider {\n\tborder-top: none;\n\theight: 1px;\n\t<<background-linear-gradient \"left, rgba(0,0,0,0.15) 0%, rgba(0,0,0,0.0) 100%\">>\n}\n\n.tc-more-sidebar .tc-tab-buttons button {\n\t<<background-linear-gradient \"left, rgba(0,0,0,0.01) 0%, rgba(0,0,0,0.1) 100%\">>\n}\n\n.tc-more-sidebar .tc-tab-buttons button.tc-tab-selected {\n\t<<background-linear-gradient \"left, rgba(0,0,0,0.05) 0%, rgba(255,255,255,0.05) 100%\">>\n}\n\n.tc-message-box img {\n\t<<box-shadow \"1px 1px 3px rgba(0,0,0,0.5)\">>\n}\n\n.tc-plugin-info {\n\t<<box-shadow \"1px 1px 3px rgba(0,0,0,0.5)\">>\n}\n"
        }
    }
}
{
    "tiddlers": {
        "$:/themes/tiddlywiki/vanilla/base": {
            "title": "$:/themes/tiddlywiki/vanilla/base",
            "tags": "[[$:/tags/Stylesheet]]",
            "text": "\\define custom-background-datauri()\n<$set name=\"background\" value={{$:/themes/tiddlywiki/vanilla/settings/backgroundimage}}>\n<$list filter=\"[<background>is[image]]\">\n`background: url(`\n<$list filter=\"[<background>!has[_canonical_uri]]\">\n<$macrocall $name=\"datauri\" title={{$:/themes/tiddlywiki/vanilla/settings/backgroundimage}}/>\n</$list>\n<$list filter=\"[<background>has[_canonical_uri]]\">\n<$view tiddler={{$:/themes/tiddlywiki/vanilla/settings/backgroundimage}} field=\"_canonical_uri\"/>\n</$list>\n`) center center;`\n`background-attachment: `{{$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment}}`;\n-webkit-background-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;\n-moz-background-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;\n-o-background-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;\nbackground-size:` {{$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize}}`;`\n</$list>\n</$set>\n\\end\n\n\\define if-fluid-fixed(text,hiddenSidebarText)\n<$reveal state=\"$:/themes/tiddlywiki/vanilla/options/sidebarlayout\" type=\"match\" text=\"fluid-fixed\">\n$text$\n<$reveal state=\"$:/state/sidebar\" type=\"nomatch\" text=\"yes\" default=\"yes\">\n$hiddenSidebarText$\n</$reveal>\n</$reveal>\n\\end\n\n\\rules only filteredtranscludeinline transcludeinline macrodef macrocallinline macrocallblock\n\n/*\n** Start with the normalize CSS reset, and then belay some of its effects\n*/\n\n{{$:/themes/tiddlywiki/vanilla/reset}}\n\n*, input[type=\"search\"] {\n\tbox-sizing: border-box;\n\t-moz-box-sizing: border-box;\n\t-webkit-box-sizing: border-box;\n}\n\nhtml button {\n\tline-height: 1.2;\n\tcolor: <<colour button-foreground>>;\n\tbackground: <<colour button-background>>;\n\tborder-color: <<colour button-border>>;\n}\n\n/*\n** Basic element styles\n*/\n\nhtml {\n\tfont-family: {{$:/themes/tiddlywiki/vanilla/settings/fontfamily}};\n\ttext-rendering: optimizeLegibility; /* Enables kerning and ligatures etc. 
*/\n\t-webkit-font-smoothing: antialiased;\n\t-moz-osx-font-smoothing: grayscale;\n}\n\nhtml:-webkit-full-screen {\n\tbackground-color: <<colour page-background>>;\n}\n\nbody.tc-body {\n\tfont-size: {{$:/themes/tiddlywiki/vanilla/metrics/fontsize}};\n\tline-height: {{$:/themes/tiddlywiki/vanilla/metrics/lineheight}};\n\tcolor: <<colour foreground>>;\n\tbackground-color: <<colour page-background>>;\n\tfill: <<colour foreground>>;\n\tword-wrap: break-word;\n\t<<custom-background-datauri>>\n}\n\nh1, h2, h3, h4, h5, h6 {\n\tline-height: 1.2;\n\tfont-weight: 300;\n}\n\npre {\n\tdisplay: block;\n\tpadding: 14px;\n\tmargin-top: 1em;\n\tmargin-bottom: 1em;\n\tword-break: normal;\n\tword-wrap: break-word;\n\twhite-space: {{$:/themes/tiddlywiki/vanilla/options/codewrapping}};\n\tbackground-color: <<colour pre-background>>;\n\tborder: 1px solid <<colour pre-border>>;\n\tpadding: 0 3px 2px;\n\tborder-radius: 3px;\n\tfont-family: {{$:/themes/tiddlywiki/vanilla/settings/codefontfamily}};\n}\n\ncode {\n\tcolor: <<colour code-foreground>>;\n\tbackground-color: <<colour code-background>>;\n\tborder: 1px solid <<colour code-border>>;\n\twhite-space: {{$:/themes/tiddlywiki/vanilla/options/codewrapping}};\n\tpadding: 0 3px 2px;\n\tborder-radius: 3px;\n\tfont-family: {{$:/themes/tiddlywiki/vanilla/settings/codefontfamily}};\n}\n\nblockquote {\n\tborder-left: 5px solid <<colour blockquote-bar>>;\n\tmargin-left: 25px;\n\tpadding-left: 10px;\n}\n\ndl dt {\n\tfont-weight: bold;\n\tmargin-top: 6px;\n}\n\ntextarea,\ninput[type=text],\ninput[type=search],\ninput[type=\"\"],\ninput:not([type]) {\n\tcolor: <<colour foreground>>;\n\tbackground: <<colour background>>;\n}\n\n.tc-muted {\n\tcolor: <<colour muted-foreground>>;\n}\n\nsvg.tc-image-button {\n\tpadding: 0px 1px 1px 0px;\n}\n\nkbd {\n\tdisplay: inline-block;\n\tpadding: 3px 5px;\n\tfont-size: 0.8em;\n\tline-height: 1.2;\n\tcolor: <<colour foreground>>;\n\tvertical-align: middle;\n\tbackground-color: <<colour background>>;\n\tborder: solid 1px <<colour muted-foreground>>;\n\tborder-bottom-color: <<colour muted-foreground>>;\n\tborder-radius: 3px;\n\tbox-shadow: inset 0 -1px 0 <<colour muted-foreground>>;\n}\n\n/*\nMarkdown likes putting code elements inside pre elements\n*/\npre > code {\n\tpadding: 0;\n\tborder: none;\n\tbackground-color: inherit;\n\tcolor: inherit;\n}\n\ntable {\n\tborder: 1px solid <<colour table-border>>;\n\twidth: auto;\n\tmax-width: 100%;\n\tcaption-side: bottom;\n\tmargin-top: 1em;\n\tmargin-bottom: 1em;\n}\n\ntable th, table td {\n\tpadding: 0 7px 0 7px;\n\tborder-top: 1px solid <<colour table-border>>;\n\tborder-left: 1px solid <<colour table-border>>;\n}\n\ntable thead tr td, table th {\n\tbackground-color: <<colour table-header-background>>;\n\tfont-weight: bold;\n}\n\ntable tfoot tr td {\n\tbackground-color: <<colour table-footer-background>>;\n}\n\n.tc-csv-table {\n\twhite-space: nowrap;\n}\n\n.tc-tiddler-frame img,\n.tc-tiddler-frame svg,\n.tc-tiddler-frame canvas,\n.tc-tiddler-frame embed,\n.tc-tiddler-frame iframe {\n\tmax-width: 100%;\n}\n\n.tc-tiddler-body > embed,\n.tc-tiddler-body > iframe {\n\twidth: 100%;\n\theight: 600px;\n}\n\n/*\n** Links\n*/\n\nbutton.tc-tiddlylink,\na.tc-tiddlylink {\n\ttext-decoration: none;\n\tfont-weight: normal;\n\tcolor: <<colour tiddler-link-foreground>>;\n\t-webkit-user-select: inherit; /* Otherwise the draggable attribute makes links impossible to select */\n}\n\n.tc-sidebar-lists a.tc-tiddlylink {\n\tcolor: <<colour sidebar-tiddler-link-foreground>>;\n}\n\n.tc-sidebar-lists 
a.tc-tiddlylink:hover {\n\tcolor: <<colour sidebar-tiddler-link-foreground-hover>>;\n}\n\nbutton.tc-tiddlylink:hover,\na.tc-tiddlylink:hover {\n\ttext-decoration: underline;\n}\n\na.tc-tiddlylink-resolves {\n}\n\na.tc-tiddlylink-shadow {\n\tfont-weight: bold;\n}\n\na.tc-tiddlylink-shadow.tc-tiddlylink-resolves {\n\tfont-weight: normal;\n}\n\na.tc-tiddlylink-missing {\n\tfont-style: italic;\n}\n\na.tc-tiddlylink-external {\n\ttext-decoration: underline;\n\tcolor: <<colour external-link-foreground>>;\n\tbackground-color: <<colour external-link-background>>;\n}\n\na.tc-tiddlylink-external:visited {\n\tcolor: <<colour external-link-foreground-visited>>;\n\tbackground-color: <<colour external-link-background-visited>>;\n}\n\na.tc-tiddlylink-external:hover {\n\tcolor: <<colour external-link-foreground-hover>>;\n\tbackground-color: <<colour external-link-background-hover>>;\n}\n\n/*\n** Drag and drop styles\n*/\n\n.tc-tiddler-dragger {\n\tposition: relative;\n\tz-index: -10000;\n}\n\n.tc-tiddler-dragger-inner {\n\tposition: absolute;\n\tdisplay: inline-block;\n\tpadding: 8px 20px;\n\tfont-size: 16.9px;\n\tfont-weight: bold;\n\tline-height: 20px;\n\tcolor: <<colour dragger-foreground>>;\n\ttext-shadow: 0 1px 0 rgba(0, 0, 0, 1);\n\twhite-space: nowrap;\n\tvertical-align: baseline;\n\tbackground-color: <<colour dragger-background>>;\n\tborder-radius: 20px;\n}\n\n.tc-tiddler-dragger-cover {\n\tposition: absolute;\n\tbackground-color: <<colour page-background>>;\n}\n\n.tc-dropzone {\n\tposition: relative;\n}\n\n.tc-dropzone.tc-dragover:before {\n\tz-index: 10000;\n\tdisplay: block;\n\tposition: fixed;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbackground: <<colour dropzone-background>>;\n\ttext-align: center;\n\tcontent: \"<<lingo DropMessage>>\";\n}\n\n/*\n** Plugin reload warning\n*/\n\n.tc-plugin-reload-warning {\n\tz-index: 1000;\n\tdisplay: block;\n\tposition: fixed;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbackground: <<colour alert-background>>;\n\ttext-align: center;\n}\n\n/*\n** Buttons\n*/\n\nbutton svg, button img, label svg, label img {\n\tvertical-align: middle;\n}\n\n.tc-btn-invisible {\n\tpadding: 0;\n\tmargin: 0;\n\tbackground: none;\n\tborder: none;\n}\n\n.tc-btn-boxed {\n\tfont-size: 0.6em;\n\tpadding: 0.2em;\n\tmargin: 1px;\n\tbackground: none;\n\tborder: 1px solid <<colour tiddler-controls-foreground>>;\n\tborder-radius: 0.25em;\n}\n\nhtml body.tc-body .tc-btn-boxed svg {\n\tfont-size: 1.6666em;\n}\n\n.tc-btn-boxed:hover {\n\tbackground: <<colour muted-foreground>>;\n\tcolor: <<colour background>>;\n}\n\nhtml body.tc-body .tc-btn-boxed:hover svg {\n\tfill: <<colour background>>;\n}\n\n.tc-btn-rounded {\n\tfont-size: 0.5em;\n\tline-height: 2;\n\tpadding: 0em 0.3em 0.2em 0.4em;\n\tmargin: 1px;\n\tborder: 1px solid <<colour muted-foreground>>;\n\tbackground: <<colour muted-foreground>>;\n\tcolor: <<colour background>>;\n\tborder-radius: 2em;\n}\n\nhtml body.tc-body .tc-btn-rounded svg {\n\tfont-size: 1.6666em;\n\tfill: <<colour background>>;\n}\n\n.tc-btn-rounded:hover {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tbackground: <<colour background>>;\n\tcolor: <<colour muted-foreground>>;\n}\n\nhtml body.tc-body .tc-btn-rounded:hover svg {\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-btn-icon svg {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-btn-text {\n\tpadding: 0;\n\tmargin: 0;\n}\n\n.tc-btn-big-green {\n\tdisplay: inline-block;\n\tpadding: 8px;\n\tmargin: 4px 8px 4px 8px;\n\tbackground: <<colour download-background>>;\n\tcolor: <<colour 
download-foreground>>;\n\tfill: <<colour download-foreground>>;\n\tborder: none;\n\tfont-size: 1.2em;\n\tline-height: 1.4em;\n\ttext-decoration: none;\n}\n\n.tc-btn-big-green svg,\n.tc-btn-big-green img {\n\theight: 2em;\n\twidth: 2em;\n\tvertical-align: middle;\n\tfill: <<colour download-foreground>>;\n}\n\n.tc-sidebar-lists input {\n\tcolor: <<colour foreground>>;\n}\n\n.tc-sidebar-lists button {\n\tcolor: <<colour sidebar-button-foreground>>;\n\tfill: <<colour sidebar-button-foreground>>;\n}\n\n.tc-sidebar-lists button.tc-btn-mini {\n\tcolor: <<colour sidebar-muted-foreground>>;\n}\n\n.tc-sidebar-lists button.tc-btn-mini:hover {\n\tcolor: <<colour sidebar-muted-foreground-hover>>;\n}\n\nbutton svg.tc-image-button, button .tc-image-button img {\n\theight: 1em;\n\twidth: 1em;\n}\n\n.tc-unfold-banner {\n\tposition: absolute;\n\tpadding: 0;\n\tmargin: 0;\n\tbackground: none;\n\tborder: none;\n\twidth: 100%;\n\twidth: calc(100% + 2px);\n\tmargin-left: -43px;\n\ttext-align: center;\n\tborder-top: 2px solid <<colour tiddler-info-background>>;\n\tmargin-top: 4px;\n}\n\n.tc-unfold-banner:hover {\n\tbackground: <<colour tiddler-info-background>>;\n\tborder-top: 2px solid <<colour tiddler-info-border>>;\n}\n\n.tc-unfold-banner svg, .tc-fold-banner svg {\n\theight: 0.75em;\n\tfill: <<colour tiddler-controls-foreground>>;\n}\n\n.tc-unfold-banner:hover svg, .tc-fold-banner:hover svg {\n\tfill: <<colour tiddler-controls-foreground-hover>>;\n}\n\n.tc-fold-banner {\n\tposition: absolute;\n\tpadding: 0;\n\tmargin: 0;\n\tbackground: none;\n\tborder: none;\n\twidth: 23px;\n\ttext-align: center;\n\tmargin-left: -35px;\n\ttop: 6px;\n\tbottom: 6px;\n}\n\n.tc-fold-banner:hover {\n\tbackground: <<colour tiddler-info-background>>;\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-unfold-banner {\n\t\tposition: static;\n\t\twidth: calc(100% + 59px);\n\t}\n\n\t.tc-fold-banner {\n\t\twidth: 16px;\n\t\tmargin-left: -16px;\n\t\tfont-size: 0.75em;\n\t}\n\n}\n\n/*\n** Tags and missing tiddlers\n*/\n\n.tc-tag-list-item {\n\tposition: relative;\n\tdisplay: inline-block;\n\tmargin-right: 7px;\n}\n\n.tc-tags-wrapper {\n\tmargin: 4px 0 14px 0;\n}\n\n.tc-missing-tiddler-label {\n\tfont-style: italic;\n\tfont-weight: normal;\n\tdisplay: inline-block;\n\tfont-size: 11.844px;\n\tline-height: 14px;\n\twhite-space: nowrap;\n\tvertical-align: baseline;\n}\n\nbutton.tc-tag-label, span.tc-tag-label {\n\tdisplay: inline-block;\n\tpadding: 0.16em 0.7em;\n\tfont-size: 0.9em;\n\tfont-weight: 300;\n\tline-height: 1.2em;\n\tcolor: <<colour tag-foreground>>;\n\twhite-space: nowrap;\n\tvertical-align: baseline;\n\tbackground-color: <<colour tag-background>>;\n\tborder-radius: 1em;\n}\n\n.tc-untagged-separator {\n\twidth: 10em;\n\tleft: 0;\n\tmargin-left: 0;\n\tborder: 0;\n\theight: 1px;\n\tbackground: <<colour tab-divider>>;\n}\n\nbutton.tc-untagged-label {\n\tbackground-color: <<colour untagged-background>>;\n}\n\n.tc-tag-label svg, .tc-tag-label img {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour tag-foreground>>;\n}\n\n.tc-tag-manager-table .tc-tag-label {\n\twhite-space: normal;\n}\n\n.tc-tag-manager-tag {\n\twidth: 100%;\n}\n\n/*\n** Page layout\n*/\n\n.tc-topbar {\n\tposition: fixed;\n\tz-index: 1200;\n}\n\n.tc-topbar-left {\n\tleft: 29px;\n\ttop: 5px;\n}\n\n.tc-topbar-right {\n\ttop: 5px;\n\tright: 29px;\n}\n\n.tc-topbar button {\n\tpadding: 8px;\n}\n\n.tc-topbar svg {\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-topbar button:hover svg {\n\tfill: <<colour 
foreground>>;\n}\n\n.tc-sidebar-header {\n\tcolor: <<colour sidebar-foreground>>;\n\tfill: <<colour sidebar-foreground>>;\n}\n\n.tc-sidebar-header .tc-title a.tc-tiddlylink-resolves {\n\tfont-weight: 300;\n}\n\n.tc-sidebar-header .tc-sidebar-lists p {\n\tmargin-top: 3px;\n\tmargin-bottom: 3px;\n}\n\n.tc-sidebar-header .tc-missing-tiddler-label {\n\tcolor: <<colour sidebar-foreground>>;\n}\n\n.tc-advanced-search input {\n\twidth: 60%;\n}\n\n.tc-search a svg {\n\twidth: 1.2em;\n\theight: 1.2em;\n\tvertical-align: middle;\n}\n\n.tc-page-controls {\n\tmargin-top: 14px;\n\tfont-size: 1.5em;\n}\n\n.tc-page-controls button {\n\tmargin-right: 0.5em;\n}\n\n.tc-page-controls a.tc-tiddlylink:hover {\n\ttext-decoration: none;\n}\n\n.tc-page-controls img {\n\twidth: 1em;\n}\n\n.tc-page-controls svg {\n\tfill: <<colour sidebar-controls-foreground>>;\n}\n\n.tc-page-controls button:hover svg, .tc-page-controls a:hover svg {\n\tfill: <<colour sidebar-controls-foreground-hover>>;\n}\n\n.tc-menu-list-item {\n\twhite-space: nowrap;\n}\n\n.tc-menu-list-count {\n\tfont-weight: bold;\n}\n\n.tc-menu-list-subitem {\n\tpadding-left: 7px;\n}\n\n.tc-story-river {\n\tposition: relative;\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-sidebar-header {\n\t\tpadding: 14px;\n\t\tmin-height: 32px;\n\t\tmargin-top: {{$:/themes/tiddlywiki/vanilla/metrics/storytop}};\n\t}\n\n\t.tc-story-river {\n\t\tposition: relative;\n\t\tpadding: 0;\n\t}\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-message-box {\n\t\tmargin: 21px -21px 21px -21px;\n\t}\n\n\t.tc-sidebar-scrollable {\n\t\tposition: fixed;\n\t\ttop: {{$:/themes/tiddlywiki/vanilla/metrics/storytop}};\n\t\tleft: {{$:/themes/tiddlywiki/vanilla/metrics/storyright}};\n\t\tbottom: 0;\n\t\tright: 0;\n\t\toverflow-y: auto;\n\t\toverflow-x: auto;\n\t\t-webkit-overflow-scrolling: touch;\n\t\tmargin: 0 0 0 -42px;\n\t\tpadding: 71px 0 28px 42px;\n\t}\n\n\t.tc-story-river {\n\t\tposition: relative;\n\t\tleft: {{$:/themes/tiddlywiki/vanilla/metrics/storyleft}};\n\t\ttop: {{$:/themes/tiddlywiki/vanilla/metrics/storytop}};\n\t\twidth: {{$:/themes/tiddlywiki/vanilla/metrics/storywidth}};\n\t\tpadding: 42px 42px 42px 42px;\n\t}\n\n<<if-no-sidebar \"\n\n\t.tc-story-river {\n\t\twidth: calc(100% - {{$:/themes/tiddlywiki/vanilla/metrics/storyleft}});\n\t}\n\n\">>\n\n}\n\n@media print {\n\n\tbody.tc-body {\n\t\tbackground-color: transparent;\n\t}\n\n\t.tc-sidebar-header, .tc-topbar {\n\t\tdisplay: none;\n\t}\n\n\t.tc-story-river {\n\t\tmargin: 0;\n\t\tpadding: 0;\n\t}\n\n\t.tc-story-river .tc-tiddler-frame {\n\t\tmargin: 0;\n\t\tborder: none;\n\t\tpadding: 0;\n\t}\n}\n\n/*\n** Tiddler styles\n*/\n\n.tc-tiddler-frame {\n\tposition: relative;\n\tmargin-bottom: 28px;\n\tbackground-color: <<colour tiddler-background>>;\n\tborder: 1px solid <<colour tiddler-border>>;\n}\n\n{{$:/themes/tiddlywiki/vanilla/sticky}}\n\n.tc-tiddler-info {\n\tpadding: 14px 42px 14px 42px;\n\tbackground-color: <<colour tiddler-info-background>>;\n\tborder-top: 1px solid <<colour tiddler-info-border>>;\n\tborder-bottom: 1px solid <<colour tiddler-info-border>>;\n}\n\n.tc-tiddler-info p {\n\tmargin-top: 3px;\n\tmargin-bottom: 3px;\n}\n\n.tc-tiddler-info .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour tiddler-info-tab-background>>;\n\tborder-bottom: 1px solid <<colour tiddler-info-tab-background>>;\n}\n\n.tc-view-field-table {\n\twidth: 100%;\n}\n\n.tc-view-field-name {\n\twidth: 1%; /* Makes this column be 
as narrow as possible */\n\ttext-align: right;\n\tfont-style: italic;\n\tfont-weight: 200;\n}\n\n.tc-view-field-value {\n}\n\n@media (max-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\t.tc-tiddler-frame {\n\t\tpadding: 14px 14px 14px 14px;\n\t}\n\n\t.tc-tiddler-info {\n\t\tmargin: 0 -14px 0 -14px;\n\t}\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\t.tc-tiddler-frame {\n\t\tpadding: 28px 42px 42px 42px;\n\t\twidth: {{$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth}};\n\t\tborder-radius: 2px;\n\t}\n\n<<if-no-sidebar \"\n\n\t.tc-tiddler-frame {\n\t\twidth: 100%;\n\t}\n\n\">>\n\n\t.tc-tiddler-info {\n\t\tmargin: 0 -42px 0 -42px;\n\t}\n}\n\n.tc-site-title,\n.tc-titlebar {\n\tfont-weight: 300;\n\tfont-size: 2.35em;\n\tline-height: 1.2em;\n\tcolor: <<colour tiddler-title-foreground>>;\n\tmargin: 0;\n}\n\n.tc-site-title {\n\tcolor: <<colour site-title-foreground>>;\n}\n\n.tc-tiddler-title-icon {\n\tvertical-align: middle;\n}\n\n.tc-system-title-prefix {\n\tcolor: <<colour muted-foreground>>;\n}\n\n.tc-titlebar h2 {\n\tfont-size: 1em;\n\tdisplay: inline;\n}\n\n.tc-titlebar img {\n\theight: 1em;\n}\n\n.tc-subtitle {\n\tfont-size: 0.9em;\n\tcolor: <<colour tiddler-subtitle-foreground>>;\n\tfont-weight: 300;\n}\n\n.tc-tiddler-missing .tc-title {\n  font-style: italic;\n  font-weight: normal;\n}\n\n.tc-tiddler-frame .tc-tiddler-controls {\n\tfloat: right;\n}\n\n.tc-tiddler-controls .tc-drop-down {\n\tfont-size: 0.6em;\n}\n\n.tc-tiddler-controls .tc-drop-down .tc-drop-down {\n\tfont-size: 1em;\n}\n\n.tc-tiddler-controls > span > button {\n\tvertical-align: baseline;\n\tmargin-left:5px;\n}\n\n.tc-tiddler-controls button svg, .tc-tiddler-controls button img,\n.tc-search button svg, .tc-search a svg {\n\theight: 0.75em;\n\tfill: <<colour tiddler-controls-foreground>>;\n}\n\n.tc-tiddler-controls button.tc-selected svg,\n.tc-page-controls button.tc-selected svg  {\n\tfill: <<colour tiddler-controls-foreground-selected>>;\n}\n\n.tc-tiddler-controls button.tc-btn-invisible:hover svg,\n.tc-search button:hover svg, .tc-search a:hover svg {\n\tfill: <<colour tiddler-controls-foreground-hover>>;\n}\n\n@media print {\n\t.tc-tiddler-controls {\n\t\tdisplay: none;\n\t}\n}\n\n.tc-tiddler-help { /* Help prompts within tiddler template */\n\tcolor: <<colour muted-foreground>>;\n\tmargin-top: 14px;\n}\n\n.tc-tiddler-help a.tc-tiddlylink {\n\tcolor: <<colour very-muted-foreground>>;\n}\n\n.tc-tiddler-frame .tc-edit-texteditor {\n\twidth: 100%;\n\tmargin: 4px 0 4px 0;\n}\n\n.tc-tiddler-frame input.tc-edit-texteditor,\n.tc-tiddler-frame textarea.tc-edit-texteditor,\n.tc-tiddler-frame iframe.tc-edit-texteditor {\n\tpadding: 3px 3px 3px 3px;\n\tborder: 1px solid <<colour tiddler-editor-border>>;\n\tline-height: 1.3em;\n\t-webkit-appearance: none;\n}\n\n.tc-tiddler-frame .tc-binary-warning {\n\twidth: 100%;\n\theight: 5em;\n\ttext-align: center;\n\tpadding: 3em 3em 6em 3em;\n\tbackground: <<colour alert-background>>;\n\tborder: 1px solid <<colour alert-border>>;\n}\n\n.tc-tiddler-frame input.tc-edit-texteditor {\n\tbackground-color: <<colour tiddler-editor-background>>;\n}\n\ncanvas.tc-edit-bitmapeditor  {\n\tborder: 6px solid <<colour tiddler-editor-border-image>>;\n\tcursor: crosshair;\n\t-moz-user-select: none;\n\t-webkit-user-select: none;\n\t-ms-user-select: none;\n\tmargin-top: 6px;\n\tmargin-bottom: 6px;\n}\n\n.tc-edit-bitmapeditor-width {\n\tdisplay: block;\n}\n\n.tc-edit-bitmapeditor-height {\n\tdisplay: block;\n}\n\n.tc-tiddler-body {\n\tclear: 
both;\n}\n\n.tc-tiddler-frame .tc-tiddler-body {\n\tfont-size: {{$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize}};\n\tline-height: {{$:/themes/tiddlywiki/vanilla/metrics/bodylineheight}};\n}\n\n.tc-titlebar, .tc-tiddler-edit-title {\n\toverflow: hidden; /* https://github.com/Jermolene/TiddlyWiki5/issues/282 */\n}\n\nhtml body.tc-body.tc-single-tiddler-window {\n\tmargin: 1em;\n\tbackground: <<colour tiddler-background>>;\n}\n\n.tc-single-tiddler-window img,\n.tc-single-tiddler-window svg,\n.tc-single-tiddler-window canvas,\n.tc-single-tiddler-window embed,\n.tc-single-tiddler-window iframe {\n\tmax-width: 100%;\n}\n\n/*\n** Editor\n*/\n\n.tc-editor-toolbar {\n\tmargin-top: 8px;\n}\n\n.tc-editor-toolbar button {\n\tvertical-align: middle;\n\tbackground-color: <<colour tiddler-controls-foreground>>;\n\tfill: <<colour tiddler-controls-foreground-selected>>;\n\tborder-radius: 4px;\n\tpadding: 3px;\n\tmargin: 2px 0 2px 4px;\n}\n\n.tc-editor-toolbar button.tc-text-editor-toolbar-item-adjunct {\n\tmargin-left: 1px;\n\twidth: 1em;\n\tborder-radius: 8px;\n}\n\n.tc-editor-toolbar button.tc-text-editor-toolbar-item-start-group {\n\tmargin-left: 11px;\n}\n\n.tc-editor-toolbar button.tc-selected {\n\tbackground-color: <<colour primary>>;\n}\n\n.tc-editor-toolbar button svg {\n\twidth: 1.6em;\n\theight: 1.2em;\n}\n\n.tc-editor-toolbar button:hover {\n\tbackground-color: <<colour tiddler-controls-foreground-selected>>;\n\tfill: <<colour background>>;\n}\n\n.tc-editor-toolbar .tc-text-editor-toolbar-more {\n\twhite-space: normal;\n}\n\n.tc-editor-toolbar .tc-text-editor-toolbar-more button {\n\tdisplay: inline-block;\n\tpadding: 3px;\n\twidth: auto;\n}\n\n.tc-editor-toolbar .tc-search-results {\n\tpadding: 0;\n}\n\n/*\n** Adjustments for fluid-fixed mode\n*/\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n<<if-fluid-fixed text:\"\"\"\n\n\t.tc-story-river {\n\t\tpadding-right: 0;\n\t\tposition: relative;\n\t\twidth: auto;\n\t\tleft: 0;\n\t\tmargin-left: {{$:/themes/tiddlywiki/vanilla/metrics/storyleft}};\n\t\tmargin-right: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth}};\n\t}\n\n\t.tc-tiddler-frame {\n\t\twidth: 100%;\n\t}\n\n\t.tc-sidebar-scrollable {\n\t\tleft: auto;\n\t\tbottom: 0;\n\t\tright: 0;\n\t\twidth: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth}};\n\t}\n\n\tbody.tc-body .tc-storyview-zoomin-tiddler {\n\t\twidth: 100%;\n\t\twidth: calc(100% - 42px);\n\t}\n\n\"\"\" hiddenSidebarText:\"\"\"\n\n\t.tc-story-river {\n\t\tpadding-right: 3em;\n\t\tmargin-right: 0;\n\t}\n\n\tbody.tc-body .tc-storyview-zoomin-tiddler {\n\t\twidth: 100%;\n\t\twidth: calc(100% - 84px);\n\t}\n\n\"\"\">>\n\n}\n\n/*\n** Toolbar buttons\n*/\n\n.tc-page-controls svg.tc-image-new-button {\n  fill: <<colour toolbar-new-button>>;\n}\n\n.tc-page-controls svg.tc-image-options-button {\n  fill: <<colour toolbar-options-button>>;\n}\n\n.tc-page-controls svg.tc-image-save-button {\n  fill: <<colour toolbar-save-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-info-button {\n  fill: <<colour toolbar-info-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-edit-button {\n  fill: <<colour toolbar-edit-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-close-button {\n  fill: <<colour toolbar-close-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-delete-button {\n  fill: <<colour toolbar-delete-button>>;\n}\n\n.tc-tiddler-controls button svg.tc-image-cancel-button {\n  fill: <<colour toolbar-cancel-button>>;\n}\n\n.tc-tiddler-controls button 
svg.tc-image-done-button {\n  fill: <<colour toolbar-done-button>>;\n}\n\n/*\n** Tiddler edit mode\n*/\n\n.tc-tiddler-edit-frame em.tc-edit {\n\tcolor: <<colour muted-foreground>>;\n\tfont-style: normal;\n}\n\n.tc-edit-type-dropdown a.tc-tiddlylink-missing {\n\tfont-style: normal;\n}\n\n.tc-edit-tags {\n\tborder: 1px solid <<colour tiddler-editor-border>>;\n\tpadding: 4px 8px 4px 8px;\n}\n\n.tc-edit-add-tag {\n\tdisplay: inline-block;\n}\n\n.tc-edit-add-tag .tc-add-tag-name input {\n\twidth: 50%;\n}\n\n.tc-edit-tags .tc-tag-label {\n\tdisplay: inline-block;\n}\n\n.tc-edit-tags-list {\n\tmargin: 14px 0 14px 0;\n}\n\n.tc-remove-tag-button {\n\tpadding-left: 4px;\n}\n\n.tc-tiddler-preview {\n\toverflow: auto;\n}\n\n.tc-tiddler-preview-preview {\n\tfloat: right;\n\twidth: 49%;\n\tborder: 1px solid <<colour tiddler-editor-border>>;\n\tmargin: 4px 3px 3px 3px;\n\tpadding: 3px 3px 3px 3px;\n}\n\n.tc-tiddler-frame .tc-tiddler-preview .tc-edit-texteditor {\n\twidth: 49%;\n}\n\n.tc-tiddler-frame .tc-tiddler-preview canvas.tc-edit-bitmapeditor {\n\tmax-width: 49%;\n}\n\n.tc-edit-fields {\n\twidth: 100%;\n}\n\n\n.tc-edit-fields table, .tc-edit-fields tr, .tc-edit-fields td {\n\tborder: none;\n\tpadding: 4px;\n}\n\n.tc-edit-fields > tbody > .tc-edit-field:nth-child(odd) {\n\tbackground-color: <<colour tiddler-editor-fields-odd>>;\n}\n\n.tc-edit-fields > tbody > .tc-edit-field:nth-child(even) {\n\tbackground-color: <<colour tiddler-editor-fields-even>>;\n}\n\n.tc-edit-field-name {\n\ttext-align: right;\n}\n\n.tc-edit-field-value input {\n\twidth: 100%;\n}\n\n.tc-edit-field-remove {\n}\n\n.tc-edit-field-remove svg {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour muted-foreground>>;\n\tvertical-align: middle;\n}\n\n.tc-edit-field-add-name {\n\tdisplay: inline-block;\n\twidth: 15%;\n}\n\n.tc-edit-field-add-value {\n\tdisplay: inline-block;\n\twidth: 40%;\n}\n\n.tc-edit-field-add-button {\n\tdisplay: inline-block;\n\twidth: 10%;\n}\n\n/*\n** Storyview Classes\n*/\n\n.tc-storyview-zoomin-tiddler {\n\tposition: absolute;\n\tdisplay: block;\n\twidth: 100%;\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-storyview-zoomin-tiddler {\n\t\twidth: calc(100% - 84px);\n\t}\n\n}\n\n/*\n** Dropdowns\n*/\n\n.tc-btn-dropdown {\n\ttext-align: left;\n}\n\n.tc-btn-dropdown svg, .tc-btn-dropdown img {\n\theight: 1em;\n\twidth: 1em;\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-drop-down-wrapper {\n\tposition: relative;\n}\n\n.tc-drop-down {\n\tmin-width: 380px;\n\tborder: 1px solid <<colour dropdown-border>>;\n\tbackground-color: <<colour dropdown-background>>;\n\tpadding: 7px 0 7px 0;\n\tmargin: 4px 0 0 0;\n\twhite-space: nowrap;\n\ttext-shadow: none;\n\tline-height: 1.4;\n}\n\n.tc-drop-down .tc-drop-down {\n\tmargin-left: 14px;\n}\n\n.tc-drop-down button svg, .tc-drop-down a svg  {\n\tfill: <<colour foreground>>;\n}\n\n.tc-drop-down button.tc-btn-invisible:hover svg {\n\tfill: <<colour foreground>>;\n}\n\n.tc-drop-down p {\n\tpadding: 0 14px 0 14px;\n}\n\n.tc-drop-down svg {\n\twidth: 1em;\n\theight: 1em;\n}\n\n.tc-drop-down img {\n\twidth: 1em;\n}\n\n.tc-drop-down-language-chooser img {\n\twidth: 2em;\n\tvertical-align: baseline;\n}\n\n.tc-drop-down a, .tc-drop-down button {\n\tdisplay: block;\n\tpadding: 0 14px 0 14px;\n\twidth: 100%;\n\ttext-align: left;\n\tcolor: <<colour foreground>>;\n\tline-height: 1.4;\n}\n\n.tc-drop-down .tc-tab-set .tc-tab-buttons button {\n\tdisplay: inline-block;\n    width: auto;\n    margin-bottom: 0px;\n    border-bottom-left-radius: 0;\n    
border-bottom-right-radius: 0;\n}\n\n.tc-drop-down .tc-prompt {\n\tpadding: 0 14px;\n}\n\n.tc-drop-down .tc-chooser {\n\tborder: none;\n}\n\n.tc-drop-down .tc-chooser .tc-swatches-horiz {\n\tfont-size: 0.4em;\n\tpadding-left: 1.2em;\n}\n\n.tc-drop-down .tc-file-input-wrapper {\n\twidth: 100%;\n}\n\n.tc-drop-down .tc-file-input-wrapper button {\n\tcolor: <<colour foreground>>;\n}\n\n.tc-drop-down a:hover, .tc-drop-down button:hover, .tc-drop-down .tc-file-input-wrapper:hover button {\n\tcolor: <<colour tiddler-link-background>>;\n\tbackground-color: <<colour tiddler-link-foreground>>;\n\ttext-decoration: none;\n}\n\n.tc-drop-down .tc-tab-buttons button {\n\tbackground-color: <<colour dropdown-tab-background>>;\n}\n\n.tc-drop-down .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour dropdown-tab-background-selected>>;\n\tborder-bottom: 1px solid <<colour dropdown-tab-background-selected>>;\n}\n\n.tc-drop-down-bullet {\n\tdisplay: inline-block;\n\twidth: 0.5em;\n}\n\n.tc-drop-down .tc-tab-contents a {\n\tpadding: 0 0.5em 0 0.5em;\n}\n\n.tc-block-dropdown-wrapper {\n\tposition: relative;\n}\n\n.tc-block-dropdown {\n\tposition: absolute;\n\tmin-width: 220px;\n\tborder: 1px solid <<colour dropdown-border>>;\n\tbackground-color: <<colour dropdown-background>>;\n\tpadding: 7px 0;\n\tmargin: 4px 0 0 0;\n\twhite-space: nowrap;\n\tz-index: 1000;\n\ttext-shadow: none;\n}\n\n.tc-block-dropdown.tc-search-drop-down {\n\tmargin-left: -12px;\n}\n\n.tc-block-dropdown a {\n\tdisplay: block;\n\tpadding: 4px 14px 4px 14px;\n}\n\n.tc-block-dropdown.tc-search-drop-down a {\n\tdisplay: block;\n\tpadding: 0px 10px 0px 10px;\n}\n\n.tc-drop-down .tc-dropdown-item-plain,\n.tc-block-dropdown .tc-dropdown-item-plain {\n\tpadding: 4px 14px 4px 7px;\n}\n\n.tc-drop-down .tc-dropdown-item,\n.tc-block-dropdown .tc-dropdown-item {\n\tpadding: 4px 14px 4px 7px;\n\tcolor: <<colour muted-foreground>>;\n}\n\n.tc-block-dropdown a:hover {\n\tcolor: <<colour tiddler-link-background>>;\n\tbackground-color: <<colour tiddler-link-foreground>>;\n\ttext-decoration: none;\n}\n\n.tc-search-results {\n\tpadding: 0 7px 0 7px;\n}\n\n.tc-image-chooser, .tc-colour-chooser {\n\twhite-space: normal;\n}\n\n.tc-image-chooser a,\n.tc-colour-chooser a {\n\tdisplay: inline-block;\n\tvertical-align: top;\n\ttext-align: center;\n\tposition: relative;\n}\n\n.tc-image-chooser a {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tpadding: 2px;\n\tmargin: 2px;\n\twidth: 4em;\n\theight: 4em;\n}\n\n.tc-colour-chooser a {\n\tpadding: 3px;\n\twidth: 2em;\n\theight: 2em;\n\tvertical-align: middle;\n}\n\n.tc-image-chooser a:hover,\n.tc-colour-chooser a:hover {\n\tbackground: <<colour primary>>;\n\tpadding: 0px;\n\tborder: 3px solid <<colour primary>>;\n}\n\n.tc-image-chooser a svg,\n.tc-image-chooser a img {\n\tdisplay: inline-block;\n\twidth: auto;\n\theight: auto;\n\tmax-width: 3.5em;\n\tmax-height: 3.5em;\n\tposition: absolute;\n\ttop: 0;\n\tbottom: 0;\n\tleft: 0;\n\tright: 0;\n\tmargin: auto;\n}\n\n/*\n** Modals\n*/\n\n.tc-modal-wrapper {\n\tposition: fixed;\n\toverflow: auto;\n\toverflow-y: scroll;\n\ttop: 0;\n\tright: 0;\n\tbottom: 0;\n\tleft: 0;\n\tz-index: 900;\n}\n\n.tc-modal-backdrop {\n\tposition: fixed;\n\ttop: 0;\n\tright: 0;\n\tbottom: 0;\n\tleft: 0;\n\tz-index: 1000;\n\tbackground-color: <<colour modal-backdrop>>;\n}\n\n.tc-modal {\n\tz-index: 1100;\n\tbackground-color: <<colour modal-background>>;\n\tborder: 1px solid <<colour modal-border>>;\n}\n\n@media (max-width: 55em) {\n\t.tc-modal {\n\t\tposition: fixed;\n\t\ttop: 
1em;\n\t\tleft: 1em;\n\t\tright: 1em;\n\t}\n\n\t.tc-modal-body {\n\t\toverflow-y: auto;\n\t\tmax-height: 400px;\n\t\tmax-height: 60vh;\n\t}\n}\n\n@media (min-width: 55em) {\n\t.tc-modal {\n\t\tposition: fixed;\n\t\ttop: 2em;\n\t\tleft: 25%;\n\t\twidth: 50%;\n\t}\n\n\t.tc-modal-body {\n\t\toverflow-y: auto;\n\t\tmax-height: 400px;\n\t\tmax-height: 60vh;\n\t}\n}\n\n.tc-modal-header {\n\tpadding: 9px 15px;\n\tborder-bottom: 1px solid <<colour modal-header-border>>;\n}\n\n.tc-modal-header h3 {\n\tmargin: 0;\n\tline-height: 30px;\n}\n\n.tc-modal-header img, .tc-modal-header svg {\n\twidth: 1em;\n\theight: 1em;\n}\n\n.tc-modal-body {\n\tpadding: 15px;\n}\n\n.tc-modal-footer {\n\tpadding: 14px 15px 15px;\n\tmargin-bottom: 0;\n\ttext-align: right;\n\tbackground-color: <<colour modal-footer-background>>;\n\tborder-top: 1px solid <<colour modal-footer-border>>;\n}\n\n/*\n** Notifications\n*/\n\n.tc-notification {\n\tposition: fixed;\n\ttop: 14px;\n\tright: 42px;\n\tz-index: 1300;\n\tmax-width: 280px;\n\tpadding: 0 14px 0 14px;\n\tbackground-color: <<colour notification-background>>;\n\tborder: 1px solid <<colour notification-border>>;\n}\n\n/*\n** Tabs\n*/\n\n.tc-tab-set.tc-vertical {\n\tdisplay: -webkit-flex;\n\tdisplay: flex;\n}\n\n.tc-tab-buttons {\n\tfont-size: 0.85em;\n\tpadding-top: 1em;\n\tmargin-bottom: -2px;\n}\n\n.tc-tab-buttons.tc-vertical  {\n\tz-index: 100;\n\tdisplay: block;\n\tpadding-top: 14px;\n\tvertical-align: top;\n\ttext-align: right;\n\tmargin-bottom: inherit;\n\tmargin-right: -1px;\n\tmax-width: 33%;\n\t-webkit-flex: 0 0 auto;\n\tflex: 0 0 auto;\n}\n\n.tc-tab-buttons button.tc-tab-selected {\n\tcolor: <<colour tab-foreground-selected>>;\n\tbackground-color: <<colour tab-background-selected>>;\n\tborder-left: 1px solid <<colour tab-border-selected>>;\n\tborder-top: 1px solid <<colour tab-border-selected>>;\n\tborder-right: 1px solid <<colour tab-border-selected>>;\n}\n\n.tc-tab-buttons button {\n\tcolor: <<colour tab-foreground>>;\n\tpadding: 3px 5px 3px 5px;\n\tmargin-right: 0.3em;\n\tfont-weight: 300;\n\tborder: none;\n\tbackground: inherit;\n\tbackground-color: <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-right: 1px solid <<colour tab-border>>;\n\tborder-top-left-radius: 2px;\n\tborder-top-right-radius: 2px;\n}\n\n.tc-tab-buttons.tc-vertical button {\n\tdisplay: block;\n\twidth: 100%;\n\tmargin-top: 3px;\n\tmargin-right: 0;\n\ttext-align: right;\n\tbackground-color: <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n\tborder-right: none;\n\tborder-top-left-radius: 2px;\n\tborder-bottom-left-radius: 2px;\n}\n\n.tc-tab-buttons.tc-vertical button.tc-tab-selected {\n\tbackground-color: <<colour tab-background-selected>>;\n\tborder-right: 1px solid <<colour tab-background-selected>>;\n}\n\n.tc-tab-divider {\n\tborder-top: 1px solid <<colour tab-divider>>;\n}\n\n.tc-tab-divider.tc-vertical  {\n\tdisplay: none;\n}\n\n.tc-tab-content {\n\tmargin-top: 14px;\n}\n\n.tc-tab-content.tc-vertical  {\n\tdisplay: inline-block;\n\tvertical-align: top;\n\tpadding-top: 0;\n\tpadding-left: 14px;\n\tborder-left: 1px solid <<colour tab-border>>;\n\t-webkit-flex: 1 0 70%;\n\tflex: 1 0 70%;\n}\n\n.tc-sidebar-lists .tc-tab-buttons {\n\tmargin-bottom: -1px;\n}\n\n.tc-sidebar-lists .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour sidebar-tab-background-selected>>;\n\tcolor: <<colour 
sidebar-tab-foreground-selected>>;\n\tborder-left: 1px solid <<colour sidebar-tab-border-selected>>;\n\tborder-top: 1px solid <<colour sidebar-tab-border-selected>>;\n\tborder-right: 1px solid <<colour sidebar-tab-border-selected>>;\n}\n\n.tc-sidebar-lists .tc-tab-buttons button {\n\tbackground-color: <<colour sidebar-tab-background>>;\n\tcolor: <<colour sidebar-tab-foreground>>;\n\tborder-left: 1px solid <<colour sidebar-tab-border>>;\n\tborder-top: 1px solid <<colour sidebar-tab-border>>;\n\tborder-right: 1px solid <<colour sidebar-tab-border>>;\n}\n\n.tc-sidebar-lists .tc-tab-divider {\n\tborder-top: 1px solid <<colour sidebar-tab-divider>>;\n}\n\n.tc-more-sidebar .tc-tab-buttons button {\n\tdisplay: block;\n\twidth: 100%;\n\tbackground-color: <<colour sidebar-tab-background>>;\n\tborder-top: none;\n\tborder-left: none;\n\tborder-bottom: none;\n\tborder-right: 1px solid #ccc;\n\tmargin-bottom: inherit;\n}\n\n.tc-more-sidebar .tc-tab-buttons button.tc-tab-selected {\n\tbackground-color: <<colour sidebar-tab-background-selected>>;\n\tborder: none;\n}\n\n/*\n** Alerts\n*/\n\n.tc-alerts {\n\tposition: fixed;\n\ttop: 0;\n\tleft: 0;\n\tmax-width: 500px;\n\tz-index: 20000;\n}\n\n.tc-alert {\n\tposition: relative;\n\tmargin: 28px;\n\tpadding: 14px 14px 14px 14px;\n\tborder: 2px solid <<colour alert-border>>;\n\tbackground-color: <<colour alert-background>>;\n}\n\n.tc-alert-toolbar {\n\tposition: absolute;\n\ttop: 14px;\n\tright: 14px;\n}\n\n.tc-alert-toolbar svg {\n\tfill: <<colour alert-muted-foreground>>;\n}\n\n.tc-alert-subtitle {\n\tcolor: <<colour alert-muted-foreground>>;\n\tfont-weight: bold;\n}\n\n.tc-alert-highlight {\n\tcolor: <<colour alert-highlight>>;\n}\n\n@media (min-width: {{$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint}}) {\n\n\t.tc-static-alert {\n\t\tposition: relative;\n\t}\n\n\t.tc-static-alert-inner {\n\t\tposition: absolute;\n\t\tz-index: 100;\n\t}\n\n}\n\n.tc-static-alert-inner {\n\tpadding: 0 2px 2px 42px;\n\tcolor: <<colour static-alert-foreground>>;\n}\n\n/*\n** Control panel\n*/\n\n.tc-control-panel td {\n\tpadding: 4px;\n}\n\n.tc-control-panel table, .tc-control-panel table input, .tc-control-panel table textarea {\n\twidth: 100%;\n}\n\n.tc-plugin-info {\n\tdisplay: block;\n\tborder: 1px solid <<colour muted-foreground>>;\n\tbackground-colour: <<colour background>>;\n\tmargin: 0.5em 0 0.5em 0;\n\tpadding: 4px;\n}\n\n.tc-plugin-info-disabled {\n\tbackground: -webkit-repeating-linear-gradient(45deg, #ff0, #ff0 10px, #eee 10px, #eee 20px);\n\tbackground: repeating-linear-gradient(45deg, #ff0, #ff0 10px, #eee 10px, #eee 20px);\n}\n\n.tc-plugin-info-disabled:hover {\n\tbackground: -webkit-repeating-linear-gradient(45deg, #aa0, #aa0 10px, #888 10px, #888 20px);\n\tbackground: repeating-linear-gradient(45deg, #aa0, #aa0 10px, #888 10px, #888 20px);\n}\n\na.tc-tiddlylink.tc-plugin-info:hover {\n\ttext-decoration: none;\n\tbackground-color: <<colour primary>>;\n\tcolor: <<colour background>>;\n\tfill: <<colour foreground>>;\n}\n\na.tc-tiddlylink.tc-plugin-info:hover .tc-plugin-info > .tc-plugin-info-chunk > svg {\n\tfill: <<colour foreground>>;\n}\n\n.tc-plugin-info-chunk {\n\tdisplay: inline-block;\n\tvertical-align: middle;\n}\n\n.tc-plugin-info-chunk h1 {\n\tfont-size: 1em;\n\tmargin: 2px 0 2px 0;\n}\n\n.tc-plugin-info-chunk h2 {\n\tfont-size: 0.8em;\n\tmargin: 2px 0 2px 0;\n}\n\n.tc-plugin-info-chunk div {\n\tfont-size: 0.7em;\n\tmargin: 2px 0 2px 0;\n}\n\n.tc-plugin-info:hover > .tc-plugin-info-chunk > img, .tc-plugin-info:hover > .tc-plugin-info-chunk > svg 
{\n\twidth: 2em;\n\theight: 2em;\n\tfill: <<colour foreground>>;\n}\n\n.tc-plugin-info > .tc-plugin-info-chunk > img, .tc-plugin-info > .tc-plugin-info-chunk > svg {\n\twidth: 2em;\n\theight: 2em;\n\tfill: <<colour muted-foreground>>;\n}\n\n.tc-plugin-info.tc-small-icon > .tc-plugin-info-chunk > img, .tc-plugin-info.tc-small-icon > .tc-plugin-info-chunk > svg {\n\twidth: 1em;\n\theight: 1em;\n}\n\n.tc-plugin-info-dropdown {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tmargin-top: -8px;\n}\n\n.tc-plugin-info-dropdown-message {\n\tbackground: <<colour message-background>>;\n\tpadding: 0.5em 1em 0.5em 1em;\n\tfont-weight: bold;\n\tfont-size: 0.8em;\n}\n\n.tc-plugin-info-dropdown-body {\n\tpadding: 1em 1em 1em 1em;\n}\n\n/*\n** Message boxes\n*/\n\n.tc-message-box {\n\tborder: 1px solid <<colour message-border>>;\n\tbackground: <<colour message-background>>;\n\tpadding: 0px 21px 0px 21px;\n\tfont-size: 12px;\n\tline-height: 18px;\n\tcolor: <<colour message-foreground>>;\n}\n\n/*\n** Pictures\n*/\n\n.tc-bordered-image {\n\tborder: 1px solid <<colour muted-foreground>>;\n\tpadding: 5px;\n\tmargin: 5px;\n}\n\n/*\n** Floats\n*/\n\n.tc-float-right {\n\tfloat: right;\n}\n\n/*\n** Chooser\n*/\n\n.tc-chooser {\n\tborder: 1px solid <<colour table-border>>;\n}\n\n.tc-chooser-item {\n\tborder: 8px;\n\tpadding: 2px 4px;\n}\n\n.tc-chooser-item a.tc-tiddlylink {\n\tdisplay: block;\n\ttext-decoration: none;\n\tcolor: <<colour tiddler-link-foreground>>;\n\tbackground-color: <<colour tiddler-link-background>>;\n}\n\n.tc-chooser-item a.tc-tiddlylink:hover {\n\ttext-decoration: none;\n\tcolor: <<colour tiddler-link-background>>;\n\tbackground-color: <<colour tiddler-link-foreground>>;\n}\n\n/*\n** Palette swatches\n*/\n\n.tc-swatches-horiz {\n}\n\n.tc-swatches-horiz .tc-swatch {\n\tdisplay: inline-block;\n}\n\n.tc-swatch {\n\twidth: 2em;\n\theight: 2em;\n\tmargin: 0.4em;\n\tborder: 1px solid #888;\n}\n\n/*\n** Table of contents\n*/\n\n.tc-sidebar-lists .tc-table-of-contents {\n\twhite-space: nowrap;\n}\n\n.tc-table-of-contents button {\n\tcolor: <<colour sidebar-foreground>>;\n}\n\n.tc-table-of-contents svg {\n\twidth: 0.7em;\n\theight: 0.7em;\n\tvertical-align: middle;\n\tfill: <<colour sidebar-foreground>>;\n}\n\n.tc-table-of-contents ol {\n\tlist-style-type: none;\n\tpadding-left: 0;\n}\n\n.tc-table-of-contents ol ol {\n\tpadding-left: 1em;\n}\n\n.tc-table-of-contents li {\n\tfont-size: 1.0em;\n\tfont-weight: bold;\n}\n\n.tc-table-of-contents li a {\n\tfont-weight: bold;\n}\n\n.tc-table-of-contents li li {\n\tfont-size: 0.95em;\n\tfont-weight: normal;\n\tline-height: 1.4;\n}\n\n.tc-table-of-contents li li a {\n\tfont-weight: normal;\n}\n\n.tc-table-of-contents li li li {\n\tfont-size: 0.95em;\n\tfont-weight: 200;\n\tline-height: 1.5;\n}\n\n.tc-table-of-contents li li li a {\n\tfont-weight: bold;\n}\n\n.tc-table-of-contents li li li li {\n\tfont-size: 0.95em;\n\tfont-weight: 200;\n}\n\n.tc-tabbed-table-of-contents {\n\tdisplay: -webkit-flex;\n\tdisplay: flex;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents {\n\tz-index: 100;\n\tdisplay: inline-block;\n\tpadding-left: 1em;\n\tmax-width: 50%;\n\t-webkit-flex: 0 0 auto;\n\tflex: 0 0 auto;\n\tbackground: <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item > a,\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item-selected > a {\n\tdisplay: 
block;\n\tpadding: 0.12em 1em 0.12em 0.25em;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item > a {\n\tborder-top: 1px solid <<colour tab-background>>;\n\tborder-left: 1px solid <<colour tab-background>>;\n\tborder-bottom: 1px solid <<colour tab-background>>;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item > a:hover {\n\ttext-decoration: none;\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n\tbackground: <<colour tab-border>>;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item-selected > a {\n\tborder-top: 1px solid <<colour tab-border>>;\n\tborder-left: 1px solid <<colour tab-border>>;\n\tborder-bottom: 1px solid <<colour tab-border>>;\n\tbackground: <<colour background>>;\n\tmargin-right: -1px;\n}\n\n.tc-tabbed-table-of-contents .tc-table-of-contents .toc-item-selected > a:hover {\n\ttext-decoration: none;\n}\n\n.tc-tabbed-table-of-contents .tc-tabbed-table-of-contents-content {\n\tdisplay: inline-block;\n\tvertical-align: top;\n\tpadding-left: 1.5em;\n\tpadding-right: 1.5em;\n\tborder: 1px solid <<colour tab-border>>;\n\t-webkit-flex: 1 0 50%;\n\tflex: 1 0 50%;\n}\n\n/*\n** Dirty indicator\n*/\n\nbody.tc-dirty span.tc-dirty-indicator, body.tc-dirty span.tc-dirty-indicator svg {\n\tfill: <<colour dirty-indicator>>;\n\tcolor: <<colour dirty-indicator>>;\n}\n\n/*\n** File inputs\n*/\n\n.tc-file-input-wrapper {\n\tposition: relative;\n\toverflow: hidden;\n\tdisplay: inline-block;\n\tvertical-align: middle;\n}\n\n.tc-file-input-wrapper input[type=file] {\n\tposition: absolute;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbottom: 0;\n\tfont-size: 999px;\n\tmax-width: 100%;\n\tmax-height: 100%;\n\tfilter: alpha(opacity=0);\n\topacity: 0;\n\toutline: none;\n\tbackground: white;\n\tcursor: pointer;\n\tdisplay: inline-block;\n}\n\n/*\n** Thumbnail macros\n*/\n\n.tc-thumbnail-wrapper {\n\tposition: relative;\n\tdisplay: inline-block;\n\tmargin: 6px;\n\tvertical-align: top;\n}\n\n.tc-thumbnail-right-wrapper {\n\tfloat:right;\n\tmargin: 0.5em 0 0.5em 0.5em;\n}\n\n.tc-thumbnail-image {\n\ttext-align: center;\n\toverflow: hidden;\n\tborder-radius: 3px;\n}\n\n.tc-thumbnail-image svg,\n.tc-thumbnail-image img {\n\tfilter: alpha(opacity=1);\n\topacity: 1;\n\tmin-width: 100%;\n\tmin-height: 100%;\n\tmax-width: 100%;\n}\n\n.tc-thumbnail-wrapper:hover .tc-thumbnail-image svg,\n.tc-thumbnail-wrapper:hover .tc-thumbnail-image img {\n\tfilter: alpha(opacity=0.8);\n\topacity: 0.8;\n}\n\n.tc-thumbnail-background {\n\tposition: absolute;\n\tborder-radius: 3px;\n}\n\n.tc-thumbnail-icon svg,\n.tc-thumbnail-icon img {\n\twidth: 3em;\n\theight: 3em;\n\t<<filter \"drop-shadow(2px 2px 4px rgba(0,0,0,0.3))\">>\n}\n\n.tc-thumbnail-wrapper:hover .tc-thumbnail-icon svg,\n.tc-thumbnail-wrapper:hover .tc-thumbnail-icon img {\n\tfill: #fff;\n\t<<filter \"drop-shadow(3px 3px 4px rgba(0,0,0,0.6))\">>\n}\n\n.tc-thumbnail-icon {\n\tposition: absolute;\n\ttop: 0;\n\tleft: 0;\n\tright: 0;\n\tbottom: 0;\n\tdisplay: -webkit-flex;\n\t-webkit-align-items: center;\n\t-webkit-justify-content: center;\n\tdisplay: flex;\n\talign-items: center;\n\tjustify-content: center;\n}\n\n.tc-thumbnail-caption {\n\tposition: absolute;\n\tbackground-color: #777;\n\tcolor: #fff;\n\ttext-align: center;\n\tbottom: 0;\n\twidth: 100%;\n\tfilter: alpha(opacity=0.9);\n\topacity: 0.9;\n\tline-height: 1.4;\n\tborder-bottom-left-radius: 3px;\n\tborder-bottom-right-radius: 3px;\n}\n\n.tc-thumbnail-wrapper:hover 
.tc-thumbnail-caption {\n\tfilter: alpha(opacity=1);\n\topacity: 1;\n}\n\n/*\n** Errors\n*/\n\n.tc-error {\n\tbackground: #f00;\n\tcolor: #fff;\n}\n"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize",
            "text": "15px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/bodylineheight": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/bodylineheight",
            "text": "22px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/fontsize": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/fontsize",
            "text": "14px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/lineheight": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/lineheight",
            "text": "20px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storyleft": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storyleft",
            "text": "0px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storytop": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storytop",
            "text": "0px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storyright": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storyright",
            "text": "770px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/storywidth": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/storywidth",
            "text": "770px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth",
            "text": "686px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint",
            "text": "960px"
        },
        "$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth": {
            "title": "$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth",
            "text": "350px"
        },
        "$:/themes/tiddlywiki/vanilla/options/stickytitles": {
            "title": "$:/themes/tiddlywiki/vanilla/options/stickytitles",
            "text": "no"
        },
        "$:/themes/tiddlywiki/vanilla/options/sidebarlayout": {
            "title": "$:/themes/tiddlywiki/vanilla/options/sidebarlayout",
            "text": "fixed-fluid"
        },
        "$:/themes/tiddlywiki/vanilla/options/codewrapping": {
            "title": "$:/themes/tiddlywiki/vanilla/options/codewrapping",
            "text": "pre-wrap"
        },
        "$:/themes/tiddlywiki/vanilla/reset": {
            "title": "$:/themes/tiddlywiki/vanilla/reset",
            "type": "text/plain",
            "text": "/*! normalize.css v3.0.0 | MIT License | git.io/normalize */\n\n/**\n * 1. Set default font family to sans-serif.\n * 2. Prevent iOS text size adjust after orientation change, without disabling\n *    user zoom.\n */\n\nhtml {\n  font-family: sans-serif; /* 1 */\n  -ms-text-size-adjust: 100%; /* 2 */\n  -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/**\n * Remove default margin.\n */\n\nbody {\n  margin: 0;\n}\n\n/* HTML5 display definitions\n   ========================================================================== */\n\n/**\n * Correct `block` display not defined in IE 8/9.\n */\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nnav,\nsection,\nsummary {\n  display: block;\n}\n\n/**\n * 1. Correct `inline-block` display not defined in IE 8/9.\n * 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n */\n\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block; /* 1 */\n  vertical-align: baseline; /* 2 */\n}\n\n/**\n * Prevent modern browsers from displaying `audio` without controls.\n * Remove excess height in iOS 5 devices.\n */\n\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n\n/**\n * Address `[hidden]` styling not present in IE 8/9.\n * Hide the `template` element in IE, Safari, and Firefox < 22.\n */\n\n[hidden],\ntemplate {\n  display: none;\n}\n\n/* Links\n   ========================================================================== */\n\n/**\n * Remove the gray background color from active links in IE 10.\n */\n\na {\n  background: transparent;\n}\n\n/**\n * Improve readability when focused and also mouse hovered in all browsers.\n */\n\na:active,\na:hover {\n  outline: 0;\n}\n\n/* Text-level semantics\n   ========================================================================== */\n\n/**\n * Address styling not present in IE 8/9, Safari 5, and Chrome.\n */\n\nabbr[title] {\n  border-bottom: 1px dotted;\n}\n\n/**\n * Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome.\n */\n\nb,\nstrong {\n  font-weight: bold;\n}\n\n/**\n * Address styling not present in Safari 5 and Chrome.\n */\n\ndfn {\n  font-style: italic;\n}\n\n/**\n * Address variable `h1` font-size and margin within `section` and `article`\n * contexts in Firefox 4+, Safari 5, and Chrome.\n */\n\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\n\n/**\n * Address styling not present in IE 8/9.\n */\n\nmark {\n  background: #ff0;\n  color: #000;\n}\n\n/**\n * Address inconsistent and variable font size in all browsers.\n */\n\nsmall {\n  font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` affecting `line-height` in all browsers.\n */\n\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\n\nsup {\n  top: -0.5em;\n}\n\nsub {\n  bottom: -0.25em;\n}\n\n/* Embedded content\n   ========================================================================== */\n\n/**\n * Remove border when inside `a` element in IE 8/9.\n */\n\nimg {\n  border: 0;\n}\n\n/**\n * Correct overflow displayed oddly in IE 9.\n */\n\nsvg:not(:root) {\n  overflow: hidden;\n}\n\n/* Grouping content\n   ========================================================================== */\n\n/**\n * Address margin not present in IE 8/9 and Safari 5.\n */\n\nfigure {\n  margin: 1em 40px;\n}\n\n/**\n * Address differences between Firefox and other browsers.\n */\n\nhr {\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n  height: 0;\n}\n\n/**\n * Contain overflow in all browsers.\n 
*/\n\npre {\n  overflow: auto;\n}\n\n/**\n * Address odd `em`-unit font size rendering in all browsers.\n */\n\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\n\n/* Forms\n   ========================================================================== */\n\n/**\n * Known limitation: by default, Chrome and Safari on OS X allow very limited\n * styling of `select`, unless a `border` property is set.\n */\n\n/**\n * 1. Correct color not being inherited.\n *    Known issue: affects color of disabled elements.\n * 2. Correct font properties not being inherited.\n * 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit; /* 1 */\n  font: inherit; /* 2 */\n  margin: 0; /* 3 */\n}\n\n/**\n * Address `overflow` set to `hidden` in IE 8/9/10.\n */\n\nbutton {\n  overflow: visible;\n}\n\n/**\n * Address inconsistent `text-transform` inheritance for `button` and `select`.\n * All other form control elements do not inherit `text-transform` values.\n * Correct `button` style inheritance in Firefox, IE 8+, and Opera\n * Correct `select` style inheritance in Firefox.\n */\n\nbutton,\nselect {\n  text-transform: none;\n}\n\n/**\n * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n *    and `video` controls.\n * 2. Correct inability to style clickable `input` types in iOS.\n * 3. Improve usability and consistency of cursor style between image-type\n *    `input` and others.\n */\n\nbutton,\nhtml input[type=\"button\"], /* 1 */\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button; /* 2 */\n  cursor: pointer; /* 3 */\n}\n\n/**\n * Re-set default cursor for disabled elements.\n */\n\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\n\n/**\n * Remove inner padding and border in Firefox 4+.\n */\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\n\n/**\n * Address Firefox 4+ setting `line-height` on `input` using `!important` in\n * the UA stylesheet.\n */\n\ninput {\n  line-height: normal;\n}\n\n/**\n * It's recommended that you don't attempt to style these elements.\n * Firefox's implementation doesn't respect box-sizing, padding, or width.\n *\n * 1. Address box sizing set to `content-box` in IE 8/9/10.\n * 2. Remove excess padding in IE 8/9/10.\n */\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box; /* 1 */\n  padding: 0; /* 2 */\n}\n\n/**\n * Fix the cursor style for Chrome's increment/decrement buttons. For certain\n * `font-size` values of the `input`, it causes the cursor style of the\n * decrement button to change from `default` to `text`.\n */\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\n\n/**\n * 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome.\n * 2. 
Address `box-sizing` set to `border-box` in Safari 5 and Chrome\n *    (include `-moz` to future-proof).\n */\n\ninput[type=\"search\"] {\n  -webkit-appearance: textfield; /* 1 */\n  -moz-box-sizing: content-box;\n  -webkit-box-sizing: content-box; /* 2 */\n  box-sizing: content-box;\n}\n\n/**\n * Remove inner padding and search cancel button in Safari and Chrome on OS X.\n * Safari (but not Chrome) clips the cancel button when the search input has\n * padding (and `textfield` appearance).\n */\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\n\n/**\n * Define consistent border, margin, and padding.\n */\n\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\n\n/**\n * 1. Correct `color` not being inherited in IE 8/9.\n * 2. Remove padding so people aren't caught out if they zero out fieldsets.\n */\n\nlegend {\n  border: 0; /* 1 */\n  padding: 0; /* 2 */\n}\n\n/**\n * Remove default vertical scrollbar in IE 8/9.\n */\n\ntextarea {\n  overflow: auto;\n}\n\n/**\n * Don't inherit the `font-weight` (applied by a rule above).\n * NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n */\n\noptgroup {\n  font-weight: bold;\n}\n\n/* Tables\n   ========================================================================== */\n\n/**\n * Remove most spacing between table cells.\n */\n\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\n\ntd,\nth {\n  padding: 0;\n}\n"
        },
        "$:/themes/tiddlywiki/vanilla/settings/fontfamily": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/fontfamily",
            "text": "\"Helvetica Neue\", Helvetica, Arial, \"Lucida Grande\", \"DejaVu Sans\", sans-serif"
        },
        "$:/themes/tiddlywiki/vanilla/settings/codefontfamily": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/codefontfamily",
            "text": "Monaco, Consolas, \"Lucida Console\", \"DejaVu Sans Mono\", monospace"
        },
        "$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment",
            "text": "fixed"
        },
        "$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize": {
            "title": "$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize",
            "text": "auto"
        },
        "$:/themes/tiddlywiki/vanilla/sticky": {
            "title": "$:/themes/tiddlywiki/vanilla/sticky",
            "text": "<$reveal state=\"$:/themes/tiddlywiki/vanilla/options/stickytitles\" type=\"match\" text=\"yes\">\n``\n.tc-tiddler-title {\n\tposition: -webkit-sticky;\n\tposition: -moz-sticky;\n\tposition: -o-sticky;\n\tposition: -ms-sticky;\n\tposition: sticky;\n\ttop: 0px;\n\tbackground: ``<<colour tiddler-background>>``;\n\tz-index: 500;\n}\n``\n</$reveal>\n"
        },
        "$:/themes/tiddlywiki/vanilla/themetweaks": {
            "title": "$:/themes/tiddlywiki/vanilla/themetweaks",
            "tags": "$:/tags/ControlPanel/Appearance",
            "caption": "{{$:/language/ThemeTweaks/ThemeTweaks}}",
            "text": "\\define lingo-base() $:/language/ThemeTweaks/\n\n\\define replacement-text()\n[img[$(imageTitle)$]]\n\\end\n\n\\define backgroundimage-dropdown()\n<div class=\"tc-drop-down-wrapper\">\n<$button popup=<<qualify \"$:/state/popup/themetweaks/backgroundimage\">> class=\"tc-btn-invisible tc-btn-dropdown\">{{$:/core/images/down-arrow}}</$button>\n<$reveal state=<<qualify \"$:/state/popup/themetweaks/backgroundimage\">> type=\"popup\" position=\"belowleft\" text=\"\" default=\"\">\n<div class=\"tc-drop-down\">\n<$macrocall $name=\"image-picker\" actions=\"\"\"\n\n<$action-setfield\n\t$tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimage\"\n\t$value=<<imageTitle>>\n/>\n\n\"\"\"/>\n</div>\n</$reveal>\n</div>\n\\end\n\n\\define backgroundimageattachment-dropdown()\n<$select tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment\" default=\"scroll\">\n<option value=\"scroll\"><<lingo Settings/BackgroundImageAttachment/Scroll>></option>\n<option value=\"fixed\"><<lingo Settings/BackgroundImageAttachment/Fixed>></option>\n</$select>\n\\end\n\n\\define backgroundimagesize-dropdown()\n<$select tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize\" default=\"scroll\">\n<option value=\"auto\"><<lingo Settings/BackgroundImageSize/Auto>></option>\n<option value=\"cover\"><<lingo Settings/BackgroundImageSize/Cover>></option>\n<option value=\"contain\"><<lingo Settings/BackgroundImageSize/Contain>></option>\n</$select>\n\\end\n\n<<lingo ThemeTweaks/Hint>>\n\n! <<lingo Options>>\n\n|<$link to=\"$:/themes/tiddlywiki/vanilla/options/sidebarlayout\"><<lingo Options/SidebarLayout>></$link> |<$select tiddler=\"$:/themes/tiddlywiki/vanilla/options/sidebarlayout\"><option value=\"fixed-fluid\"><<lingo Options/SidebarLayout/Fixed-Fluid>></option><option value=\"fluid-fixed\"><<lingo Options/SidebarLayout/Fluid-Fixed>></option></$select> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/options/stickytitles\"><<lingo Options/StickyTitles>></$link><br>//<<lingo Options/StickyTitles/Hint>>// |<$select tiddler=\"$:/themes/tiddlywiki/vanilla/options/stickytitles\"><option value=\"no\">{{$:/language/No}}</option><option value=\"yes\">{{$:/language/Yes}}</option></$select> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/options/codewrapping\"><<lingo Options/CodeWrapping>></$link> |<$select tiddler=\"$:/themes/tiddlywiki/vanilla/options/codewrapping\"><option value=\"pre\">{{$:/language/No}}</option><option value=\"pre-wrap\">{{$:/language/Yes}}</option></$select> |\n\n! 
<<lingo Settings>>\n\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/fontfamily\"><<lingo Settings/FontFamily>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/settings/fontfamily\" default=\"\" tag=\"input\"/> | |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/codefontfamily\"><<lingo Settings/CodeFontFamily>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/settings/codefontfamily\" default=\"\" tag=\"input\"/> | |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimage\"><<lingo Settings/BackgroundImage>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimage\" default=\"\" tag=\"input\"/> |<<backgroundimage-dropdown>> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimageattachment\"><<lingo Settings/BackgroundImageAttachment>></$link> |<<backgroundimageattachment-dropdown>> | |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/settings/backgroundimagesize\"><<lingo Settings/BackgroundImageSize>></$link> |<<backgroundimagesize-dropdown>> | |\n\n! <<lingo Metrics>>\n\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/fontsize\"><<lingo Metrics/FontSize>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/fontsize\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/lineheight\"><<lingo Metrics/LineHeight>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/lineheight\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize\"><<lingo Metrics/BodyFontSize>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/bodyfontsize\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/bodylineheight\"><<lingo Metrics/BodyLineHeight>></$link> |<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/bodylineheight\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storyleft\"><<lingo Metrics/StoryLeft>></$link><br>//<<lingo Metrics/StoryLeft/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storyleft\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storytop\"><<lingo Metrics/StoryTop>></$link><br>//<<lingo Metrics/StoryTop/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storytop\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storyright\"><<lingo Metrics/StoryRight>></$link><br>//<<lingo Metrics/StoryRight/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storyright\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/storywidth\"><<lingo Metrics/StoryWidth>></$link><br>//<<lingo Metrics/StoryWidth/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/storywidth\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth\"><<lingo Metrics/TiddlerWidth>></$link><br>//<<lingo Metrics/TiddlerWidth/Hint>>//<br> |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/tiddlerwidth\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint\"><<lingo Metrics/SidebarBreakpoint>></$link><br>//<<lingo Metrics/SidebarBreakpoint/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarbreakpoint\" default=\"\" tag=\"input\"/> |\n|<$link to=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth\"><<lingo Metrics/SidebarWidth>></$link><br>//<<lingo 
Metrics/SidebarWidth/Hint>>// |^<$edit-text tiddler=\"$:/themes/tiddlywiki/vanilla/metrics/sidebarwidth\" default=\"\" tag=\"input\"/> |\n"
        }
    }
}
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192716.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-2|PAPER Tue-SS-3-6-2 — LEAP Diarization System for the Second DIHARD Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LEAP Diarization System for the Second DIHARD Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191891.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-11|PAPER Wed-P-6-A-11 — A Study of x-Vector Based Speaker Recognition on Short Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of x-Vector Based Speaker Recognition on Short Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192741.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-4|PAPER Tue-P-5-D-4 — Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192685.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-5|PAPER Tue-P-5-D-5 — Quantifying Expectation Modulation in Human Speech Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Expectation Modulation in Human Speech Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-6|PAPER Thu-P-10-A-6 — Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191305.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-1|PAPER Wed-O-7-2-1 — Neural Named Entity Recognition from Subword Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Named Entity Recognition from Subword Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191975.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-9|PAPER Tue-P-3-B-9 — Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192526.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-5|PAPER Thu-O-9-1-5 — A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191847.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — A Frequency Normalization Technique for Kindergarten Speech Recognition Inspired by the Role of f,,o,, in Vowel Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Frequency Normalization Technique for Kindergarten Speech Recognition Inspired by the Role of f,,o,, in Vowel Perception</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192988.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-7|PAPER Wed-SS-6-4-7 — Voice Quality and Between-Frame Entropy for Sleepiness Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality and Between-Frame Entropy for Sleepiness Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-15|PAPER Mon-P-1-A-15 — Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-2|PAPER Mon-P-2-B-2 — Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-3|PAPER Wed-O-8-2-3 — Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-10|PAPER Mon-P-2-B-10 — A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-4|PAPER Wed-S&T-3-4 — SpeechMarker: A Voice Based Multi-Level Attendance Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMarker: A Voice Based Multi-Level Attendance Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192280.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-9|PAPER Thu-P-10-A-9 — Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192280.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-9|PAPER Thu-P-10-A-9 — Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192612.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191804.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-4|PAPER Thu-P-9-B-4 — Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-6|PAPER Tue-O-5-3-6 — Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198011.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-3|PAPER Wed-S&T-3-3 — Multimodal Dialog with the MALACH Audiovisual Archive]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Dialog with the MALACH Audiovisual Archive</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198009.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-2|PAPER Thu-S&T-6-2 — Framework for Conducting Tasks Requiring Human Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framework for Conducting Tasks Requiring Human Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191945.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-9|PAPER Thu-P-10-C-9 — Improving Speech Synthesis with Discourse Relations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Synthesis with Discourse Relations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-5|PAPER Tue-P-3-D-5 — “ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-4|PAPER Thu-O-10-5-4 — Code-Switching Detection Using ASR-Generated Language Posteriors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Detection Using ASR-Generated Language Posteriors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192637.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-2|PAPER Wed-P-8-D-2 — An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-5|PAPER Mon-O-2-1-5 — A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-11|PAPER Tue-P-5-C-11 — Lattice Generation in Attention-Based Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Generation in Attention-Based Speech Recognition Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192720.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-3|PAPER Thu-P-10-B-3 — Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-10|PAPER Tue-P-3-D-10 — Neural Network-Based Modeling of Phonetic Durations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network-Based Modeling of Phonetic Durations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191592.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-6|PAPER Thu-P-9-C-6 — Voice Quality as a Turn-Taking Cue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality as a Turn-Taking Cue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192792.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-5|PAPER Wed-O-7-5-5 — Coarse-to-Fine Optimization for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coarse-to-Fine Optimization for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192599.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-5|PAPER Mon-O-1-1-5 — Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-5|PAPER Mon-O-2-5-5 — Towards Variability Resistant Dialectal Speech Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Variability Resistant Dialectal Speech Evaluation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-2|PAPER Mon-P-1-C-2 — Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198030.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-5|PAPER Tue-S&T-2-5 — FarSpeech: Arabic Natural Language Processing for Live Arabic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FarSpeech: Arabic Natural Language Processing for Live Arabic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-4|PAPER Mon-O-1-5-4 — Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191701.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-4|PAPER Thu-O-10-4-4 — CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-5|PAPER Tue-P-4-D-5 — Time to Frequency Domain Mapping of the Voice Source: The Influence of Open Quotient and Glottal Skew on the Low End of the Source Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Time to Frequency Domain Mapping of the Voice Source: The Influence of Open Quotient and Glottal Skew on the Low End of the Source Spectrum</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-4|PAPER Wed-O-6-1-4 — The Role of Voice Quality in the Perception of Prominence in Synthetic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Voice Quality in the Perception of Prominence in Synthetic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-13|PAPER Thu-P-10-C-13 — Dual Encoder Classifier Models as Constraints in Neural Text Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Encoder Classifier Models as Constraints in Neural Text Normalization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192151.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-11|PAPER Thu-P-10-D-11 — Hypernasality Severity Detection Using Constant Q Cepstral Coefficients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hypernasality Severity Detection Using Constant Q Cepstral Coefficients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191796.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-6|PAPER Thu-P-9-B-6 — Deep Sensing of Breathing Signal During Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Sensing of Breathing Signal During Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191180.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-11|PAPER Thu-P-9-D-11 — Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192912.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-8|PAPER Mon-P-1-A-8 — Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-5|PAPER Tue-O-3-1-5 — Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-2|PAPER Tue-O-5-3-2 — Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-1|PAPER Tue-P-5-B-1 — Multilingual Speech Recognition with Corpus Relatedness Sampling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Corpus Relatedness Sampling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192278.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-6|PAPER Wed-SS-6-4-6 — Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-6|PAPER Wed-S&T-4-6 — SANTLR: Speech Annotation Toolkit for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SANTLR: Speech Annotation Toolkit for Low Resource Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-3|PAPER Thu-O-10-5-3 — Variational Attention Using Articulatory Priors for Generating Code Mixed Speech Using Monolingual Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Attention Using Articulatory Priors for Generating Code Mixed Speech Using Monolingual Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193109.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-2|PAPER Mon-P-2-D-2 — Towards a Speaker Independent Speech-BCI Using Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Speaker Independent Speech-BCI Using Speaker Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192879.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-2|PAPER Tue-O-5-2-2 — An Analysis of Local Monotonic Attention Variants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Local Monotonic Attention Variants</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192225.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-3|PAPER Thu-O-10-1-3 — Language Modeling with Deep Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling with Deep Transformers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192772.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-4|PAPER Tue-P-5-B-4 — Recognition of Latin American Spanish Using Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Latin American Spanish Using Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191773.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-9|PAPER Wed-P-8-D-9 — Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192347.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-8|PAPER Wed-P-8-C-8 — Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-12|PAPER Tue-SS-4-4-12 — A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Path Signature Approach for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-7|PAPER Tue-P-4-C-7 — Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-2|PAPER Wed-P-7-C-2 — Modeling User Context for Valence Prediction from Narratives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling User Context for Valence Prediction from Narratives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191945.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-9|PAPER Thu-P-10-C-9 — Improving Speech Synthesis with Discourse Relations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Synthesis with Discourse Relations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192394.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-2|PAPER Wed-P-8-E-2 — Neural Network Distillation on IoT Platforms for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network Distillation on IoT Platforms for Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-3|PAPER Tue-P-5-A-3 — Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191705.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-1|PAPER Mon-O-1-5-1 — High Quality, Lightweight and Adaptable TTS Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality, Lightweight and Adaptable TTS Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192702.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-2|PAPER Mon-O-1-1-2 — Very Deep Self-Attention Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Very Deep Self-Attention Networks for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-1|PAPER Tue-O-3-5-1 — Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-8|PAPER Tue-P-3-D-8 — Cross-Lingual Consistency of Phonological Features: An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Consistency of Phonological Features: An Empirical Study</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192740.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-12|PAPER Tue-P-5-C-12 — Sampling from Stochastic Finite Automata with Applications to CTC Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sampling from Stochastic Finite Automata with Applications to CTC Decoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-5|PAPER Tue-O-5-1-5 — Using a Manifold Vocoder for Spectral Voice and Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using a Manifold Vocoder for Spectral Voice and Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-4|PAPER Tue-SS-4-4-4 — Robust Bayesian and Light Neural Networks for Voice Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Bayesian and Light Neural Networks for Voice Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192890.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-3|PAPER Wed-P-7-D-3 — V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-5|PAPER Wed-P-7-D-5 — Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191846.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-6|PAPER Wed-P-7-E-6 — Open-Vocabulary Keyword Spotting with Audio and Text Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Vocabulary Keyword Spotting with Audio and Text Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191877.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-5|PAPER Wed-P-8-B-5 — Binary Speech Features for Keyword Spotting Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Binary Speech Features for Keyword Spotting Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192618.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-4|PAPER Wed-O-8-1-4 — MobiLipNet: Resource-Efficient Deep Learning Based Lipreading]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiLipNet: Resource-Efficient Deep Learning Based Lipreading</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-6|PAPER Wed-O-6-2-6 — Self-Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-6|PAPER Mon-P-2-E-6 — Unsupervised Low-Rank Representations for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Low-Rank Representations for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-1|PAPER Tue-P-3-C-1 — Deep Hierarchical Fusion with Application in Sentiment Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hierarchical Fusion with Application in Sentiment Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191148.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-3|PAPER Thu-SS-9-6-3 — Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191703.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-4|PAPER Thu-SS-9-6-4 — Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191873.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-6|PAPER Wed-P-8-B-6 — wav2vec: Unsupervised Pre-Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">wav2vec: Unsupervised Pre-Training for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-1|PAPER Wed-P-7-D-1 — Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192462.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-3|PAPER Tue-SS-3-6-3 — ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-3|PAPER Wed-P-6-A-3 — Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Progressive Speech Enhancement with Residual Connections</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192240.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-4|PAPER Wed-O-8-5-4 — A Deep Neural Network for Short-Segment Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Neural Network for Short-Segment Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192526.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-5|PAPER Thu-O-9-1-5 — A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-3|PAPER Tue-SS-5-6-3 — Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191349.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-10|PAPER Mon-P-1-C-10 — Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-1|PAPER Thu-S&T-6-1 — Elpis, an Accessible Speech-to-Text Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Elpis, an Accessible Speech-to-Text Tool</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-6|PAPER Wed-O-7-2-6 — Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191696.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-5|PAPER Thu-P-9-C-5 — Personalized Dialogue Response Generation Learned from Monologues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalized Dialogue Response Generation Learned from Monologues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191621.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-6|PAPER Thu-P-9-D-6 — Prosodic Phrase Alignment for Machine Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Phrase Alignment for Machine Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192950.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-3|PAPER Tue-P-4-D-3 — Compensation for French Liquid Deletion During Auditory Sentence Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compensation for French Liquid Deletion During Auditory Sentence Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-2|PAPER Mon-P-2-C-2 — One-vs-All Models for Asynchronous Training: An Empirical Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-vs-All Models for Asynchronous Training: An Empirical Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191773.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-9|PAPER Wed-P-8-D-9 — Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192988.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-7|PAPER Wed-SS-6-4-7 — Voice Quality and Between-Frame Entropy for Sleepiness Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality and Between-Frame Entropy for Sleepiness Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-3|PAPER Tue-O-5-1-3 — Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192908.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-2|PAPER Thu-P-9-E-2 — Deep Multitask Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Multitask Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193095.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-1|PAPER Wed-P-6-C-1 — Optimizing Speech-Input Length for Speaker-Independent Depression Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Speech-Input Length for Speaker-Independent Depression Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192240.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-4|PAPER Wed-O-8-5-4 — A Deep Neural Network for Short-Segment Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Neural Network for Short-Segment Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192371.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-2|PAPER Tue-O-4-1-2 — Attention Based Hybrid i-Vector BLSTM Model for Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Based Hybrid i-Vector BLSTM Model for Language Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-5|PAPER Mon-P-2-D-5 — Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-11|PAPER Wed-P-7-E-11 — A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-3|PAPER Wed-O-8-3-3 — Speech Based Emotion Prediction: Can a Linear Model Work?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Based Emotion Prediction: Can a Linear Model Work?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-3|PAPER Tue-P-3-D-3 — Quantifying Fundamental Frequency Modulation as a Function of Language, Speaking Style and Speaker]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Fundamental Frequency Modulation as a Function of Language, Speaking Style and Speaker</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192956.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-1|PAPER Wed-P-6-A-1 — Blind Channel Response Estimation for Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Channel Response Estimation for Replay Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192879.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-2|PAPER Tue-O-5-2-2 — An Analysis of Local Monotonic Attention Variants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Local Monotonic Attention Variants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-1|PAPER Mon-O-2-2-1 — Untranscribed Web Audio for Low Resource Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Untranscribed Web Audio for Low Resource Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192890.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-3|PAPER Wed-P-7-D-3 — V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-5|PAPER Wed-P-7-D-5 — Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-4|PAPER Mon-O-1-5-4 — Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-1|PAPER Tue-O-5-1-1 — Survey Talk: Preserving Privacy in Speaker and Speech Characterisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Preserving Privacy in Speaker and Speech Characterisation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-10|PAPER Tue-P-3-C-10 — Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191658.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-4|PAPER Thu-O-10-2-4 — Robust Speech Emotion Recognition Under Different Encoding Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Speech Emotion Recognition Under Different Encoding Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192502.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-5|PAPER Tue-P-3-C-5 — Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191826.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-4|PAPER Thu-P-9-C-4 — An Incremental Turn-Taking Model for Task-Oriented Dialog Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Incremental Turn-Taking Model for Task-Oriented Dialog Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193179.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-5|PAPER Tue-O-5-5-5 — Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193114.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-11|PAPER Thu-P-9-E-11 — My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-6|PAPER Thu-O-9-3-6 — Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-4|PAPER Wed-O-6-1-4 — The Role of Voice Quality in the Perception of Prominence in Synthetic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Voice Quality in the Perception of Prominence in Synthetic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192048.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-5|PAPER Tue-SS-5-6-5 — Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192244.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-3|PAPER Mon-O-1-2-3 — Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-12|PAPER Tue-SS-4-4-12 — A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-7|PAPER Tue-SS-4-4-7 — IIIT-H Spoofing Countermeasures for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IIIT-H Spoofing Countermeasures for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-6|PAPER Thu-SS-9-6-6 — Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-15|PAPER Mon-P-2-A-15 — Semi-Supervised Voice Conversion with Amortized Variational Inference]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Voice Conversion with Amortized Variational Inference</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-2|PAPER Wed-P-7-C-2 — Modeling User Context for Valence Prediction from Narratives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling User Context for Valence Prediction from Narratives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193060.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-4|PAPER Thu-O-10-1-4 — Scalable Multi Corpora Neural Language Models for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scalable Multi Corpora Neural Language Models for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191865.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-1|PAPER Mon-O-2-4-1 — Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-9|PAPER Tue-P-3-A-9 — Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-1|PAPER Wed-P-8-D-1 — Vietnamese Learners Tackling the German /ʃt/ in Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vietnamese Learners Tackling the German /ʃt/ in Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193051.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-2|PAPER Thu-P-9-D-2 — On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192742.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-2|PAPER Wed-P-6-A-2 — Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192460.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-2|PAPER Thu-O-9-2-2 — Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-6|PAPER Tue-P-5-D-6 — Perception of Pitch Contours in Speech and Nonspeech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Pitch Contours in Speech and Nonspeech</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-1|PAPER Wed-O-7-1-1 — Survey Talk: Recognition of Foreign-Accented Speech: Challenges and Opportunities for Human and Computer Speech Communication]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Recognition of Foreign-Accented Speech: Challenges and Opportunities for Human and Computer Speech Communication</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191150.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-5|PAPER Wed-P-8-D-5 — Speaking Rate, Information Density, and Information Rate in First-Language and Second-Language Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaking Rate, Information Density, and Information Rate in First-Language and Second-Language Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Althingi ASR System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192367.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-6|PAPER Thu-P-10-C-6 — Bootstrapping a Text Normalization System for an Inflected Language. Numbers as a Test Case]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bootstrapping a Text Normalization System for an Inflected Language. Numbers as a Test Case</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191806.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-11|PAPER Tue-P-3-D-11 — An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191817.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-4|PAPER Thu-P-9-D-4 — Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191790.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-1|PAPER Thu-O-9-3-1 — Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-7|PAPER Tue-P-4-B-7 — Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192650.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-13|PAPER Mon-P-2-D-13 — Strength and Structure: Coupling Tones with Oral Constriction Gestures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Strength and Structure: Coupling Tones with Oral Constriction Gestures</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192389.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-8|PAPER Thu-P-10-D-8 — Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191773.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-9|PAPER Wed-P-8-D-9 — Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192066.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-9|PAPER Tue-P-4-D-9 — Acoustic Correlates of Phonation Type in Chichimec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Correlates of Phonation Type in Chichimec</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-10|PAPER Wed-P-8-D-10 — Nasal Consonant Discrimination in Infant- and Adult-Directed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Consonant Discrimination in Infant- and Adult-Directed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192446.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-5|PAPER Wed-O-7-1-5 — Foreign-Language Knowledge Enhances Artificial-Language Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Foreign-Language Knowledge Enhances Artificial-Language Segmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192729.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-2|PAPER Tue-O-3-4-2 — ERP Signal Analysis with Temporal Resolution Using a Time Window Bank]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ERP Signal Analysis with Temporal Resolution Using a Time Window Bank</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-1|PAPER Tue-P-4-C-1 — Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-4|PAPER Wed-P-8-D-4 — Using Prosody to Discover Word Order Alternations in a Novel Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Prosody to Discover Word Order Alternations in a Novel Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192545.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-1|PAPER Thu-O-10-3-1 — Sentence Prosody and  Wh-Indeterminates in Taiwan Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sentence Prosody and  Wh-Indeterminates in Taiwan Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192339.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-4|PAPER Wed-P-7-B-4 — Towards Debugging Deep Neural Networks by Generating Speech Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Debugging Deep Neural Networks by Generating Speech Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-12|PAPER Wed-P-6-C-12 — Automatic Assessment of Language Impairment Based on Raw ASR Output]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Assessment of Language Impairment Based on Raw ASR Output</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-8|PAPER Tue-P-5-B-8 — Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-11|PAPER Wed-P-8-C-11 — Better Morphology Prediction for Better Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Better Morphology Prediction for Better Speech Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-8|PAPER Mon-P-2-C-8 — Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-4|PAPER Wed-P-6-E-4 — A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-4|PAPER Mon-O-1-4-4 — Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-3|PAPER Tue-P-3-E-3 — Towards Generalized Speech Enhancement with Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Generalized Speech Enhancement with Generative Adversarial Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191621.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-6|PAPER Thu-P-9-D-6 — Prosodic Phrase Alignment for Machine Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Phrase Alignment for Machine Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192244.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-3|PAPER Mon-O-1-2-3 — Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-12|PAPER Tue-SS-4-4-12 — A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192462.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-3|PAPER Tue-SS-3-6-3 — ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-3|PAPER Wed-P-6-A-3 — Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Progressive Speech Enhancement with Residual Connections</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-4|PAPER Thu-O-10-3-4 — Age-Related Changes in European Portuguese Vowel Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Changes in European Portuguese Vowel Acoustics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192984.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-2|PAPER Tue-P-4-D-2 — Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-3|PAPER Wed-O-6-1-3 — Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-5|PAPER Tue-O-3-1-5 — Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-3|PAPER Mon-S&T-1-3 — SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192678.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-2|PAPER Tue-P-3-B-2 — Unbiased Semi-Supervised LF-MMI Training Using Dropout]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbiased Semi-Supervised LF-MMI Training Using Dropout</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-2|PAPER Mon-O-1-3-2 — An Investigation on Speaker Specific Articulatory Synthesis with Speaker Independent Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation on Speaker Specific Articulatory Synthesis with Speaker Independent Articulatory Inversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192295.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-3|PAPER Mon-P-2-E-3 — Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-1|PAPER Mon-O-2-5-1 — VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191450.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-9|PAPER Mon-P-1-E-9 — Weakly Supervised Syllable Segmentation by Vowel-Consonant Peak Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weakly Supervised Syllable Segmentation by Vowel-Consonant Peak Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192512.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-4|PAPER Wed-O-8-4-4 — A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-15|PAPER Thu-P-10-C-15 — Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-4|PAPER Mon-O-1-5-4 — Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-1|PAPER Tue-P-5-E-1 — Multiview Shared Subspace Learning Across Speakers and Speech Commands]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiview Shared Subspace Learning Across Speakers and Speech Commands</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-3|PAPER Tue-P-5-A-3 — Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193060.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-4|PAPER Thu-O-10-1-4 — Scalable Multi Corpora Neural Language Models for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scalable Multi Corpora Neural Language Models for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191603.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-4|PAPER Mon-O-2-1-4 — A Saliency-Based Attention LSTM Model for Cognitive Load Classification from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Saliency-Based Attention LSTM Model for Cognitive Load Classification from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-2|PAPER Mon-P-1-B-2 — Label Driven Time-Frequency Masking for Robust Continuous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Label Driven Time-Frequency Masking for Robust Continuous Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192090.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-5|PAPER Mon-P-1-B-5 — Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-3|PAPER Wed-P-7-C-3 — Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192881.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-2|PAPER Tue-P-5-B-2 — Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-9|PAPER Wed-P-6-B-9 — Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-5|PAPER Thu-O-10-5-5 — Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-10|PAPER Mon-P-2-E-10 — Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191513.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-6|PAPER Wed-O-7-4-6 — Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191949.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-2|PAPER Thu-O-10-1-2 — Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-4|PAPER Wed-O-8-3-4 — Speech Emotion Recognition Based on Multi-Label Emotion Existence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition Based on Multi-Label Emotion Existence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-10|PAPER Wed-P-7-C-10 — Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192415.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-2|PAPER Thu-SS-9-6-2 — Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-2|PAPER Mon-P-1-E-2 — Automatic Detection of Breath Using Voice Activity Detection and SVM Classifier with Application on News Reports]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Breath Using Voice Activity Detection and SVM Classifier with Application on News Reports</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-9|PAPER Tue-P-3-A-9 — Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-3|PAPER Tue-O-3-3-3 — Iterative Delexicalization for Improved Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Delexicalization for Improved Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192460.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-2|PAPER Thu-O-9-2-2 — Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-3|PAPER Tue-P-4-E-3 — Fully-Convolutional Network for Pitch Estimation of Speech Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fully-Convolutional Network for Pitch Estimation of Speech Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-8|PAPER Mon-P-1-B-8 — NIESR: Nuisance Invariant End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NIESR: Nuisance Invariant End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-6|PAPER Wed-S&T-5-6 — Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-11|PAPER Wed-P-7-E-11 — A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-8|PAPER Mon-P-2-A-8 — GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191333.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-1|PAPER Wed-O-8-4-1 — Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191492.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-9|PAPER Tue-P-5-E-9 — Analyzing Intra-Speaker and Inter-Speaker Vocal Tract Impedance Characteristics in a Low-Dimensional Feature Space Using t-SNE]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Intra-Speaker and Inter-Speaker Vocal Tract Impedance Characteristics in a Low-Dimensional Feature Space Using t-SNE</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-9|PAPER Tue-P-5-A-9 — Transformer Based Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer Based Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192934.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-1|PAPER Tue-P-3-D-1 — L2 Pronunciation Accuracy and Context: A Pilot Study on the Realization of Geminates in Italian as L2 by French Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">L2 Pronunciation Accuracy and Context: A Pilot Study on the Realization of Geminates in Italian as L2 by French Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-7|PAPER Tue-P-3-D-7 — Prosodic Effects on Plosive Duration in German and Austrian German]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Effects on Plosive Duration in German and Austrian German</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191189.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-14|PAPER Tue-P-3-D-14 — Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192066.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-9|PAPER Tue-P-4-D-9 — Acoustic Correlates of Phonation Type in Chichimec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Correlates of Phonation Type in Chichimec</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192720.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-3|PAPER Thu-P-10-B-3 — Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191959.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-7|PAPER Tue-P-5-B-7 — Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-3|PAPER Tue-SS-5-6-3 — Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191567.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-2|PAPER Tue-P-3-E-2 — UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191806.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-11|PAPER Tue-P-3-D-11 — An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-1|PAPER Thu-S&T-6-1 — Elpis, an Accessible Speech-to-Text Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Elpis, an Accessible Speech-to-Text Tool</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191281.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-11|PAPER Wed-P-6-D-11 — R²SPIN: Re-Recording the Revised Speech Perception in Noise Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R²SPIN: Re-Recording the Revised Speech Perception in Noise Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193152.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-1|PAPER Thu-P-9-C-1 — Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-1|PAPER Mon-O-2-3-1 — SparseSpeech: Unsupervised Acoustic Unit Discovery with Memory-Augmented Sequence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SparseSpeech: Unsupervised Acoustic Unit Discovery with Memory-Augmented Sequence Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-4|PAPER Mon-O-2-4-4 — Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-6|PAPER Thu-O-10-4-6 — Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-3|PAPER Thu-P-9-B-3 — “Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192528.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-4|PAPER Tue-O-3-4-4 — The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-5|PAPER Mon-O-1-4-5 — Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192371.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-2|PAPER Tue-O-4-1-2 — Attention Based Hybrid i-Vector BLSTM Model for Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Based Hybrid i-Vector BLSTM Model for Language Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-1|PAPER Thu-P-9-E-1 — On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191942.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-5|PAPER Mon-P-1-E-5 — A Combination of Model-Based and Feature-Based Strategy for Speech-to-Singing Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Combination of Model-Based and Feature-Based Strategy for Speech-to-Singing Alignment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191928.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-5|PAPER Tue-P-4-E-5 — Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191925.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-6|PAPER Tue-P-4-E-6 — On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-7|PAPER Tue-S&T-2-7 —  NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192339.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-4|PAPER Wed-P-7-B-4 — Towards Debugging Deep Neural Networks by Generating Speech Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Debugging Deep Neural Networks by Generating Speech Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192291.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-6|PAPER Tue-P-4-C-6 — Influence of Contextuality on Prosodic Realization of Information Structure in Chinese Dialogues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Influence of Contextuality on Prosodic Realization of Information Structure in Chinese Dialogues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191577.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-11|PAPER Tue-P-4-C-11 — Conversational Emotion Analysis via Attention Mechanisms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Analysis via Attention Mechanisms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-1|PAPER Thu-O-9-4-1 — Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191617.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-12|PAPER Thu-P-10-D-12 — Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191940.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-7|PAPER Thu-P-10-E-7 — Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-7|PAPER Mon-P-2-A-7 — Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192078.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-2|PAPER Tue-O-3-5-2 — Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-11|PAPER Tue-P-5-B-11 — Towards Language-Universal Mandarin-English Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Language-Universal Mandarin-English Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191418.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-11|PAPER Thu-P-10-C-11 — Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193113.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-1|PAPER Mon-P-1-C-1 — Predicting Humor by Learning from Time-Aligned Comments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Humor by Learning from Time-Aligned Comments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-4|PAPER Mon-O-2-4-4 — Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191241.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-1|PAPER Tue-P-5-C-1 — Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-2|PAPER Wed-P-7-E-2 — A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-5|PAPER Mon-O-2-1-5 — A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191349.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-10|PAPER Mon-P-1-C-10 — Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-10|PAPER Tue-P-3-C-10 — Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192712.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-1|PAPER Tue-P-3-E-1 — Speech Augmentation via Speaker-Specific Noise in Unseen Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Augmentation via Speaker-Specific Noise in Unseen Environment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-2|PAPER Wed-O-8-3-2 — Continuous Emotion Recognition in Speech — Do We Need Recurrence?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continuous Emotion Recognition in Speech — Do We Need Recurrence?</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191658.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-4|PAPER Thu-O-10-2-4 — Robust Speech Emotion Recognition Under Different Encoding Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Speech Emotion Recognition Under Different Encoding Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Path Signature Approach for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193233.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-1|PAPER Tue-P-5-A-1 — Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-10|PAPER Thu-P-10-B-10 —  Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-9|PAPER Thu-P-10-E-9 — Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191733.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-7|PAPER Mon-P-1-C-7 — Laughter Dynamics in Dyadic Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Laughter Dynamics in Dyadic Conversations</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-10|PAPER Wed-P-8-D-10 — Nasal Consonant Discrimination in Infant- and Adult-Directed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Consonant Discrimination in Infant- and Adult-Directed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-3|PAPER Tue-SS-5-6-3 — Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-3|PAPER Wed-O-7-2-3 — An Empirical Evaluation of DTW Subsampling Methods for Keyword Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Empirical Evaluation of DTW Subsampling Methods for Keyword Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193116.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-6|PAPER Mon-P-1-A-6 — Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192384.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-3|PAPER Wed-P-7-E-3 — Unsupervised Methods for Audio Classification from Lecture Discussion Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Audio Classification from Lecture Discussion Recordings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193051.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-2|PAPER Thu-P-9-D-2 — On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191973.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-4|PAPER Wed-O-8-2-4 — Multi-Stride Self-Attention for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stride Self-Attention for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-5|PAPER Thu-O-9-5-5 — Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-2|PAPER Wed-O-6-5-2 — Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192250.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-10|PAPER Thu-P-10-A-10 — Mixup Learning Strategies for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixup Learning Strategies for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192551.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-5|PAPER Wed-P-6-C-5 — An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191736.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-10|PAPER Mon-P-1-D-10 — Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192415.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-2|PAPER Thu-SS-9-6-2 — Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191433.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-13|PAPER Tue-P-5-D-13 — Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191535.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-13|PAPER Wed-P-6-A-13 — Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191891.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-11|PAPER Wed-P-6-A-11 — A Study of x-Vector Based Speaker Recognition on Short Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of x-Vector Based Speaker Recognition on Short Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-3|PAPER Tue-P-5-C-3 — Improving Performance of End-to-End ASR on Numeric Sequences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Performance of End-to-End ASR on Numeric Sequences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192098.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-6|PAPER Wed-P-8-D-6 — Articulation Rate as a Metric in Spoken Language Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulation Rate as a Metric in Spoken Language Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192338.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-2|PAPER Wed-SS-8-6-2 — Aerodynamics and Lumped-Masses Combined with Delay Lines for Modeling Vertical and Anterior-Posterior Phase Differences in Pathological Vocal Fold Vibration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Aerodynamics and Lumped-Masses Combined with Delay Lines for Modeling Vertical and Anterior-Posterior Phase Differences in Pathological Vocal Fold Vibration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-8|PAPER Thu-P-10-A-8 — End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-6|PAPER Wed-P-7-C-6 — Speech Emotion Recognition with a Reject Option]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition with a Reject Option</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-3|PAPER Thu-P-9-C-3 — A Neural Turn-Taking Model without RNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Neural Turn-Taking Model without RNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192772.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-4|PAPER Tue-P-5-B-4 — Recognition of Latin American Spanish Using Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Latin American Spanish Using Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191705.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-1|PAPER Mon-O-1-5-1 — High Quality, Lightweight and Adaptable TTS Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality, Lightweight and Adaptable TTS Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191800.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-2|PAPER Tue-O-5-1-2 — Evaluating Near End Listening Enhancement Algorithms in Realistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Near End Listening Enhancement Algorithms in Realistic Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-1|PAPER Mon-P-2-D-1 — Multi-Corpus Acoustic-to-Articulatory Speech Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Corpus Acoustic-to-Articulatory Speech Inversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-6|PAPER Tue-O-5-3-6 — Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-12|PAPER Wed-P-7-C-12 — Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-1|PAPER Tue-P-3-A-1 — Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192551.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-5|PAPER Wed-P-6-C-5 — An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192528.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-4|PAPER Tue-O-3-4-4 — The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191800.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-2|PAPER Tue-O-5-1-2 — Evaluating Near End Listening Enhancement Algorithms in Realistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Near End Listening Enhancement Algorithms in Realistic Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-4|PAPER Thu-O-10-3-4 — Age-Related Changes in European Portuguese Vowel Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Changes in European Portuguese Vowel Acoustics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191281.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-11|PAPER Wed-P-6-D-11 — R²SPIN: Re-Recording the Revised Speech Perception in Noise Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R²SPIN: Re-Recording the Revised Speech Perception in Noise Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-4|PAPER Wed-O-7-1-4 — Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-1|PAPER Mon-O-1-3-1 — Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-9|PAPER Tue-P-3-D-9 — Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-1|PAPER Tue-O-4-3-1 — Fusion Strategy for Prosodic and Lexical Representations of Word Importance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Strategy for Prosodic and Lexical Representations of Word Importance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-4|PAPER Wed-S&T-5-4 — Synthesized Spoken Names: Biases Impacting Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synthesized Spoken Names: Biases Impacting Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193072.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-2|PAPER Mon-P-1-D-2 — Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192913.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-1|PAPER Wed-P-7-E-1 — Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191272.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-1|PAPER Tue-O-4-5-1 — A Unified Bayesian Source Modelling for Determined Blind Source Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Bayesian Source Modelling for Determined Blind Source Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-5|PAPER Wed-P-8-A-5 — Super-Wideband Spectral Envelope Modeling for Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Super-Wideband Spectral Envelope Modeling for Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193074.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-5|PAPER Wed-O-6-2-5 — Supervised Classifiers for Audio Impairments with Noisy Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Classifiers for Audio Impairments with Noisy Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-7|PAPER Mon-P-2-E-7 — On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-9|PAPER Wed-P-6-A-9 — Device Feature Extractor for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Device Feature Extractor for Replay Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191484.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-3|PAPER Wed-P-8-C-3 — Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191722.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-2|PAPER Wed-O-6-3-2 — Adversarially Trained End-to-End Korean Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained End-to-End Korean Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191141.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-10|PAPER Wed-P-6-E-10 — Masking Estimation with Phase Restoration of Clean Speech for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Masking Estimation with Phase Restoration of Clean Speech for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-2|PAPER Mon-P-2-B-2 — Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-3|PAPER Wed-O-8-2-3 — Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191766.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-5|PAPER Tue-P-5-C-5 — Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-5|PAPER Mon-O-2-2-5 — Large Margin Training for Attention Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Training for Attention Based End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192454.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-1|PAPER Tue-O-5-2-1 — Multi-Span Acoustic Modelling Using Raw Waveform Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Span Acoustic Modelling Using Raw Waveform Signals</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-5|PAPER Tue-O-4-5-5 — Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-3|PAPER Thu-P-9-C-3 — A Neural Turn-Taking Model without RNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Neural Turn-Taking Model without RNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-1|PAPER Tue-P-3-C-1 — Deep Hierarchical Fusion with Application in Sentiment Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hierarchical Fusion with Application in Sentiment Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-7|PAPER Wed-P-6-C-7 — Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191776.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-8|PAPER Tue-P-4-E-8 — Optimizing Voice Activity Detection for Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Voice Activity Detection for Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191776.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-8|PAPER Tue-P-4-E-8 — Optimizing Voice Activity Detection for Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Voice Activity Detection for Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192551.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-5|PAPER Wed-P-6-C-5 — An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192929.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-4|PAPER Thu-P-9-E-4 — Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191625.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-6|PAPER Thu-P-9-E-6 — Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-5|PAPER Tue-P-4-B-5 — Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-2|PAPER Wed-P-7-E-2 — A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-2|PAPER Tue-P-5-E-2 — A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192659.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-2|PAPER Wed-P-8-B-2 — Prosody Usage Optimization for Children Speech Recognition with Zero Resource Children Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody Usage Optimization for Children Speech Recognition with Zero Resource Children Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-10|PAPER Thu-P-10-B-10 —  Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192730.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-5|PAPER Tue-P-5-A-5 — End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191794.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-1|PAPER Tue-SS-4-4-1 — ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-4|PAPER Tue-O-4-1-4 — Target Speaker Extraction for Multi-Talker Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Extraction for Multi-Talker Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192044.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-3|PAPER Mon-O-2-1-3 — Attentive to Individual: A Multimodal Emotion Recognition Network with Personalized Attention Profile]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentive to Individual: A Multimodal Emotion Recognition Network with Personalized Attention Profile</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-7|PAPER Tue-P-3-C-7 — Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-8|PAPER Tue-P-3-C-8 — Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-6|PAPER Tue-P-3-D-6 — Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-8|PAPER Wed-P-6-C-8 — Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-11|PAPER Tue-SS-4-4-11 — Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191766.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-5|PAPER Tue-P-5-C-5 — Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191777.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-3|PAPER Wed-O-7-5-3 — Incorporating Symbolic Sequential Modeling for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Symbolic Sequential Modeling for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191519.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-2|PAPER Wed-P-6-E-2 — Noise Adaptive Speech Enhancement Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noise Adaptive Speech Enhancement Using Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-6|PAPER Tue-P-3-D-6 — Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-3|PAPER Wed-O-7-1-3 — Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-6|PAPER Tue-P-3-E-6 — KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192511.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-9|PAPER Wed-P-8-A-9 — A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192171.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-4|PAPER Wed-P-8-E-4 — A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-1|PAPER Thu-P-9-E-1 — On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193214.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-1|PAPER Mon-P-1-D-1 — Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-1|PAPER Tue-P-4-C-1 — Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-8|PAPER Wed-P-6-C-8 — Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-2|PAPER Mon-P-2-E-2 — ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-9|PAPER Mon-P-2-E-9 — An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-10|PAPER Mon-P-2-E-10 — Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-3|PAPER Mon-S&T-1-3 — SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191520.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-10|PAPER Tue-P-4-E-10 — Acoustic Modeling for Automatic Lyrics-to-Audio Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Modeling for Automatic Lyrics-to-Audio Alignment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-7|PAPER Tue-S&T-2-7 —  NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-8|PAPER Tue-P-4-B-8 — Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192078.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-2|PAPER Tue-O-3-5-2 — Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-3|PAPER Mon-O-1-3-3 — Individual Difference of Relative Tongue Size and its Acoustic Effects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Difference of Relative Tongue Size and its Acoustic Effects</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-1|PAPER Mon-O-2-3-1 — SparseSpeech: Unsupervised Acoustic Unit Discovery with Memory-Augmented Sequence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SparseSpeech: Unsupervised Acoustic Unit Discovery with Memory-Augmented Sequence Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192210.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-5|PAPER Wed-P-6-D-5 — Perceiving Older Adults Producing Clear and Lombard Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceiving Older Adults Producing Clear and Lombard Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193099.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-3|PAPER Mon-O-1-5-3 — Expediting TTS Synthesis with Adversarial Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expediting TTS Synthesis with Adversarial Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-5|PAPER Tue-P-4-D-5 — Time to Frequency Domain Mapping of the Voice Source: The Influence of Open Quotient and Glottal Skew on the Low End of the Source Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Time to Frequency Domain Mapping of the Voice Source: The Influence of Open Quotient and Glottal Skew on the Low End of the Source Spectrum</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-4|PAPER Wed-O-6-1-4 — The Role of Voice Quality in the Perception of Prominence in Synthetic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Voice Quality in the Perception of Prominence in Synthetic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-4|PAPER Mon-O-1-5-4 — Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-3|PAPER Tue-S&T-2-3 — Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-2|PAPER Wed-P-8-C-2 — Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191541.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-8|PAPER Tue-P-5-E-8 — ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192977.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-2|PAPER Tue-O-3-3-2 — Mining Polysemous Triplets with Recurrent Neural Networks for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mining Polysemous Triplets with Recurrent Neural Networks for Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193033.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-2|PAPER Thu-P-9-C-2 — Benchmarking Benchmarks: Introducing New Automatic Indicators for Benchmarking Spoken Language Understanding Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Benchmarking Benchmarks: Introducing New Automatic Indicators for Benchmarking Spoken Language Understanding Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191553.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-7|PAPER Thu-P-9-D-7 — Spot the Pleasant People! Navigating the Cocktail Party Buzz]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Pleasant People! Navigating the Cocktail Party Buzz</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192876.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-6|PAPER Mon-P-2-D-6 — Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191691.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-3|PAPER Tue-O-5-4-3 — Multi-Lingual Dialogue Act Recognition with Deep Learning Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Lingual Dialogue Act Recognition with Deep Learning Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191865.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-1|PAPER Mon-O-2-4-1 — Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191658.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-4|PAPER Thu-O-10-2-4 — Robust Speech Emotion Recognition Under Different Encoding Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Speech Emotion Recognition Under Different Encoding Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-11|PAPER Wed-P-8-E-11 — Semi-Supervised Audio Classification with Consistency-Based Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Audio Classification with Consistency-Based Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193140.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-3|PAPER Thu-O-10-2-3 — Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-7|PAPER Tue-P-3-C-7 — Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-8|PAPER Tue-P-3-C-8 — Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191548.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-2|PAPER Tue-O-4-3-2 — Self Attention in Variational Sequential Learning for Summarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self Attention in Variational Sequential Learning for Summarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191300.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-10|PAPER Thu-P-9-C-10 — Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191235.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-12|PAPER Tue-P-5-A-12 — Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-8|PAPER Tue-P-3-D-8 — Cross-Lingual Consistency of Phonological Features: An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Consistency of Phonological Features: An Empirical Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191388.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-5|PAPER Mon-P-1-A-5 — LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191693.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-11|PAPER Wed-SS-6-4-11 — Spatial, Temporal and Spectral Multiresolution Analysis for the INTERSPEECH 2019 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial, Temporal and Spectral Multiresolution Analysis for the INTERSPEECH 2019 ComParE Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192743.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Explaining Sentiment Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Explaining Sentiment Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192845.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-5|PAPER Mon-P-2-E-5 — Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191393.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-5|PAPER Wed-O-8-1-5 — LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-15|PAPER Mon-P-2-A-15 — Semi-Supervised Voice Conversion with Amortized Variational Inference]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Voice Conversion with Amortized Variational Inference</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-2|PAPER Wed-P-6-D-2 — Disfluencies and Human Speech Transcription Errors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Human Speech Transcription Errors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191708.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-5|PAPER Wed-O-8-3-5 — Gender De-Biasing in Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gender De-Biasing in Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191617.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-12|PAPER Thu-P-10-D-12 — Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191940.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-7|PAPER Thu-P-10-E-7 — Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-5|PAPER Tue-O-3-4-5 — The Neural Correlates Underlying Lexically-Guided Perceptual Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Neural Correlates Underlying Lexically-Guided Perceptual Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-5|PAPER Thu-O-9-3-5 — Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191809.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-11|PAPER Tue-P-3-E-11 — Speech Enhancement with Variance Constrained Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Variance Constrained Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-5|PAPER Tue-P-4-B-5 — Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191775.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-4|PAPER Mon-O-2-3-4 — Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191781.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-10|PAPER Tue-P-5-A-10 — Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198007.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-2|PAPER Tue-S&T-2-2 — Online Speech Processing and Analysis Suite]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Speech Processing and Analysis Suite</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-2|PAPER Thu-O-9-3-2 — GPU-Based WFST Decoding with Extra Large Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GPU-Based WFST Decoding with Extra Large Language Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-6|PAPER Tue-P-4-B-6 — Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192190.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-5|PAPER Tue-P-3-A-5 — Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-13|PAPER Wed-P-8-D-13 — The Production of Chinese Affricates /ts/ and /ts^^h^^/ by Native Urdu Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Production of Chinese Affricates /ts/ and /ts^^h^^/ by Native Urdu Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191390.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-7|PAPER Wed-P-6-B-7 —  Kite: Automatic Speech Recognition for Unmanned Aerial Vehicles]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Kite: Automatic Speech Recognition for Unmanned Aerial Vehicles</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192763.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-2|PAPER Wed-O-7-1-2 — The Effects of Time Expansion on English as a Second Language Individuals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effects of Time Expansion on English as a Second Language Individuals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-2|PAPER Tue-P-5-D-2 — Individual Differences in Implicit Attention to Phonetic Detail in Speech Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Differences in Implicit Attention to Phonetic Detail in Speech Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-3|PAPER Wed-SS-6-4-3 — Deep Neural Baselines for Computational Paralinguistics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Baselines for Computational Paralinguistics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-3|PAPER Tue-O-5-1-3 — Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192912.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-8|PAPER Mon-P-1-A-8 — Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-5|PAPER Thu-O-9-5-5 — Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192980.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192961.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-7|PAPER Mon-P-1-A-7 — Multi-PLDA Diarization on Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-PLDA Diarization on Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-6|PAPER Tue-P-5-D-6 — Perception of Pitch Contours in Speech and Nonspeech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Pitch Contours in Speech and Nonspeech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-4|PAPER Wed-O-7-5-4 — Maximum a posteriori Speech Enhancement Based on Double Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Maximum a posteriori Speech Enhancement Based on Double Spectrum</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192335.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-7|PAPER Tue-P-5-A-7 — Unified Language-Independent DNN-Based G2P Converter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Language-Independent DNN-Based G2P Converter</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-4|PAPER Mon-P-1-C-4 — Deep Learning Based Mandarin Accent Identification for Accent Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Mandarin Accent Identification for Accent Robust ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-4|PAPER Thu-O-10-3-4 — Age-Related Changes in European Portuguese Vowel Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Changes in European Portuguese Vowel Acoustics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-4|PAPER Mon-O-1-2-4 — Exploiting Multi-Channel Speech Presence Probability in Parametric Multi-Channel Wiener Filter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Channel Speech Presence Probability in Parametric Multi-Channel Wiener Filter</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-8|PAPER Thu-P-10-A-8 — End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192918.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-4|PAPER Tue-P-4-D-4 — Prosodic Factors Influencing Vowel Reduction in Russian]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Factors Influencing Vowel Reduction in Russian</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192127.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-4|PAPER Mon-P-1-B-4 — Full-Sentence Correlation: A Method to Handle Unpredictable Noise for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Full-Sentence Correlation: A Method to Handle Unpredictable Noise for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-2|PAPER Tue-P-4-B-2 — Language Learning Using Speech to Image Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Learning Using Speech to Image Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-3|PAPER Tue-SS-4-4-3 — The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-1|PAPER Tue-O-4-1-1 — Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191435.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-12|PAPER Wed-SS-7-A-12 — The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191542.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-8|PAPER Thu-P-9-A-8 — Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-14|PAPER Thu-P-10-A-14 — Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191436.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-15|PAPER Thu-P-10-A-15 — The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-6|PAPER Thu-O-10-5-6 — Multi-Graph Decoding for Code-Switching ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Graph Decoding for Code-Switching ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-1|PAPER Tue-P-3-A-1 — Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-5|PAPER Wed-O-6-3-5 — A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-4|PAPER Wed-P-8-B-4 — Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192459.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-4|PAPER Thu-P-10-E-4 — Influence of Speaker-Specific Parameters on Speech Separation Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Influence of Speaker-Specific Parameters on Speech Separation Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-5|PAPER Tue-O-5-4-5 — Discovering Dialog Rules by Means of an Evolutionary Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discovering Dialog Rules by Means of an Evolutionary Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191718.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-5|PAPER Mon-O-2-3-5 — Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-10|PAPER Wed-P-7-B-10 — Transfer Learning from Audio-Visual Grounding to Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning from Audio-Visual Grounding to Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192507.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-7|PAPER Tue-P-4-D-7 — A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198004.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-2|PAPER Mon-S&T-1-2 — Depression State Assessment: Application for Detection of Depression by Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Depression State Assessment: Application for Detection of Depression by Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-10|PAPER Thu-P-10-D-10 — Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-3|PAPER Wed-P-6-A-3 — Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Progressive Speech Enhancement with Residual Connections</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192714.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-3|PAPER Mon-P-1-D-3 — RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193109.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-2|PAPER Mon-P-2-D-2 — Towards a Speaker Independent Speech-BCI Using Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Speaker Independent Speech-BCI Using Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193105.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-1|PAPER Tue-O-3-4-1 — Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191764.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-2|PAPER Thu-O-9-1-2 — An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-6|PAPER Wed-O-8-4-6 — Nonparallel Emotional Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191580.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-7|PAPER Wed-P-8-A-7 — Artificial Bandwidth Extension Using H∞ Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Artificial Bandwidth Extension Using H∞ Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191495.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-11|PAPER Mon-P-1-B-11 — Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191493.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-12|PAPER Mon-P-1-B-12 — Enhanced Spectral Features for Distortion-Independent Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhanced Spectral Features for Distortion-Independent Acoustic Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191428.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-13|PAPER Thu-P-9-A-13 — Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192651.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-3|PAPER Thu-P-9-E-3 — Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193060.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-4|PAPER Thu-O-10-1-4 — Scalable Multi Corpora Neural Language Models for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scalable Multi Corpora Neural Language Models for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-1|PAPER Mon-P-2-C-1 — Mitigating Noisy Inputs for Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Noisy Inputs for Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-2|PAPER Mon-P-2-B-2 — Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-3|PAPER Wed-O-8-2-3 — Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-7|PAPER Tue-P-4-C-7 — Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191886.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-8|PAPER Tue-P-4-C-8 — Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-6|PAPER Thu-P-10-A-6 — Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-3|PAPER Tue-S&T-2-3 — Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-1|PAPER Tue-O-5-4-1 — Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-2|PAPER Tue-O-5-4-2 — HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191789.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-7|PAPER Thu-P-9-B-7 — Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191189.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-14|PAPER Tue-P-3-D-14 — Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-11|PAPER Mon-P-1-E-11 — Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192869.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-1|PAPER Mon-P-2-A-1 — Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-1|PAPER Mon-P-1-B-1 — Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191527.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-8|PAPER Thu-P-9-C-8 — Analysis of Effect and Timing of Fillers in Natural Turn-Taking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Effect and Timing of Fillers in Natural Turn-Taking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-1|PAPER Tue-P-3-B-1 — Attention Model for Articulatory Features Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Model for Articulatory Features Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-7|PAPER Wed-P-6-C-7 — Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192219.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-7|PAPER Tue-P-5-E-7 — Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-4|PAPER Wed-P-8-B-4 — Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192589.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-2|PAPER Wed-P-7-B-2 — Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192486.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-3|PAPER Thu-P-9-A-3 — VAE-Based Regularization for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VAE-Based Regularization for Deep Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-5|PAPER Mon-O-2-2-5 — Large Margin Training for Attention Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Training for Attention Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-1|PAPER Thu-P-10-E-1 — A Modified Algorithm for Multiple Input Spectrogram Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Modified Algorithm for Multiple Input Spectrogram Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192650.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-13|PAPER Mon-P-2-D-13 — Strength and Structure: Coupling Tones with Oral Constriction Gestures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Strength and Structure: Coupling Tones with Oral Constriction Gestures</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-2|PAPER Wed-O-6-1-2 — Dimensions of Prosodic Prominence in an Attractor Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dimensions of Prosodic Prominence in an Attractor Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192389.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-8|PAPER Thu-P-10-D-8 — Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192714.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-3|PAPER Mon-P-1-D-3 — RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192956.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-1|PAPER Wed-P-6-A-1 — Blind Channel Response Estimation for Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Channel Response Estimation for Replay Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-3|PAPER Mon-P-1-A-3 — MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-11|PAPER Wed-P-8-C-11 — Better Morphology Prediction for Better Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Better Morphology Prediction for Better Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-9|PAPER Wed-P-6-D-9 — Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191349.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-10|PAPER Mon-P-1-C-10 — Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192462.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-3|PAPER Tue-SS-3-6-3 — ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-3|PAPER Wed-P-6-A-3 — Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Progressive Speech Enhancement with Residual Connections</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-1|PAPER Tue-P-3-C-1 — Deep Hierarchical Fusion with Application in Sentiment Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hierarchical Fusion with Application in Sentiment Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-6|PAPER Mon-P-2-E-6 — Unsupervised Low-Rank Representations for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Low-Rank Representations for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192845.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-5|PAPER Mon-P-2-E-5 — Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192684.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-6|PAPER Tue-P-4-D-6 — Testing the Distinctiveness of Intonational Tunes: Evidence from Imitative Productions in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Testing the Distinctiveness of Intonational Tunes: Evidence from Imitative Productions in American English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-3|PAPER Wed-O-8-3-3 — Speech Based Emotion Prediction: Can a Linear Model Work?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Based Emotion Prediction: Can a Linear Model Work?</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192361.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-6|PAPER Wed-P-6-A-6 — An Adaptive-Q Cochlear Model for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive-Q Cochlear Model for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191535.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-13|PAPER Wed-P-6-A-13 — Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193179.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-5|PAPER Tue-O-5-5-5 — Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191674.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-11|PAPER Wed-P-8-D-11 — No Distributional Learning in Adults from Attended Listening to Non-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">No Distributional Learning in Adults from Attended Listening to Non-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192394.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-2|PAPER Wed-P-8-E-2 — Neural Network Distillation on IoT Platforms for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network Distillation on IoT Platforms for Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-1|PAPER Wed-O-6-3-1 — Unsupervised Singing Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Singing Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193095.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-1|PAPER Wed-P-6-C-1 — Optimizing Speech-Input Length for Speaker-Independent Depression Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Speech-Input Length for Speaker-Independent Depression Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191674.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-11|PAPER Wed-P-8-D-11 — No Distributional Learning in Adults from Attended Listening to Non-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">No Distributional Learning in Adults from Attended Listening to Non-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191405.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-12|PAPER Mon-P-1-C-12 — Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192080.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-10|PAPER Wed-P-6-C-10 — Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191886.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-8|PAPER Tue-P-4-C-8 — Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-4|PAPER Tue-O-5-3-4 — Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-9|PAPER Tue-P-4-C-9 — Identifying Mood Episodes Using Dialogue Features from Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Mood Episodes Using Dialogue Features from Clinical Interviews</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-8|PAPER Wed-P-7-C-8 — Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-7|PAPER Mon-P-2-B-7 — End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-1|PAPER Tue-P-4-D-1 — The Effect of Phoneme Distribution on Perceptual Similarity in English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Phoneme Distribution on Perceptual Similarity in English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192169.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-6|PAPER Thu-P-10-E-6 — Towards Joint Sound Scene and Polyphonic Sound Event Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Joint Sound Scene and Polyphonic Sound Event Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191718.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-5|PAPER Mon-O-2-3-5 — Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192851.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-4|PAPER Wed-P-7-D-4 — The Contribution of Lip Protrusion to Anglo-English /r/: Evidence from Hyper- and Non-Hyperarticulated Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Contribution of Lip Protrusion to Anglo-English /r/: Evidence from Hyper- and Non-Hyperarticulated Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-4|PAPER Wed-P-6-E-4 — A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192415.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-2|PAPER Thu-SS-9-6-2 — Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191520.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-10|PAPER Tue-P-4-E-10 — Acoustic Modeling for Automatic Lyrics-to-Audio Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Modeling for Automatic Lyrics-to-Audio Alignment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-4|PAPER Thu-O-10-5-4 — Code-Switching Detection Using ASR-Generated Language Posteriors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Detection Using ASR-Generated Language Posteriors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-5|PAPER Thu-O-10-5-5 — Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-6|PAPER Thu-O-10-5-6 — Multi-Graph Decoding for Code-Switching ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Graph Decoding for Code-Switching ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191514.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-6|PAPER Mon-O-1-5-6 — A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-4|PAPER Tue-O-4-1-4 — Target Speaker Extraction for Multi-Talker Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Extraction for Multi-Talker Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-5|PAPER Wed-P-8-C-5 — Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192521.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-4|PAPER Thu-P-10-C-4 — Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-4|PAPER Mon-O-2-4-4 — Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-6|PAPER Thu-O-10-4-6 — Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-5|PAPER Tue-O-5-2-5 — Trainable Dynamic Subsampling for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Trainable Dynamic Subsampling for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-12|PAPER Tue-P-3-C-12 — Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191257.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-9|PAPER Wed-P-8-B-9 — On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192278.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-6|PAPER Wed-SS-6-4-6 — Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192971.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-3|PAPER Tue-O-5-2-3 — Layer Trajectory BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Layer Trajectory BLSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191467.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-6|PAPER Wed-O-8-2-6 — Self-Teaching Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Teaching Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192482.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-3|PAPER Tue-O-4-3-3 — Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-4|PAPER Thu-S&T-6-4 — The SAIL LABS Media Mining Indexer and the CAVA Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SAIL LABS Media Mining Indexer and the CAVA Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-6|PAPER Wed-P-6-B-6 — The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192637.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-2|PAPER Wed-P-8-D-2 — An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191817.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-4|PAPER Thu-P-9-D-4 — Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191812.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-9|PAPER Mon-P-2-D-9 — Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-9|PAPER Mon-P-2-A-9 — Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-10|PAPER Thu-P-10-D-10 — Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-3|PAPER Wed-S&T-5-3 — Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-3|PAPER Thu-O-10-4-3 — The Greennn Tree — Lengthening Position Influences Uncertainty Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Greennn Tree — Lengthening Position Influences Uncertainty Perception</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-2|PAPER Thu-P-10-C-2 — Spontaneous Conversational Speech Synthesis from Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spontaneous Conversational Speech Synthesis from Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-2|PAPER Wed-P-7-C-2 — Modeling User Context for Valence Prediction from Narratives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling User Context for Valence Prediction from Narratives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-9|PAPER Wed-P-6-B-9 — Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-8|PAPER Wed-P-8-B-8 — Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-5|PAPER Thu-O-10-5-5 — Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-2|PAPER Tue-P-3-A-2 — Selection and Training Schemes for Improving TTS Voice Built on Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Selection and Training Schemes for Improving TTS Voice Built on Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-3|PAPER Wed-SS-6-4-3 — Deep Neural Baselines for Computational Paralinguistics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Baselines for Computational Paralinguistics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191789.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-7|PAPER Thu-P-9-B-7 — Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192293.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-6|PAPER Wed-O-8-3-6 — CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-2|PAPER Thu-O-10-3-2 — Frication as a Vowel Feature? — Evidence from the Rui’an Wu Chinese Dialect]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frication as a Vowel Feature? — Evidence from the Rui’an Wu Chinese Dialect</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-3|PAPER Thu-O-10-3-3 — Vowels and Diphthongs in the Xupu Xiang Chinese Dialect]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vowels and Diphthongs in the Xupu Xiang Chinese Dialect</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193140.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-3|PAPER Thu-O-10-2-3 — Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-9|PAPER Tue-P-3-D-9 — Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192629.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-3|PAPER Wed-P-6-B-3 — Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-6|PAPER Tue-S&T-2-6 — A System for Real-Time Privacy Preserving Data Collection for Ambient Assisted Living]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A System for Real-Time Privacy Preserving Data Collection for Ambient Assisted Living</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-5|PAPER Thu-O-10-5-5 — Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191124.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-12|PAPER Wed-P-6-D-12 — Contributions of Consonant-Vowel Transitions to Mandarin Tone Identification in Simulated Electric-Acoustic Hearing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contributions of Consonant-Vowel Transitions to Mandarin Tone Identification in Simulated Electric-Acoustic Hearing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191336.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-9|PAPER Thu-P-9-D-9 — Noisy BiLSTM-Based Models for Disfluency Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noisy BiLSTM-Based Models for Disfluency Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192980.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191861.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-1|PAPER Mon-P-2-E-1 — Salient Speech Representations Based on Cloned Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Salient Speech Representations Based on Cloned Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192995.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-2|PAPER Wed-P-7-D-2 — Articulation of Vowel Length Contrasts in Australian English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulation of Vowel Length Contrasts in Australian English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193072.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-2|PAPER Mon-P-1-D-2 — Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192910.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-6|PAPER Wed-SS-8-6-6 — Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-4|PAPER Mon-P-1-C-4 — Deep Learning Based Mandarin Accent Identification for Accent Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Mandarin Accent Identification for Accent Robust ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192719.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-6|PAPER Thu-O-9-2-6 — Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191327.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-2|PAPER Mon-O-1-4-2 — Harmonic-Aligned Frame Mask Based on Non-Stationary Gabor Transform with Application to Content-Dependent Speaker Comparison]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic-Aligned Frame Mask Based on Non-Stationary Gabor Transform with Application to Content-Dependent Speaker Comparison</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-10|PAPER Thu-P-10-B-10 —  Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191696.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-5|PAPER Thu-P-9-C-5 — Personalized Dialogue Response Generation Learned from Monologues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalized Dialogue Response Generation Learned from Monologues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-10|PAPER Thu-P-10-C-10 — Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192347.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-8|PAPER Wed-P-8-C-8 — Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-5|PAPER Wed-O-6-3-5 — A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-1|PAPER Tue-P-5-B-1 — Multilingual Speech Recognition with Corpus Relatedness Sampling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Corpus Relatedness Sampling</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-1|PAPER Wed-O-8-1-1 — Survey Talk: Multimodal Processing of Speech and Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Multimodal Processing of Speech and Language</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-6|PAPER Wed-S&T-4-6 — SANTLR: Speech Annotation Toolkit for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SANTLR: Speech Annotation Toolkit for Low Resource Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-2|PAPER Thu-P-10-B-2 — Cross-Attention End-to-End ASR for Two-Party Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Attention End-to-End ASR for Two-Party Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192115.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-3|PAPER Mon-O-2-4-3 — Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192540.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-4|PAPER Wed-SS-6-4-4 — Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198001.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-1|PAPER Wed-S&T-4-1 — BAS Web Services for Automatic Subtitle Creation and Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BAS Web Services for Automatic Subtitle Creation and Anonymization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-11|PAPER Thu-P-10-E-11 — End-to-End Music Source Separation: Is it Possible in the Waveform Domain?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Music Source Separation: Is it Possible in the Waveform Domain?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191189.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-14|PAPER Tue-P-3-D-14 — Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192098.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-6|PAPER Wed-P-8-D-6 — Articulation Rate as a Metric in Spoken Language Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulation Rate as a Metric in Spoken Language Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193246.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-1|PAPER Mon-P-2-B-1 — Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191924.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-1|PAPER Wed-O-7-5-1 — Speech Denoising with Deep Feature Losses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Denoising with Deep Feature Losses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192731.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-4|PAPER Wed-O-6-2-4 — A Deep Residual Network for Large-Scale Acoustic Scene Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Residual Network for Large-Scale Acoustic Scene Analysis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192653.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-2|PAPER Wed-O-7-4-2 — Multiple Sound Source Localization with SVD-PHAT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiple Sound Source Localization with SVD-PHAT</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-6|PAPER Wed-P-6-B-6 — The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191752.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-9|PAPER Mon-P-2-B-9 — An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-2|PAPER Tue-O-4-2-2 — A New GAN-Based End-to-End TTS Training Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New GAN-Based End-to-End TTS Training Algorithm</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-7|PAPER Thu-P-10-C-7 — Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-3|PAPER Tue-P-5-E-3 — Acoustic Scene Classification with Mismatched Devices Using CliqueNets and Mixup Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification with Mismatched Devices Using CliqueNets and Mixup Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-1|PAPER Thu-P-9-E-1 — On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192732.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-3|PAPER Mon-P-2-C-3 — Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193033.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-2|PAPER Thu-P-9-C-2 — Benchmarking Benchmarks: Introducing New Automatic Indicators for Benchmarking Spoken Language Understanding Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Benchmarking Benchmarks: Introducing New Automatic Indicators for Benchmarking Spoken Language Understanding Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192108.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-7|PAPER Wed-P-6-E-7 — Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-4|PAPER Tue-O-4-2-4 — Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191891.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-11|PAPER Wed-P-6-A-11 — A Study of x-Vector Based Speaker Recognition on Short Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of x-Vector Based Speaker Recognition on Short Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192552.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-5|PAPER Mon-P-1-C-5 — Calibrating DNN Posterior Probability Estimates of HMM/DNN Models to Improve Social Signal Detection from Audio Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Calibrating DNN Posterior Probability Estimates of HMM/DNN Models to Improve Social Signal Detection from Audio Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191726.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-8|PAPER Wed-SS-6-4-8 — Using Fisher Vector and Bag-of-Audio-Words Representations to Identify Styrian Dialects, Sleepiness, Baby & Orca Sounds]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Fisher Vector and Bag-of-Audio-Words Representations to Identify Styrian Dialects, Sleepiness, Baby & Orca Sounds</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192217.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-9|PAPER Wed-P-6-C-9 — Assessing Parkinson’s Disease from Speech Using Fisher Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Parkinson’s Disease from Speech Using Fisher Vectors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-5|PAPER Thu-O-10-2-5 — Using the Bag-of-Audio-Word Feature Representation of ASR DNN Posteriors for Paralinguistic Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using the Bag-of-Audio-Word Feature Representation of ASR DNN Posteriors for Paralinguistic Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198004.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-2|PAPER Mon-S&T-1-2 — Depression State Assessment: Application for Detection of Depression by Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Depression State Assessment: Application for Detection of Depression by Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192732.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-3|PAPER Mon-P-2-C-3 — Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192636.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-8|PAPER Wed-P-8-A-8 — Quality Degradation Diagnosis for Voice Networks — Estimating the Perceived Noisiness, Coloration, and Discontinuity of Transmitted Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quality Degradation Diagnosis for Voice Networks — Estimating the Perceived Noisiness, Coloration, and Discontinuity of Transmitted Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-10|PAPER Wed-P-8-A-10 — Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193062.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-3|PAPER Tue-P-4-C-3 — Analyzing Verbal and Nonverbal Features for Predicting Group Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Verbal and Nonverbal Features for Predicting Group Performance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-1|PAPER Thu-O-10-4-1 — Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193107.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-5|PAPER Thu-O-10-1-5 — Who Needs Words? Lexicon-Free Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Who Needs Words? Lexicon-Free Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-9|PAPER Tue-P-3-D-9 — Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191952.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-10|PAPER Tue-P-3-B-10 — Guiding CTC Posterior Spike Timings for Improved Posterior Fusion and Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guiding CTC Posterior Spike Timings for Improved Posterior Fusion and Knowledge Distillation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191930.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-11|PAPER Tue-P-3-B-11 — Direct Neuron-Wise Fusion of Cognate Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Neuron-Wise Fusion of Cognate Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-14|PAPER Tue-P-3-B-14 — Multi-Task CTC Training with Auxiliary Feature Reconstruction for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task CTC Training with Auxiliary Feature Reconstruction for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-1|PAPER Mon-P-2-D-1 — Multi-Corpus Acoustic-to-Articulatory Speech Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Corpus Acoustic-to-Articulatory Speech Inversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193179.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-5|PAPER Tue-O-5-5-5 — Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-8|PAPER Tue-P-5-D-8 — Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191442.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-12|PAPER Thu-P-9-A-12 — Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191440.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-13|PAPER Thu-P-10-A-13 — Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-4|PAPER Thu-P-10-B-4 — An Online Attention-Based Model for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Online Attention-Based Model for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-8|PAPER Tue-P-3-C-8 — Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-3|PAPER Wed-O-6-5-3 — Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191847.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — A Frequency Normalization Technique for Kindergarten Speech Recognition Inspired by the Role of f,,o,, in Vowel Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Frequency Normalization Technique for Kindergarten Speech Recognition Inspired by the Role of f,,o,, in Vowel Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193146.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-1|PAPER Tue-O-3-2-1 — Deep Speaker Recognition: Modular or Monolithic?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Recognition: Modular or Monolithic?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192589.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-2|PAPER Wed-P-7-B-2 — Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193060.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-4|PAPER Thu-O-10-1-4 — Scalable Multi Corpora Neural Language Models for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scalable Multi Corpora Neural Language Models for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-5|PAPER Tue-O-4-5-5 — Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198005.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-1|PAPER Tue-S&T-2-1 — Directional Audio Rendering Using a Neural Network Based Personalized HRTF]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Directional Audio Rendering Using a Neural Network Based Personalized HRTF</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-2|PAPER Wed-O-6-5-2 — Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-1|PAPER Thu-O-9-2-1 — Advancing Sequence-to-Sequence Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Sequence-to-Sequence Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-4|PAPER Mon-P-2-C-4 — M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191539.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-9|PAPER Thu-P-10-B-9 — Real to H-Space Encoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real to H-Space Encoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191368.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — Expressiveness Influences Human Vocal Alignment Toward voice-AI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expressiveness Influences Human Vocal Alignment Toward voice-AI</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-1|PAPER Mon-O-1-3-1 — Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-1|PAPER Tue-P-5-D-1 — The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191433.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-13|PAPER Tue-P-5-D-13 — Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-6|PAPER Mon-P-2-E-6 — Unsupervised Low-Rank Representations for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Low-Rank Representations for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-5|PAPER Thu-SS-9-6-5 — Extracting Mel-Frequency and Bark-Frequency Cepstral Coefficients from Encrypted Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extracting Mel-Frequency and Bark-Frequency Cepstral Coefficients from Encrypted Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192732.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-3|PAPER Mon-P-2-C-3 — Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192743.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Explaining Sentiment Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Explaining Sentiment Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192378.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-6|PAPER Mon-P-1-D-6 — Automatic Lyric Transcription from Karaoke Vocal Tracks: Resources and a Baseline System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Lyric Transcription from Karaoke Vocal Tracks: Resources and a Baseline System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192422.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-14|PAPER Tue-P-5-D-14 — End-to-End Convolutional Sequence Learning for ASL Fingerspelling Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Convolutional Sequence Learning for ASL Fingerspelling Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192618.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-4|PAPER Wed-O-8-1-4 — MobiLipNet: Resource-Efficient Deep Learning Based Lipreading]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiLipNet: Resource-Efficient Deep Learning Based Lipreading</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-4|PAPER Thu-S&T-6-4 — The SAIL LABS Media Mining Indexer and the CAVA Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SAIL LABS Media Mining Indexer and the CAVA Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-9|PAPER Tue-P-5-A-9 — Transformer Based Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer Based Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-8|PAPER Thu-P-10-A-8 — End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192394.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-2|PAPER Wed-P-8-E-2 — Neural Network Distillation on IoT Platforms for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network Distillation on IoT Platforms for Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-5|PAPER Wed-P-6-A-5 — Adversarial Optimization for Dictionary Attacks on Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Optimization for Dictionary Attacks on Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-10|PAPER Tue-P-3-C-10 — Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-11|PAPER Wed-P-6-B-11 — CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-2|PAPER Wed-P-7-C-2 — Modeling User Context for Valence Prediction from Narratives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling User Context for Valence Prediction from Narratives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191826.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-4|PAPER Thu-P-9-C-4 — An Incremental Turn-Taking Model for Task-Oriented Dialog Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Incremental Turn-Taking Model for Task-Oriented Dialog Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-15|PAPER Mon-P-2-A-15 — Semi-Supervised Voice Conversion with Amortized Variational Inference]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Voice Conversion with Amortized Variational Inference</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-8|PAPER Tue-P-5-B-8 — Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-1|PAPER Mon-P-2-C-1 — Mitigating Noisy Inputs for Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Noisy Inputs for Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191382.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-2|PAPER Thu-O-10-5-2 — Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192912.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-8|PAPER Mon-P-1-A-8 — Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-15|PAPER Tue-P-5-C-15 — Performance Monitoring for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Performance Monitoring for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-4|PAPER Tue-O-5-4-4 — BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193191.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-2|PAPER Tue-P-5-A-2 — Building a Mixed-Lingual Neural TTS System with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Mixed-Lingual Neural TTS System with Only Monolingual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-3|PAPER Wed-O-8-4-3 — Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-5|PAPER Wed-P-8-A-5 — Super-Wideband Spectral Envelope Modeling for Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Super-Wideband Spectral Envelope Modeling for Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193060.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-4|PAPER Thu-O-10-1-4 — Scalable Multi Corpora Neural Language Models for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Scalable Multi Corpora Neural Language Models for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191522.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-9|PAPER Thu-P-9-A-9 — Two-Stage Training for Chinese Dialect Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Stage Training for Chinese Dialect Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-3|PAPER Mon-P-1-C-3 — Mitigating Gender and L1 Differences to Improve State and Trait Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Gender and L1 Differences to Improve State and Trait Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-3|PAPER Mon-O-1-4-3 — Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-3|PAPER Wed-S&T-5-3 — Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-2|PAPER Thu-P-10-C-2 — Spontaneous Conversational Speech Synthesis from Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spontaneous Conversational Speech Synthesis from Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-1|PAPER Mon-P-1-B-1 — Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-5|PAPER Tue-O-4-3-5 — Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192132.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-5|PAPER Wed-P-6-B-5 — Leveraging a Character, Word and Prosody Triplet for an ASR Error Robust and Agglutination Friendly Punctuation Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging a Character, Word and Prosody Triplet for an ASR Error Robust and Agglutination Friendly Punctuation Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-3|PAPER Mon-P-1-E-3 — Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191991.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-15|PAPER Tue-SS-4-4-15 — Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191982.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-3|PAPER Tue-O-4-1-3 — RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191943.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-11|PAPER Mon-P-1-A-11 — Joint Speech Recognition and Speaker Diarization via Sequence Transduction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speech Recognition and Speaker Diarization via Sequence Transduction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-5|PAPER Wed-P-8-C-5 — Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192182.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-5|PAPER Wed-P-7-B-5 — Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-3|PAPER Tue-SS-4-4-3 — The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-12|PAPER Wed-SS-6-4-12 — The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191514.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-6|PAPER Mon-O-1-5-6 — A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191942.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-5|PAPER Mon-P-1-E-5 — A Combination of Model-Based and Feature-Based Strategy for Speech-to-Singing Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Combination of Model-Based and Feature-Based Strategy for Speech-to-Singing Alignment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191887.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-10|PAPER Tue-SS-4-4-10 — Long Range Acoustic Features for Spoofed Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Long Range Acoustic Features for Spoofed Speech Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-4|PAPER Tue-O-4-1-4 — Target Speaker Extraction for Multi-Talker Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Extraction for Multi-Talker Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-4|PAPER Tue-O-4-2-4 — Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191928.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-5|PAPER Tue-P-4-E-5 — Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191925.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-6|PAPER Tue-P-4-E-6 — On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191520.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-10|PAPER Tue-P-4-E-10 — Acoustic Modeling for Automatic Lyrics-to-Audio Alignment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Modeling for Automatic Lyrics-to-Audio Alignment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-7|PAPER Tue-S&T-2-7 —  NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191894.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-9|PAPER Wed-SS-6-4-9 — Instantaneous Phase and Long-Term Acoustic Cues for Orca Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Instantaneous Phase and Long-Term Acoustic Cues for Orca Activity Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192361.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-6|PAPER Wed-P-6-A-6 — An Adaptive-Q Cochlear Model for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive-Q Cochlear Model for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191382.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-2|PAPER Thu-O-10-5-2 — Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-4|PAPER Thu-O-10-5-4 — Code-Switching Detection Using ASR-Generated Language Posteriors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Detection Using ASR-Generated Language Posteriors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-6|PAPER Thu-O-10-5-6 — Multi-Graph Decoding for Code-Switching ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Graph Decoding for Code-Switching ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191994.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-5|PAPER Thu-P-10-A-5 — A Unified Framework for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-6|PAPER Wed-O-8-4-6 — Nonparallel Emotional Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193155.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-3|PAPER Mon-P-2-B-3 — Multi-Accent Adaptation Based on Gate Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Accent Adaptation Based on Gate Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-11|PAPER Tue-P-5-D-11 — Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192384.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-3|PAPER Wed-P-7-E-3 — Unsupervised Methods for Audio Classification from Lecture Discussion Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Audio Classification from Lecture Discussion Recordings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-3|PAPER Mon-P-1-B-3 — Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NITK Kids’ Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192851.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-4|PAPER Wed-P-7-D-4 — The Contribution of Lip Protrusion to Anglo-English /r/: Evidence from Hyper- and Non-Hyperarticulated Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Contribution of Lip Protrusion to Anglo-English /r/: Evidence from Hyper- and Non-Hyperarticulated Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-6|PAPER Tue-P-5-E-6 — Understanding and Visualizing Raw Waveform-Based CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding and Visualizing Raw Waveform-Based CNNs</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192911.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-2|PAPER Thu-P-10-D-2 — Profiling Speech Motor Impairments in Persons with Amyotrophic Lateral Sclerosis: An Acoustic-Based Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Profiling Speech Motor Impairments in Persons with Amyotrophic Lateral Sclerosis: An Acoustic-Based Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191298.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-10|PAPER Wed-P-7-E-10 — Music Genre Classification Using Duplicated Convolutional Layers in Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Music Genre Classification Using Duplicated Convolutional Layers in Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191580.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-7|PAPER Wed-P-8-A-7 — Artificial Bandwidth Extension Using H∞ Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Artificial Bandwidth Extension Using H∞ Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191997.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-9|PAPER Wed-SS-7-A-9 — The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Path Signature Approach for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191473.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-1|PAPER Mon-O-1-4-1 — An Unsupervised Autoregressive Model for Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Autoregressive Model for Speech Representation Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192731.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-4|PAPER Wed-O-6-2-4 — A Deep Residual Network for Large-Scale Acoustic Scene Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Residual Network for Large-Scale Acoustic Scene Analysis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-6|PAPER Wed-O-8-5-6 — VoiceID Loss: Speech Enhancement for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceID Loss: Speech Enhancement for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-7|PAPER Mon-P-2-A-7 — Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-3|PAPER Tue-P-5-C-3 — Improving Performance of End-to-End ASR on Numeric Sequences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Performance of End-to-End ASR on Numeric Sequences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-13|PAPER Thu-P-10-C-13 — Dual Encoder Classifier Models as Constraints in Neural Text Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Encoder Classifier Models as Constraints in Neural Text Normalization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192651.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-3|PAPER Thu-P-9-E-3 — Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-2|PAPER Tue-O-4-2-2 — A New GAN-Based End-to-End TTS Training Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New GAN-Based End-to-End TTS Training Algorithm</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-7|PAPER Thu-P-10-C-7 — Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-11|PAPER Wed-P-6-C-11 — Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-3|PAPER Wed-O-6-5-3 — Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192608.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-4|PAPER Tue-P-3-B-4 — Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192224.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-2|PAPER Mon-O-2-3-2 — Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-1|PAPER Thu-P-9-E-1 — On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192881.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-2|PAPER Tue-P-5-B-2 — Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-15|PAPER Mon-P-1-A-15 — Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-5|PAPER Wed-P-7-C-5 — Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191781.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-10|PAPER Tue-P-5-A-10 — Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192716.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-2|PAPER Tue-SS-3-6-2 — LEAP Diarization System for the Second DIHARD Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LEAP Diarization System for the Second DIHARD Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191510.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-10|PAPER Thu-P-9-A-10 — Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191428.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-13|PAPER Thu-P-9-A-13 — Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-8|PAPER Wed-P-7-C-8 — Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-1|PAPER Tue-P-3-A-1 — Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-3|PAPER Mon-P-1-E-3 — Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191991.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-15|PAPER Tue-SS-4-4-15 — Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191982.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-3|PAPER Tue-O-4-1-3 — RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-3|PAPER Tue-S&T-2-3 — Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191750.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-9|PAPER Mon-P-1-D-9 — Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-6|PAPER Mon-O-2-4-6 — Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192120.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-10|PAPER Wed-P-6-A-10 — Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192169.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-6|PAPER Thu-P-10-E-6 — Towards Joint Sound Scene and Polyphonic Sound Event Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Joint Sound Scene and Polyphonic Sound Event Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192384.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-3|PAPER Wed-P-7-E-3 — Unsupervised Methods for Audio Classification from Lecture Discussion Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Audio Classification from Lecture Discussion Recordings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-10|PAPER Thu-P-9-B-10 — On the Use of Pitch Features for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Use of Pitch Features for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191796.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-6|PAPER Thu-P-9-B-6 — Deep Sensing of Breathing Signal During Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Sensing of Breathing Signal During Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192336.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-6|PAPER Tue-SS-5-6-6 — Zero Resource Speech Synthesis Using Transcripts Derived from Perceptual Acoustic Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Zero Resource Speech Synthesis Using Transcripts Derived from Perceptual Acoustic Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191504.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-8|PAPER Mon-P-1-E-8 — Phone Aware Nearest Neighbor Technique Using Spectral Transition Measure for Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone Aware Nearest Neighbor Technique Using Spectral Transition Measure for Non-Parallel Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192608.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-4|PAPER Tue-P-3-B-4 — Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192742.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-2|PAPER Wed-P-6-A-2 — Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193096.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-7|PAPER Wed-SS-8-6-7 — Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191800.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-2|PAPER Tue-O-5-1-2 — Evaluating Near End Listening Enhancement Algorithms in Realistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Near End Listening Enhancement Algorithms in Realistic Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191148.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-3|PAPER Thu-SS-9-6-3 — Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-8|PAPER Wed-P-8-B-8 — Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193051.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-2|PAPER Thu-P-9-D-2 — On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192162.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-5|PAPER Mon-P-2-B-5 — Cumulative Adaptation for BLSTM Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cumulative Adaptation for BLSTM Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192879.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-2|PAPER Tue-O-5-2-2 — An Analysis of Local Monotonic Attention Variants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Local Monotonic Attention Variants</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-7|PAPER Tue-P-3-B-7 — Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191728.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-6|PAPER Wed-O-6-5-6 — Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192225.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-3|PAPER Thu-O-10-1-3 — Language Modeling with Deep Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling with Deep Transformers</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191817.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-4|PAPER Thu-P-9-D-4 — Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192678.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-2|PAPER Tue-P-3-B-2 — Unbiased Semi-Supervised LF-MMI Training Using Dropout]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbiased Semi-Supervised LF-MMI Training Using Dropout</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192791.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-4|PAPER Wed-P-6-C-4 — Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191388.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-5|PAPER Mon-P-1-A-5 — LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191864.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-7|PAPER Tue-P-4-E-7 — Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191311.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-5|PAPER Tue-O-4-2-5 — Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192236.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-5|PAPER Mon-P-2-A-5 — StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-1|PAPER Thu-P-10-E-1 — A Modified Algorithm for Multiple Input Spectrogram Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Modified Algorithm for Multiple Input Spectrogram Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-6|PAPER Mon-P-1-C-6 — Conversational and Social Laughter Synthesis with WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational and Social Laughter Synthesis with WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-6|PAPER Tue-O-3-4-6 — Speech Quality Evaluation of Synthesized Japanese Speech Using EEG]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Quality Evaluation of Synthesized Japanese Speech Using EEG</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191864.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-7|PAPER Tue-P-4-E-7 — Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191855.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-1|PAPER Wed-O-6-2-1 — Audio Classification of Bit-Representation Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Classification of Bit-Representation Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-14|PAPER Mon-P-2-C-14 — Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-3|PAPER Thu-P-9-C-3 — A Neural Turn-Taking Model without RNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Neural Turn-Taking Model without RNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193038.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-7|PAPER Thu-O-10-5-7 — End-to-End Multilingual Multi-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multilingual Multi-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-4|PAPER Thu-O-9-3-4 — Vectorized Beam Search for CTC-Attention-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vectorized Beam Search for CTC-Attention-Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193267.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-1|PAPER Tue-P-4-E-1 — Direct F0 Estimation with Neural-Network-Based Regression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct F0 Estimation with Neural-Network-Based Regression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-7|PAPER Mon-S&T-1-7 — Speech-Based Web Navigation for Limited Mobility Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-Based Web Navigation for Limited Mobility Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-14|PAPER Mon-P-1-B-14 — One-Pass Single-Channel Noisy Speech Recognition Using a Combination of Noisy and Enhanced Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Pass Single-Channel Noisy Speech Recognition Using a Combination of Noisy and Enhanced Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191288.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-6|PAPER Tue-O-4-2-6 — Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191777.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-3|PAPER Wed-O-7-5-3 — Incorporating Symbolic Sequential Modeling for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Symbolic Sequential Modeling for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192271.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-3|PAPER Wed-P-8-E-3 — Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-8|PAPER Thu-P-10-C-8 — Duration Modeling with Global Phoneme-Duration Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Duration Modeling with Global Phoneme-Duration Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-10|PAPER Wed-P-8-A-10 — Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191510.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-10|PAPER Thu-P-9-A-10 — Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191508.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-13|PAPER Mon-P-1-A-13 — Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-5|PAPER Wed-O-6-1-5 — Phonological Awareness of French Rising Contours in Japanese Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Awareness of French Rising Contours in Japanese Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-5|PAPER Thu-P-9-A-5 — Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198005.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-1|PAPER Tue-S&T-2-1 — Directional Audio Rendering Using a Neural Network Based Personalized HRTF]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Directional Audio Rendering Using a Neural Network Based Personalized HRTF</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-1|PAPER Wed-P-8-A-1 — Parameter Enhancement for MELP Speech Codec in Noisy Communication Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parameter Enhancement for MELP Speech Codec in Noisy Communication Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191442.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-12|PAPER Thu-P-9-A-12 — Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191440.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-13|PAPER Thu-P-10-A-13 — Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192120.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-10|PAPER Wed-P-6-A-10 — Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-5|PAPER Thu-O-9-4-5 — Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-4|PAPER Mon-P-1-E-4 — Rare Sound Event Detection Using Deep Learning and Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rare Sound Event Detection Using Deep Learning and Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-3|PAPER Tue-O-3-3-3 — Iterative Delexicalization for Improved Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Delexicalization for Improved Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-4|PAPER Tue-O-4-3-4 — Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191736.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-10|PAPER Mon-P-1-D-10 — Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191390.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-7|PAPER Wed-P-6-B-7 —  Kite: Automatic Speech Recognition for Unmanned Aerial Vehicles]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Kite: Automatic Speech Recognition for Unmanned Aerial Vehicles</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-4|PAPER Wed-O-8-3-4 — Speech Emotion Recognition Based on Multi-Label Emotion Existence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition Based on Multi-Label Emotion Existence Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191831.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-7|PAPER Wed-P-7-C-7 — Development of Emotion Rankers Based on Intended and Perceived Emotion Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Emotion Rankers Based on Intended and Perceived Emotion Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-15|PAPER Thu-P-10-C-15 — Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191519.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-2|PAPER Wed-P-6-E-2 — Noise Adaptive Speech Enhancement Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noise Adaptive Speech Enhancement Using Domain Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192425.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-6|PAPER Wed-P-6-E-6 — Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-6|PAPER Tue-P-3-D-6 — Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191400.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-12|PAPER Thu-P-10-C-12 — A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-3|PAPER Tue-O-4-5-3 — Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192423.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-5|PAPER Thu-P-10-E-5 — CNN-LSTM Models for Multi-Speaker Source Separation Using Bayesian Hyper Parameter Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-LSTM Models for Multi-Speaker Source Separation Using Bayesian Hyper Parameter Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-1|PAPER Wed-P-7-C-1 — Cross-Corpus Speech Emotion Recognition Using Semi-Supervised Transfer Non-Negative Matrix Factorization with Adaptation Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Corpus Speech Emotion Recognition Using Semi-Supervised Transfer Non-Negative Matrix Factorization with Adaptation Regularization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-3|PAPER Thu-O-9-4-3 — Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191567.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-2|PAPER Tue-P-3-E-2 — UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-8|PAPER Wed-P-6-E-8 — Investigation of Cost Function for Supervised Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Cost Function for Supervised Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-8|PAPER Wed-P-6-C-8 — Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193214.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-1|PAPER Mon-P-1-D-1 — Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192663.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-2|PAPER Mon-P-2-A-2 — One-Shot Voice Conversion by Separating Speaker and Content Representations with Instance Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion by Separating Speaker and Content Representations with Instance Normalization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191265.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-13|PAPER Mon-P-2-A-13 — Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192048.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-5|PAPER Tue-SS-5-6-5 — Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-5|PAPER Tue-O-4-5-5 — Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-5|PAPER Tue-P-4-B-5 — Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192730.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-5|PAPER Tue-P-5-A-5 — End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191519.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-2|PAPER Wed-P-6-E-2 — Noise Adaptive Speech Enhancement Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noise Adaptive Speech Enhancement Using Domain Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191696.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-5|PAPER Thu-P-9-C-5 — Personalized Dialogue Response Generation Learned from Monologues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalized Dialogue Response Generation Learned from Monologues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-2|PAPER Tue-P-5-E-2 — A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191997.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-9|PAPER Wed-SS-7-A-9 — The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-9|PAPER Wed-P-6-A-9 — Device Feature Extractor for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Device Feature Extractor for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-11|PAPER Wed-P-8-E-11 — Semi-Supervised Audio Classification with Consistency-Based Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Audio Classification with Consistency-Based Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-7|PAPER Wed-P-8-E-7 — A Robust Framework for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Robust Framework for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-3|PAPER Mon-P-1-E-3 — Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191991.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-15|PAPER Tue-SS-4-4-15 — Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191982.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-3|PAPER Tue-O-4-1-3 — RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191722.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-2|PAPER Wed-O-6-3-2 — Adversarially Trained End-to-End Korean Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained End-to-End Korean Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192397.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-9|PAPER Thu-P-9-E-9 — End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-15|PAPER Tue-P-5-C-15 — Performance Monitoring for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Performance Monitoring for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191343.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-8|PAPER Wed-P-6-B-8 — Exploring Methods for the Automatic Detection of Errors in Manual Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Methods for the Automatic Detection of Errors in Manual Transcription</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192723.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-1|PAPER Wed-P-8-B-1 — Modulation Vectors as Robust Feature Representation for ASR in Domain Mismatched Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modulation Vectors as Robust Feature Representation for ASR in Domain Mismatched Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192397.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-9|PAPER Thu-P-9-E-9 — End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-5|PAPER Thu-P-9-A-5 — Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-8|PAPER Mon-P-1-B-8 — NIESR: Nuisance Invariant End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NIESR: Nuisance Invariant End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-2|PAPER Tue-P-3-A-2 — Selection and Training Schemes for Improving TTS Voice Built on Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Selection and Training Schemes for Improving TTS Voice Built on Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-4|PAPER Tue-O-5-4-4 — BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-7|PAPER Wed-P-8-E-7 — A Robust Framework for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Robust Framework for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-2|PAPER Wed-S&T-5-2 —  GFM-Voc: A Real-Time Voice Quality Modification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> GFM-Voc: A Real-Time Voice Quality Modification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191606.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-7|PAPER Thu-P-9-A-7 — An Effective Deep Embedding Learning Architecture for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Deep Embedding Learning Architecture for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191256.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-15|PAPER Thu-P-9-A-15 — A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193116.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-6|PAPER Mon-P-1-A-6 — Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-1|PAPER Tue-P-3-B-1 — Attention Model for Articulatory Features Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Model for Articulatory Features Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192462.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-3|PAPER Tue-SS-3-6-3 — ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191728.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-6|PAPER Wed-O-6-5-6 — Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192366.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-4|PAPER Tue-O-3-3-4 — End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192169.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-6|PAPER Thu-P-10-E-6 — Towards Joint Sound Scene and Polyphonic Sound Event Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Joint Sound Scene and Polyphonic Sound Event Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192791.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-4|PAPER Wed-P-6-C-4 — Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

{{Author Index Link Row}}
|cpborderless|k
|cpaidxlinkrowtable|k
|<$link to="AUTHOR LIST — A"><div class="cpaidxlinkrowstyle">A</div></$link>|<$link to="AUTHOR LIST — B"><div class="cpaidxlinkrowstyle">B</div></$link>|<$link to="AUTHOR LIST — C"><div class="cpaidxlinkrowstyle">C</div></$link>|<$link to="AUTHOR LIST — D"><div class="cpaidxlinkrowstyle">D</div></$link>|<$link to="AUTHOR LIST — E"><div class="cpaidxlinkrowstyle">E</div></$link>|<$link to="AUTHOR LIST — F"><div class="cpaidxlinkrowstyle">F</div></$link>|<$link to="AUTHOR LIST — G"><div class="cpaidxlinkrowstyle">G</div></$link>|<$link to="AUTHOR LIST — H"><div class="cpaidxlinkrowstyle">H</div></$link>|<$link to="AUTHOR LIST — I"><div class="cpaidxlinkrowstyle">I</div></$link>|<$link to="AUTHOR LIST — J"><div class="cpaidxlinkrowstyle">J</div></$link>|<$link to="AUTHOR LIST — K"><div class="cpaidxlinkrowstyle">K</div></$link>|<$link to="AUTHOR LIST — L"><div class="cpaidxlinkrowstyle">L</div></$link>|<$link to="AUTHOR LIST — M"><div class="cpaidxlinkrowstyle">M</div></$link>|
|<$link to="AUTHOR LIST — N"><div class="cpaidxlinkrowstyle">N</div></$link>|<$link to="AUTHOR LIST — O"><div class="cpaidxlinkrowstyle">O</div></$link>|<$link to="AUTHOR LIST — P"><div class="cpaidxlinkrowstyle">P</div></$link>|<$link to="AUTHOR LIST — Q"><div class="cpaidxlinkrowstyle">Q</div></$link>|<$link to="AUTHOR LIST — R"><div class="cpaidxlinkrowstyle">R</div></$link>|<$link to="AUTHOR LIST — S"><div class="cpaidxlinkrowstyle">S</div></$link>|<$link to="AUTHOR LIST — T"><div class="cpaidxlinkrowstyle">T</div></$link>|<$link to="AUTHOR LIST — U"><div class="cpaidxlinkrowstyle">U</div></$link>|<$link to="AUTHOR LIST — V"><div class="cpaidxlinkrowstyle">V</div></$link>|<$link to="AUTHOR LIST — W"><div class="cpaidxlinkrowstyle">W</div></$link>|<$link to="AUTHOR LIST — X"><div class="cpaidxlinkrowstyle">X</div></$link>|<$link to="AUTHOR LIST — Y"><div class="cpaidxlinkrowstyle">Y</div></$link>|<$link to="AUTHOR LIST — Z"><div class="cpaidxlinkrowstyle">Z</div></$link>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-3|PAPER Thu-P-9-B-3 — “Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Althingi ASR System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191790.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-1|PAPER Thu-O-9-3-1 — Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-4|PAPER Mon-O-2-4-4 — Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-10|PAPER Tue-P-3-C-10 — Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191658.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-4|PAPER Thu-O-10-2-4 — Robust Speech Emotion Recognition Under Different Encoding Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Speech Emotion Recognition Under Different Encoding Conditions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-6|PAPER Thu-O-10-4-6 — Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-6|PAPER Thu-O-10-4-6 — Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-10|PAPER Thu-P-10-D-10 — Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-5|PAPER Tue-P-3-D-5 — “ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-5|PAPER Mon-P-2-D-5 — Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-4|PAPER Mon-O-2-4-4 — Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-6|PAPER Thu-O-10-4-6 — Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-4|PAPER Wed-O-6-1-4 — The Role of Voice Quality in the Perception of Prominence in Synthetic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Voice Quality in the Perception of Prominence in Synthetic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192772.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-4|PAPER Tue-P-5-B-4 — Recognition of Latin American Spanish Using Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Latin American Spanish Using Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192607.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-5|PAPER Wed-O-8-4-5 — Effects of Waveform PMF on Anti-Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Waveform PMF on Anti-Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-6|PAPER Tue-O-3-4-6 — Speech Quality Evaluation of Synthesized Japanese Speech Using EEG]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Quality Evaluation of Synthesized Japanese Speech Using EEG</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-2|PAPER Mon-P-1-C-2 — Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-7|PAPER Tue-P-4-C-7 — Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191997.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-9|PAPER Wed-SS-7-A-9 — The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-6|PAPER Wed-P-7-B-6 — Keyword Spotting for Hearing Assistive Devices Robust to External Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Keyword Spotting for Hearing Assistive Devices Robust to External Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191943.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-11|PAPER Mon-P-1-A-11 — Joint Speech Recognition and Speaker Diarization via Sequence Transduction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speech Recognition and Speaker Diarization via Sequence Transduction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191715.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-7|PAPER Mon-P-1-E-7 — Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191405.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-12|PAPER Mon-P-1-C-12 — Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192080.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-10|PAPER Wed-P-6-C-10 — Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-5|PAPER Thu-O-9-3-5 — Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191541.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-8|PAPER Tue-P-5-E-8 — ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-1|PAPER Mon-O-2-5-1 — VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-1|PAPER Mon-O-2-5-1 — VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192512.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-4|PAPER Wed-O-8-4-4 — A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-9|PAPER Mon-P-2-A-9 — Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193146.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-1|PAPER Tue-O-3-2-1 — Deep Speaker Recognition: Modular or Monolithic?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Recognition: Modular or Monolithic?</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192956.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-1|PAPER Wed-P-6-A-1 — Blind Channel Response Estimation for Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Channel Response Estimation for Replay Attack Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-11|PAPER Wed-P-6-B-11 — CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192974.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-2|PAPER Thu-P-9-A-2 — Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192549.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-5|PAPER Tue-O-3-5-5 — Unsupervised Training of Neural Mask-Based Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Training of Neural Mask-Based Beamforming</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192335.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-7|PAPER Tue-P-5-A-7 — Unified Language-Independent DNN-Based G2P Converter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Language-Independent DNN-Based G2P Converter</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198013.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-1|PAPER Wed-S&T-5-1 — Web-Based Speech Synthesis Editor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Web-Based Speech Synthesis Editor</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192599.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-5|PAPER Mon-O-1-1-5 — Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191473.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-1|PAPER Mon-O-1-4-1 — An Unsupervised Autoregressive Model for Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Autoregressive Model for Speech Representation Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191718.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-5|PAPER Mon-O-2-3-5 — Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-3|PAPER Mon-P-1-A-3 — MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191736.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-10|PAPER Mon-P-1-D-10 — Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191262.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-13|PAPER Mon-P-2-C-13 — A Comparison of Deep Learning Methods for Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Deep Learning Methods for Language Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192731.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-4|PAPER Wed-O-6-2-4 — A Deep Residual Network for Large-Scale Acoustic Scene Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Residual Network for Large-Scale Acoustic Scene Analysis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192653.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-2|PAPER Wed-O-7-4-2 — Multiple Sound Source Localization with SVD-PHAT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiple Sound Source Localization with SVD-PHAT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-6|PAPER Wed-O-8-5-6 — VoiceID Loss: Speech Enhancement for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceID Loss: Speech Enhancement for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-10|PAPER Wed-P-7-B-10 — Transfer Learning from Audio-Visual Grounding to Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning from Audio-Visual Grounding to Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192224.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-2|PAPER Mon-O-2-3-2 — Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192813.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-1|PAPER Mon-P-1-A-1 — Bayesian HMM Based x-Vector Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian HMM Based x-Vector Clustering for Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-11|PAPER Tue-P-5-C-11 — Lattice Generation in Attention-Based Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Generation in Attention-Based Speech Recognition Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192720.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-3|PAPER Thu-P-10-B-3 — Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-5|PAPER Mon-O-2-4-5 — PASCAL and DPA: A Pilot Study on Using Prosodic Competence Scores to Predict Communicative Skills for Team Working and Public Speaking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PASCAL and DPA: A Pilot Study on Using Prosodic Competence Scores to Predict Communicative Skills for Team Working and Public Speaking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-6|PAPER Mon-O-2-4-6 — Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192702.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-2|PAPER Mon-O-1-1-2 — Very Deep Self-Attention Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Very Deep Self-Attention Networks for End-to-End Speech Recognition</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-1|PAPER Tue-O-3-1-1 — Survey Talk: A Survey on Speech Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: A Survey on Speech Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191861.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-1|PAPER Mon-P-2-E-1 — Salient Speech Representations Based on Cloned Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Salient Speech Representations Based on Cloned Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191255.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-4|PAPER Wed-P-8-A-4 — A Real-Time Wideband Neural Vocoder at 1.6kb/s Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Wideband Neural Vocoder at 1.6kb/s Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198011.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-3|PAPER Wed-S&T-3-3 — Multimodal Dialog with the MALACH Audiovisual Archive]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Dialog with the MALACH Audiovisual Archive</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191385.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-4|PAPER Tue-SS-3-6-4 — UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192082.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-8|PAPER Tue-P-4-D-8 — Perceptual Evaluation of Early versus Late F0 Peaks in the Intonation Structure of Czech Question-Word Questions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Evaluation of Early versus Late F0 Peaks in the Intonation Structure of Czech Question-Word Questions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191812.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-9|PAPER Mon-P-2-D-9 — Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198015.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-2|PAPER Wed-S&T-4-2 — A User-Friendly and Adaptable Re-Implementation of an Acoustic Prominence Detection and Annotation Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A User-Friendly and Adaptable Re-Implementation of an Acoustic Prominence Detection and Annotation Tool</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191703.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-4|PAPER Thu-SS-9-6-4 — Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192910.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-6|PAPER Wed-SS-8-6-6 — Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-1|PAPER Thu-S&T-6-1 — Elpis, an Accessible Speech-to-Text Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Elpis, an Accessible Speech-to-Text Tool</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-7|PAPER Wed-P-6-A-7 — An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191806.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-11|PAPER Tue-P-3-D-11 — An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191824.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-8|PAPER Tue-P-3-A-8 — Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-4|PAPER Tue-P-5-A-4 — Analysis of Pronunciation Learning in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Pronunciation Learning in End-to-End Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-3|PAPER Thu-P-9-B-3 — “Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192347.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-8|PAPER Wed-P-8-C-8 — Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191444.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-11|PAPER Thu-P-9-A-11 — Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192616.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-2|PAPER Thu-P-10-A-2 — Self Multi-Head Attention for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self Multi-Head Attention for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191839.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-8|PAPER Mon-P-1-D-8 — EpaDB: A Database for Development of Pronunciation Assessment Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EpaDB: A Database for Development of Pronunciation Assessment Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-5|PAPER Wed-SS-8-6-5 — Analysis and Synthesis of Vocal Flutter and Vocal Jitter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis and Synthesis of Vocal Flutter and Vocal Jitter</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192607.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-5|PAPER Wed-O-8-4-5 — Effects of Waveform PMF on Anti-Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Waveform PMF on Anti-Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191255.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-4|PAPER Wed-P-8-A-4 — A Real-Time Wideband Neural Vocoder at 1.6kb/s Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Real-Time Wideband Neural Vocoder at 1.6kb/s Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191877.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-5|PAPER Wed-P-8-B-5 — Binary Speech Features for Keyword Spotting Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Binary Speech Features for Keyword Spotting Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191975.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-9|PAPER Tue-P-3-B-9 — Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-3|PAPER Mon-P-1-E-3 — Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191991.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-15|PAPER Tue-SS-4-4-15 — Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191982.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-3|PAPER Tue-O-4-1-3 — RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192210.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-5|PAPER Wed-P-6-D-5 — Perceiving Older Adults Producing Clear and Lombard Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceiving Older Adults Producing Clear and Lombard Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191795.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-12|PAPER Tue-P-5-D-12 — The Different Roles of Expectations in Phonetic and Lexical Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Different Roles of Expectations in Phonetic and Lexical Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-2|PAPER Tue-O-5-3-2 — Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192108.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-7|PAPER Wed-P-6-E-7 — Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191383.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-11|PAPER Mon-P-2-C-11 — Meta Learning for Hyperparameter Optimization in Dialogue System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Learning for Hyperparameter Optimization in Dialogue System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191548.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-2|PAPER Tue-O-4-3-2 — Self Attention in Variational Sequential Learning for Summarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self Attention in Variational Sequential Learning for Summarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-4|PAPER Thu-P-10-A-4 — Variational Domain Adversarial Learning for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Domain Adversarial Learning for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192044.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-3|PAPER Mon-O-2-1-3 — Attentive to Individual: A Multimodal Emotion Recognition Network with Personalized Attention Profile]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attentive to Individual: A Multimodal Emotion Recognition Network with Personalized Attention Profile</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-8|PAPER Wed-P-6-C-8 — Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193210.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-6|PAPER Thu-O-10-3-6 — The Vowel System of Korebaju]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Vowel System of Korebaju</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192684.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-6|PAPER Tue-P-4-D-6 — Testing the Distinctiveness of Intonational Tunes: Evidence from Imitative Productions in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Testing the Distinctiveness of Intonational Tunes: Evidence from Imitative Productions in American English</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-6|PAPER Tue-P-5-D-6 — Perception of Pitch Contours in Speech and Nonspeech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perception of Pitch Contours in Speech and Nonspeech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-9|PAPER Tue-SS-4-4-9 — Speech Replay Detection with x-Vector Attack Embeddings and Spectral Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Replay Detection with x-Vector Attack Embeddings and Spectral Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-6|PAPER Thu-O-10-2-6 — Disentangling Style Factors from Speaker Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disentangling Style Factors from Speaker Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-6|PAPER Mon-O-2-5-6 — How to Annotate 100 Hours in 45 Minutes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How to Annotate 100 Hours in 45 Minutes</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191553.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-7|PAPER Thu-P-9-D-7 — Spot the Pleasant People! Navigating the Cocktail Party Buzz]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Pleasant People! Navigating the Cocktail Party Buzz</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192244.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-3|PAPER Mon-O-1-2-3 — Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-7|PAPER Thu-P-10-B-7 — Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191492.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-9|PAPER Tue-P-5-E-9 — Analyzing Intra-Speaker and Inter-Speaker Vocal Tract Impedance Characteristics in a Low-Dimensional Feature Space Using t-SNE]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Intra-Speaker and Inter-Speaker Vocal Tract Impedance Characteristics in a Low-Dimensional Feature Space Using t-SNE</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-6|PAPER Wed-P-6-B-6 — The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192432.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-7|PAPER Thu-P-10-D-7 — Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-3|PAPER Tue-O-4-5-3 — Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192423.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-5|PAPER Thu-P-10-E-5 — CNN-LSTM Models for Multi-Speaker Source Separation Using Bayesian Hyper Parameter Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-LSTM Models for Multi-Speaker Source Separation Using Bayesian Hyper Parameter Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191332.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-1|PAPER Wed-P-8-C-1 — Reverse Transfer Learning: Can Word Embeddings Trained for Different NLP Tasks Improve Neural Language Models?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reverse Transfer Learning: Can Word Embeddings Trained for Different NLP Tasks Improve Neural Language Models?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192929.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-4|PAPER Thu-P-9-E-4 — Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191625.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-6|PAPER Thu-P-9-E-6 — Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-3|PAPER Tue-O-5-1-3 — Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-6|PAPER Wed-P-7-B-6 — Keyword Spotting for Hearing Assistive Devices Robust to External Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Keyword Spotting for Hearing Assistive Devices Robust to External Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192719.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-6|PAPER Thu-O-9-2-6 — Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191794.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-1|PAPER Tue-SS-4-4-1 — ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-12|PAPER Wed-P-6-A-12 — Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192397.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-9|PAPER Thu-P-9-E-9 — End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192295.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-3|PAPER Mon-P-2-E-3 — Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192127.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-4|PAPER Mon-P-1-B-4 — Full-Sentence Correlation: A Method to Handle Unpredictable Noise for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Full-Sentence Correlation: A Method to Handle Unpredictable Noise for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192872.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-1|PAPER Thu-P-9-B-1 — An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-5|PAPER Mon-O-2-2-5 — Large Margin Training for Attention Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Training for Attention Based End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-4|PAPER Thu-P-10-B-4 — An Online Attention-Based Model for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Online Attention-Based Model for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-3|PAPER Wed-O-8-5-3 — Large Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Softmax Loss for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191614.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-11|PAPER Tue-P-3-A-11 — A Chinese Dataset for Identifying Speakers in Novels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Chinese Dataset for Identifying Speakers in Novels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-1|PAPER Thu-P-9-D-1 — On the Role of Style in Parsing Speech with Neural Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Style in Parsing Speech with Neural Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192961.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-7|PAPER Mon-P-1-A-7 — Multi-PLDA Diarization on Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-PLDA Diarization on Children’s Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-9|PAPER Thu-P-10-E-9 — Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-6|PAPER Wed-O-8-4-6 — Nonparallel Emotional Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191577.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-11|PAPER Tue-P-4-C-11 — Conversational Emotion Analysis via Attention Mechanisms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Analysis via Attention Mechanisms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-1|PAPER Thu-O-9-4-1 — Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-4|PAPER Tue-P-4-E-4 — Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191541.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-8|PAPER Tue-P-5-E-8 — ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192792.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-5|PAPER Wed-O-7-5-5 — Coarse-to-Fine Optimization for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Coarse-to-Fine Optimization for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192228.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-6|PAPER Tue-O-4-3-6 — Latent Topic Attention for Domain Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Topic Attention for Domain Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191704.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-5|PAPER Wed-O-8-5-5 — Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-3|PAPER Mon-O-1-3-3 — Individual Difference of Relative Tongue Size and its Acoustic Effects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Difference of Relative Tongue Size and its Acoustic Effects</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191554.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-4|PAPER Thu-O-9-2-4 — Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192203.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-5|PAPER Thu-P-10-B-5 — Self-Attention Transducers for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Transducers for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191940.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-7|PAPER Thu-P-10-E-7 — Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191577.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-11|PAPER Tue-P-4-C-11 — Conversational Emotion Analysis via Attention Mechanisms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Analysis via Attention Mechanisms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191554.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-4|PAPER Thu-O-9-2-4 — Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-1|PAPER Thu-O-9-4-1 — Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192203.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-5|PAPER Thu-P-10-B-5 — Self-Attention Transducers for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Transducers for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191617.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-12|PAPER Thu-P-10-D-12 — Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191940.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-7|PAPER Thu-P-10-E-7 — Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191701.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-4|PAPER Thu-O-10-4-4 — CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-8|PAPER Tue-P-5-D-8 — Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-7|PAPER Tue-P-5-C-7 — Joint Decoding of CTC Based Systems for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Decoding of CTC Based Systems for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191418.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-11|PAPER Thu-P-10-C-11 — Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192320.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-4|PAPER Thu-O-9-5-4 — Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191887.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-10|PAPER Tue-SS-4-4-10 — Long Range Acoustic Features for Spoofed Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Long Range Acoustic Features for Spoofed Speech Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-9|PAPER Wed-P-6-A-9 — Device Feature Extractor for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Device Feature Extractor for Replay Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-4|PAPER Tue-P-4-E-4 — Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192872.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-1|PAPER Thu-P-9-B-1 — An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-4|PAPER Tue-P-5-E-4 — DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193201.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-5|PAPER Mon-SS-2-6-5 — Fusion Techniques for Utterance-Level Emotion Recognition Combining Speech and Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Techniques for Utterance-Level Emotion Recognition Combining Speech and Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192228.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-6|PAPER Tue-O-4-3-6 — Latent Topic Attention for Domain Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Topic Attention for Domain Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198013.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-1|PAPER Wed-S&T-5-1 — Web-Based Speech Synthesis Editor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Web-Based Speech Synthesis Editor</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198009.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-2|PAPER Thu-S&T-6-2 — Framework for Conducting Tasks Requiring Human Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framework for Conducting Tasks Requiring Human Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191407.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-10|PAPER Mon-P-1-E-10 — An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-8|PAPER Thu-P-10-C-8 — Duration Modeling with Global Phoneme-Duration Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Duration Modeling with Global Phoneme-Duration Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-1|PAPER Wed-P-6-D-1 — Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191973.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-4|PAPER Wed-O-8-2-4 — Multi-Stride Self-Attention for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stride Self-Attention for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192076.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-4|PAPER Tue-O-4-5-4 — Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-9|PAPER Thu-P-10-E-9 — Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-6|PAPER Tue-P-3-C-6 — Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191683.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-11|PAPER Tue-P-3-C-11 — Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-4|PAPER Thu-O-9-4-4 — Multi-Scale Time-Frequency Attention for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Time-Frequency Attention for Acoustic Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-6|PAPER Tue-P-3-C-6 — Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-3|PAPER Wed-O-7-1-3 — Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-13|PAPER Wed-P-8-D-13 — The Production of Chinese Affricates /ts/ and /ts^^h^^/ by Native Urdu Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Production of Chinese Affricates /ts/ and /ts^^h^^/ by Native Urdu Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-4|PAPER Thu-O-9-4-4 — Multi-Scale Time-Frequency Attention for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Time-Frequency Attention for Acoustic Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191343.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-8|PAPER Wed-P-6-B-8 — Exploring Methods for the Automatic Detection of Errors in Manual Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Methods for the Automatic Detection of Errors in Manual Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-4|PAPER Mon-O-2-2-4 — Speaker Adaptation for Attention-Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Attention-Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192971.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-3|PAPER Tue-O-5-2-3 — Layer Trajectory BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Layer Trajectory BLSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193056.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-14|PAPER Tue-P-5-C-14 — Acoustic-to-Phrase Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-to-Phrase Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-1|PAPER Wed-P-7-C-1 — Cross-Corpus Speech Emotion Recognition Using Semi-Supervised Transfer Non-Negative Matrix Factorization with Adaptation Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Corpus Speech Emotion Recognition Using Semi-Supervised Transfer Non-Negative Matrix Factorization with Adaptation Regularization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-3|PAPER Thu-O-9-4-3 — Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-5|PAPER Thu-O-9-4-5 — Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191691.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-3|PAPER Tue-O-5-4-3 — Multi-Lingual Dialogue Act Recognition with Deep Learning Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Lingual Dialogue Act Recognition with Deep Learning Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-5|PAPER Tue-O-3-4-5 — The Neural Correlates Underlying Lexically-Guided Perceptual Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Neural Correlates Underlying Lexically-Guided Perceptual Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-7|PAPER Mon-P-2-E-7 — On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-6|PAPER Tue-P-3-B-6 — Lattice-Based Lightly-Supervised Acoustic Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Based Lightly-Supervised Acoustic Model Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191553.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-7|PAPER Thu-P-9-D-7 — Spot the Pleasant People! Navigating the Cocktail Party Buzz]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Pleasant People! Navigating the Cocktail Party Buzz</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-3|PAPER Wed-S&T-5-3 — Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-2|PAPER Thu-P-10-C-2 — Spontaneous Conversational Speech Synthesis from Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spontaneous Conversational Speech Synthesis from Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-4|PAPER Mon-O-1-4-4 — Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-3|PAPER Tue-P-3-E-3 — Towards Generalized Speech Enhancement with Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Generalized Speech Enhancement with Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-9|PAPER Tue-SS-4-4-9 — Speech Replay Detection with x-Vector Attack Embeddings and Spectral Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Replay Detection with x-Vector Attack Embeddings and Spectral Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-7|PAPER Mon-SS-1-6-7 — Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192974.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-2|PAPER Thu-P-9-A-2 — Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192772.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-4|PAPER Tue-P-5-B-4 — Recognition of Latin American Spanish Using Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Latin American Spanish Using Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-1|PAPER Mon-P-2-C-1 — Mitigating Noisy Inputs for Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Noisy Inputs for Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192813.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-1|PAPER Mon-P-1-A-1 — Bayesian HMM Based x-Vector Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian HMM Based x-Vector Clustering for Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-1|PAPER Wed-O-8-5-1 — Self-Supervised Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Speaker Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191674.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-11|PAPER Wed-P-8-D-11 — No Distributional Learning in Adults from Attended Listening to Non-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">No Distributional Learning in Adults from Attended Listening to Non-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193074.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-5|PAPER Wed-O-6-2-5 — Supervised Classifiers for Audio Impairments with Noisy Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Classifiers for Audio Impairments with Noisy Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-4|PAPER Wed-O-7-5-4 — Maximum a posteriori Speech Enhancement Based on Double Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Maximum a posteriori Speech Enhancement Based on Double Spectrum</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-10|PAPER Tue-P-3-C-10 — Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193075.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-1|PAPER Tue-O-3-3-1 — Neural Transition Systems for Modeling Hierarchical Semantic Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transition Systems for Modeling Hierarchical Semantic Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-11|PAPER Mon-P-1-E-11 — Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-8|PAPER Wed-P-7-C-8 — Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-15|PAPER Mon-P-1-A-15 — Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-2|PAPER Tue-P-5-E-2 — A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191852.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-8|PAPER Wed-P-6-D-8 — Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192983.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-1|PAPER Thu-P-9-A-1 — Adversarial Regularization for End-to-End Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Regularization for End-to-End Robust Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191850.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-5|PAPER Thu-P-9-E-5 — Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191827.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-8|PAPER Thu-P-10-E-8 — Probabilistic Permutation Invariant Training for Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Probabilistic Permutation Invariant Training for Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191708.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-5|PAPER Wed-O-8-3-5 — Gender De-Biasing in Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gender De-Biasing in Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-8|PAPER Wed-P-8-B-8 — Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193038.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-7|PAPER Thu-O-10-5-7 — End-to-End Multilingual Multi-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multilingual Multi-Speaker Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192763.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-2|PAPER Wed-O-7-1-2 — The Effects of Time Expansion on English as a Second Language Individuals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effects of Time Expansion on English as a Second Language Individuals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192378.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-6|PAPER Mon-P-1-D-6 — Automatic Lyric Transcription from Karaoke Vocal Tracks: Resources and a Baseline System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Lyric Transcription from Karaoke Vocal Tracks: Resources and a Baseline System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191326.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-10|PAPER Tue-P-4-D-10 — F0 Variability Measures Based on Glottal Closure Instants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Variability Measures Based on Glottal Closure Instants</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Althingi ASR System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191790.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-1|PAPER Thu-O-9-3-1 — Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192367.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-6|PAPER Thu-P-10-C-6 — Bootstrapping a Text Normalization System for an Inflected Language. Numbers as a Test Case]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bootstrapping a Text Normalization System for an Inflected Language. Numbers as a Test Case</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-3|PAPER Wed-S&T-5-3 — Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-2|PAPER Thu-P-10-C-2 — Spontaneous Conversational Speech Synthesis from Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spontaneous Conversational Speech Synthesis from Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191775.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-4|PAPER Mon-O-2-3-4 — Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191781.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-10|PAPER Tue-P-5-A-10 — Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192571.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-3|PAPER Thu-P-10-C-3 — Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192190.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-5|PAPER Tue-P-3-A-5 — Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192115.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-3|PAPER Mon-O-2-4-3 — Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192894.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-8|PAPER Wed-SS-7-A-8 — Intel Far-Field Speaker Recognition System for VOiCES Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intel Far-Field Speaker Recognition System for VOiCES Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-4|PAPER Mon-O-1-1-4 — Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193038.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-7|PAPER Thu-O-10-5-7 — End-to-End Multilingual Multi-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multilingual Multi-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-4|PAPER Thu-O-9-3-4 — Vectorized Beam Search for CTC-Attention-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vectorized Beam Search for CTC-Attention-Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-7|PAPER Wed-P-6-C-7 — Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-1|PAPER Wed-P-6-E-1 — Monaural Speech Enhancement with Dilated Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monaural Speech Enhancement with Dilated Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-2|PAPER Wed-P-8-A-2 — Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193116.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-6|PAPER Mon-P-1-A-6 — Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193114.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-11|PAPER Thu-P-9-E-11 — My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191356.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-14|PAPER Thu-P-9-A-14 — Joint Optimization of Neural Acoustic Beamforming and Dereverberation with x-Vectors for Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Optimization of Neural Acoustic Beamforming and Dereverberation with x-Vectors for Robust Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191356.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-14|PAPER Thu-P-9-A-14 — Joint Optimization of Neural Acoustic Beamforming and Dereverberation with x-Vectors for Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Optimization of Neural Acoustic Beamforming and Dereverberation with x-Vectors for Robust Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-1|PAPER Mon-P-2-C-1 — Mitigating Noisy Inputs for Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Noisy Inputs for Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192967.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-1|PAPER Mon-P-1-E-1 — Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192911.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-2|PAPER Thu-P-10-D-2 — Profiling Speech Motor Impairments in Persons with Amyotrophic Lateral Sclerosis: An Acoustic-Based Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Profiling Speech Motor Impairments in Persons with Amyotrophic Lateral Sclerosis: An Acoustic-Based Approach</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-11|PAPER Thu-P-10-E-11 — End-to-End Music Source Separation: Is it Possible in the Waveform Domain?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Music Source Separation: Is it Possible in the Waveform Domain?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-3|PAPER Thu-P-9-B-3 — “Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Progressive Speech Enhancement with Residual Connections</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-12|PAPER Tue-SS-4-4-12 — A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192217.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-9|PAPER Wed-P-6-C-9 — Assessing Parkinson’s Disease from Speech Using Fisher Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Parkinson’s Disease from Speech Using Fisher Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191735.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-6|PAPER Mon-P-1-E-6 — Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191749.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-5|PAPER Thu-P-9-D-5 — SpeechYOLO: Detection and Localization of Speech Objects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechYOLO: Detection and Localization of Speech Objects</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192663.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-2|PAPER Mon-P-2-A-2 — One-Shot Voice Conversion by Separating Speaker and Content Representations with Instance Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion by Separating Speaker and Content Representations with Instance Normalization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191982.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-3|PAPER Tue-O-4-1-3 — RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192244.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-3|PAPER Mon-O-1-2-3 — Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191603.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-4|PAPER Mon-O-2-1-4 — A Saliency-Based Attention LSTM Model for Cognitive Load Classification from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Saliency-Based Attention LSTM Model for Cognitive Load Classification from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191405.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-12|PAPER Mon-P-1-C-12 — Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192217.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-9|PAPER Wed-P-6-C-9 — Assessing Parkinson’s Disease from Speech Using Fisher Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Parkinson’s Disease from Speech Using Fisher Vectors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198021.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-3|PAPER Wed-S&T-4-3 — PyToBI: A Toolkit for ToBI Labeling Under Python]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PyToBI: A Toolkit for ToBI Labeling Under Python</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191305.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-1|PAPER Wed-O-7-2-1 — Neural Named Entity Recognition from Subword Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Named Entity Recognition from Subword Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Althingi ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191722.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-2|PAPER Wed-O-6-3-2 — Adversarially Trained End-to-End Korean Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained End-to-End Korean Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193113.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-1|PAPER Mon-P-1-C-1 — Predicting Humor by Learning from Time-Aligned Comments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Humor by Learning from Time-Aligned Comments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191349.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-10|PAPER Mon-P-1-C-10 — Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193119.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-4|PAPER Wed-O-7-2-4 — Linguistically-Informed Training of Acoustic Word Embeddings for Low-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linguistically-Informed Training of Acoustic Word Embeddings for Low-Resource Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-1|PAPER Thu-O-10-5-1 — Improving Code-Switched Language Modeling Performance Using Cognate Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Code-Switched Language Modeling Performance Using Cognate Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-4|PAPER Mon-O-1-5-4 — Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192283.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-5|PAPER Tue-O-5-3-5 — Detecting Depression with Word-Level Multimodal Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Depression with Word-Level Multimodal Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193099.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-3|PAPER Mon-O-1-5-3 — Expediting TTS Synthesis with Adversarial Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expediting TTS Synthesis with Adversarial Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-1|PAPER Tue-P-4-D-1 — The Effect of Phoneme Distribution on Perceptual Similarity in English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effect of Phoneme Distribution on Perceptual Similarity in English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192743.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Explaining Sentiment Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Explaining Sentiment Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192361.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-6|PAPER Wed-P-6-A-6 — An Adaptive-Q Cochlear Model for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive-Q Cochlear Model for Replay Spoofing Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191535.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-13|PAPER Wed-P-6-A-13 — Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193252.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-1|PAPER Thu-O-10-2-1 — Direct Modelling of Speech Emotion from Raw Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Modelling of Speech Emotion from Raw Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192251.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-10|PAPER Tue-P-5-D-10 — A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192872.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-1|PAPER Thu-P-9-B-1 — An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192264.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-4|PAPER Tue-O-3-2-4 — Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191746.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-6|PAPER Tue-O-3-2-6 — Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-6|PAPER Tue-P-3-E-6 — KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192511.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-9|PAPER Wed-P-8-A-9 — A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192171.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-4|PAPER Wed-P-8-E-4 — A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-8|PAPER Thu-P-9-D-8 — Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191597.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-9|PAPER Mon-P-1-B-9 — Knowledge Distillation for Throat Microphone Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for Throat Microphone Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192967.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-1|PAPER Mon-P-1-E-1 — Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193109.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-2|PAPER Mon-P-2-D-2 — Towards a Speaker Independent Speech-BCI Using Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Speaker Independent Speech-BCI Using Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193105.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-1|PAPER Tue-O-3-4-1 — Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198005.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-1|PAPER Tue-S&T-2-1 — Directional Audio Rendering Using a Neural Network Based Personalized HRTF]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Directional Audio Rendering Using a Neural Network Based Personalized HRTF</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191722.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-2|PAPER Wed-O-6-3-2 — Adversarially Trained End-to-End Korean Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained End-to-End Korean Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192908.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-2|PAPER Thu-P-9-E-2 — Deep Multitask Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Multitask Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-7|PAPER Wed-P-6-A-7 — An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-4|PAPER Mon-P-1-C-4 — Deep Learning Based Mandarin Accent Identification for Accent Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Mandarin Accent Identification for Accent Robust ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-8|PAPER Mon-P-2-A-8 — GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-4|PAPER Tue-O-4-2-4 — Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191311.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-5|PAPER Tue-O-4-2-5 — Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-10|PAPER Wed-P-7-C-10 — Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-4|PAPER Wed-O-7-1-4 — Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192984.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-2|PAPER Tue-P-4-D-2 — Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-3|PAPER Wed-O-6-1-3 — Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-4|PAPER Thu-S&T-6-4 — The SAIL LABS Media Mining Indexer and the CAVA Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SAIL LABS Media Mining Indexer and the CAVA Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191557.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-8|PAPER Mon-P-1-C-8 — Towards an Annotation Scheme for Complex Laughter in Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an Annotation Scheme for Complex Laughter in Speech Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-7|PAPER Mon-S&T-1-7 — Speech-Based Web Navigation for Limited Mobility Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-Based Web Navigation for Limited Mobility Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192443.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-3|PAPER Tue-O-3-4-3 — Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192611.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-7|PAPER Tue-P-5-D-7 — Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-3|PAPER Mon-O-1-4-3 — Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-2|PAPER Wed-O-7-2-2 — Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-7|PAPER Tue-SS-4-4-7 — IIIT-H Spoofing Countermeasures for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IIIT-H Spoofing Countermeasures for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191997.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-9|PAPER Wed-SS-7-A-9 — The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-11|PAPER Wed-P-8-E-11 — Semi-Supervised Audio Classification with Consistency-Based Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Audio Classification with Consistency-Based Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192182.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-5|PAPER Wed-P-7-B-5 — Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192076.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-4|PAPER Tue-O-4-5-4 — Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-5|PAPER Tue-O-3-2-5 — Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-7|PAPER Tue-P-5-C-7 — Joint Decoding of CTC Based Systems for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Decoding of CTC Based Systems for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192120.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-10|PAPER Wed-P-6-A-10 — Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-2|PAPER Wed-P-8-A-2 — Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-11|PAPER Tue-SS-4-4-11 — Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-11|PAPER Wed-P-8-E-11 — Semi-Supervised Audio Classification with Consistency-Based Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Audio Classification with Consistency-Based Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-5|PAPER Tue-O-3-4-5 — The Neural Correlates Underlying Lexically-Guided Perceptual Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Neural Correlates Underlying Lexically-Guided Perceptual Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-9|PAPER Tue-P-5-C-9 — Active Learning Methods for Low Resource End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning Methods for Low Resource End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-5|PAPER Mon-O-1-4-5 — Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191959.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-7|PAPER Tue-P-5-B-7 — Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198030.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-5|PAPER Tue-S&T-2-5 — FarSpeech: Arabic Natural Language Processing for Live Arabic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FarSpeech: Arabic Natural Language Processing for Live Arabic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193051.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-2|PAPER Thu-P-9-D-2 — On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191736.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-10|PAPER Mon-P-1-D-10 — Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192336.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-6|PAPER Tue-SS-5-6-6 — Zero Resource Speech Synthesis Using Transcripts Derived from Perceptual Acoustic Units]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Zero Resource Speech Synthesis Using Transcripts Derived from Perceptual Acoustic Units</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-7|PAPER Tue-S&T-2-7 —  NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191952.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-10|PAPER Tue-P-3-B-10 — Guiding CTC Posterior Spike Timings for Improved Posterior Fusion and Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guiding CTC Posterior Spike Timings for Improved Posterior Fusion and Knowledge Distillation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-14|PAPER Tue-P-3-B-14 — Multi-Task CTC Training with Auxiliary Feature Reconstruction for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task CTC Training with Auxiliary Feature Reconstruction for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-2|PAPER Wed-O-6-5-2 — Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192793.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-2|PAPER Wed-P-6-B-2 — Detection and Recovery of OOVs for Improved English Broadcast News Captioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection and Recovery of OOVs for Improved English Broadcast News Captioning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-1|PAPER Thu-O-9-2-1 — Advancing Sequence-to-Sequence Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Sequence-to-Sequence Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-5|PAPER Mon-P-2-D-5 — Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193186.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-1|PAPER Tue-P-4-B-1 — A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Path Signature Approach for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192422.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-14|PAPER Tue-P-5-D-14 — End-to-End Convolutional Sequence Learning for ASL Fingerspelling Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Convolutional Sequence Learning for ASL Fingerspelling Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191281.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-11|PAPER Wed-P-6-D-11 — R²SPIN: Re-Recording the Revised Speech Perception in Noise Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R²SPIN: Re-Recording the Revised Speech Perception in Noise Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198028.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-5|PAPER Wed-S&T-4-5 — SLP-AA: Tools for Sign Language Phonetic and Phonological Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SLP-AA: Tools for Sign Language Phonetic and Phonological Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-4|PAPER Tue-O-5-3-4 — Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191750.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-9|PAPER Mon-P-1-D-9 — Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193043.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-6|PAPER Wed-P-8-A-6 — Speech Audio Super-Resolution for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Audio Super-Resolution for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193019.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-1|PAPER Wed-P-8-E-1 — Multi-Stream Network with Temporal Attention for Environmental Sound Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stream Network with Temporal Attention for Environmental Sound Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-7|PAPER Thu-P-9-C-7 — Turn-Taking Prediction Based on Detection of Transition Relevance Place]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Turn-Taking Prediction Based on Detection of Transition Relevance Place</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192931.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-3|PAPER Tue-P-5-D-3 — Effects of Natural Variability in Cross-Modal Temporal Correlations on Audiovisual Speech Recognition Benefit]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Natural Variability in Cross-Modal Temporal Correlations on Audiovisual Speech Recognition Benefit</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-5|PAPER Mon-O-1-5-5 — Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192307.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-4|PAPER Mon-P-2-A-4 — Non-Parallel Voice Conversion with Cyclic Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Cyclic Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-6|PAPER Mon-P-2-A-6 — Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-6|PAPER Wed-P-8-E-6 — Sound Event Detection in Multichannel Audio Using Convolutional Time-Frequency-Channel Squeeze and Excitation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Detection in Multichannel Audio Using Convolutional Time-Frequency-Channel Squeeze and Excitation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192225.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-3|PAPER Thu-O-10-1-3 — Language Modeling with Deep Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling with Deep Transformers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191376.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-4|PAPER Mon-O-1-3-4 — Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-6|PAPER Mon-P-2-A-6 — Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-8|PAPER Tue-P-5-B-8 — Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191495.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-11|PAPER Mon-P-1-B-11 — Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192651.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-3|PAPER Thu-P-9-E-3 — Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-3|PAPER Wed-O-6-2-3 — Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-5|PAPER Wed-P-8-E-5 — Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-8|PAPER Tue-P-4-B-8 — Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-3|PAPER Thu-P-9-D-3 — Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Mon-K-1|PAPER Mon-K-1 — Statistical Approach to Speech Synthesis: Past, Present and Future]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Statistical Approach to Speech Synthesis: Past, Present and Future</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-6|PAPER Mon-O-1-2-6 — Simultaneous Denoising and Dereverberation for Low-Latency Applications Using Frame-by-Frame Online Unified Convolutional Beamformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Denoising and Dereverberation for Low-Latency Applications Using Frame-by-Frame Online Unified Convolutional Beamformer</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191513.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-6|PAPER Wed-O-7-4-6 — Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191864.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-7|PAPER Tue-P-4-E-7 — Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-14|PAPER Mon-P-2-C-14 — Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191313.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-9|PAPER Thu-P-9-C-9 — Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192899.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-1|PAPER Thu-P-10-A-1 — End-to-End Neural Speaker Diarization with Permutation-Free Objectives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Speaker Diarization with Permutation-Free Objectives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191864.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-7|PAPER Tue-P-4-E-7 — Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-11|PAPER Mon-P-1-C-11 — Do not Hesitate! — Unless You Do it Shortly or Nasally: How the Phonetics of Filled Pauses Determine Their Subjective Frequency and Perceived Speaker Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do not Hesitate! — Unless You Do it Shortly or Nasally: How the Phonetics of Filled Pauses Determine Their Subjective Frequency and Perceived Speaker Performance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-10|PAPER Thu-P-10-C-10 — Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-11|PAPER Mon-P-1-E-11 — Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192219.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-7|PAPER Tue-P-5-E-7 — Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-4|PAPER Wed-P-8-B-4 — Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191532.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-10|PAPER Wed-P-8-E-10 — Few-Shot Audio Classification with Attentional Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Few-Shot Audio Classification with Attentional Graph Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193179.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-5|PAPER Tue-O-5-5-5 — Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191523.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-12|PAPER Wed-P-8-D-12 — A Computational Model of Early Language Acquisition from Audiovisual Experiences of Young Infants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Computational Model of Early Language Acquisition from Audiovisual Experiences of Young Infants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191752.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-9|PAPER Mon-P-2-B-9 — An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191557.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-8|PAPER Mon-P-1-C-8 — Towards an Annotation Scheme for Complex Laughter in Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an Annotation Scheme for Complex Laughter in Speech Corpora</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192641.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-6|PAPER Mon-O-2-2-6 — Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-3|PAPER Mon-O-1-3-3 — Individual Difference of Relative Tongue Size and its Acoustic Effects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Difference of Relative Tongue Size and its Acoustic Effects</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198004.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-2|PAPER Mon-S&T-1-2 — Depression State Assessment: Application for Detection of Depression by Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Depression State Assessment: Application for Detection of Depression by Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-8|PAPER Tue-P-4-B-8 — Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-3|PAPER Thu-P-9-D-3 — Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-11|PAPER Tue-P-5-D-11 — Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-7|PAPER Thu-P-9-C-7 — Turn-Taking Prediction Based on Detection of Transition Relevance Place]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Turn-Taking Prediction Based on Detection of Transition Relevance Place</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-1|PAPER Thu-P-10-E-1 — A Modified Algorithm for Multiple Input Spectrogram Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Modified Algorithm for Multiple Input Spectrogram Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191826.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-4|PAPER Thu-P-9-C-4 — An Incremental Turn-Taking Model for Task-Oriented Dialog Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Incremental Turn-Taking Model for Task-Oriented Dialog Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-7|PAPER Thu-P-9-C-7 — Turn-Taking Prediction Based on Detection of Transition Relevance Place]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Turn-Taking Prediction Based on Detection of Transition Relevance Place</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191508.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-13|PAPER Mon-P-1-A-13 — Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-2|PAPER Mon-P-1-A-2 — Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191508.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-13|PAPER Mon-P-1-A-13 — Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193186.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-1|PAPER Tue-P-4-B-1 — A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-9|PAPER Thu-P-9-B-9 — Video-Driven Speech Reconstruction Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Video-Driven Speech Reconstruction Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192612.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-4|PAPER Tue-P-5-A-4 — Analysis of Pronunciation Learning in End-to-End Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Pronunciation Learning in End-to-End Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191804.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-4|PAPER Thu-P-9-B-4 — Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192236.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-5|PAPER Mon-P-2-A-5 — StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-5|PAPER Tue-O-5-1-5 — Using a Manifold Vocoder for Spectral Voice and Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using a Manifold Vocoder for Spectral Voice and Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-1|PAPER Tue-P-5-E-1 — Multiview Shared Subspace Learning Across Speakers and Speech Commands]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiview Shared Subspace Learning Across Speakers and Speech Commands</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-7|PAPER Tue-P-4-C-7 — Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-5|PAPER Tue-P-4-C-5 — Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192751.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-2|PAPER Mon-O-1-2-2 — On Nonlinear Spatial Filtering in Multichannel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Nonlinear Spatial Filtering in Multichannel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-5|PAPER Mon-P-2-C-5 — Ultra-Compact NLU: Neuronal Network Binarization as Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultra-Compact NLU: Neuronal Network Binarization as Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-1|PAPER Thu-P-9-E-1 — On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-5|PAPER Tue-P-4-B-5 — Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-6|PAPER Wed-P-7-C-6 — Speech Emotion Recognition with a Reject Option]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition with a Reject Option</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-3|PAPER Wed-O-8-2-3 — Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191708.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-5|PAPER Wed-O-8-3-5 — Gender De-Biasing in Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gender De-Biasing in Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191274.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-12|PAPER Mon-P-2-C-12 — Zero Shot Intent Classification Using Long-Short Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Zero Shot Intent Classification Using Long-Short Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191722.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-2|PAPER Wed-O-6-3-2 — Adversarially Trained End-to-End Korean Singing Voice Synthesis System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarially Trained End-to-End Korean Singing Voice Synthesis System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-1|PAPER Mon-O-2-1-1 — Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191973.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-4|PAPER Wed-O-8-2-4 — Multi-Stride Self-Attention for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stride Self-Attention for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191500.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-12|PAPER Tue-P-3-A-12 — CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-7|PAPER Wed-P-6-A-7 — An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192443.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-3|PAPER Tue-O-3-4-3 — Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192611.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-7|PAPER Tue-P-5-D-7 — Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192729.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-2|PAPER Tue-O-3-4-2 — ERP Signal Analysis with Temporal Resolution Using a Time Window Bank]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ERP Signal Analysis with Temporal Resolution Using a Time Window Bank</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192443.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-3|PAPER Tue-O-3-4-3 — Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192741.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-4|PAPER Tue-P-5-D-4 — Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192685.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-5|PAPER Tue-P-5-D-5 — Quantifying Expectation Modulation in Human Speech Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Expectation Modulation in Human Speech Processing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192611.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-7|PAPER Tue-P-5-D-7 — Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191691.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-3|PAPER Tue-O-5-4-3 — Multi-Lingual Dialogue Act Recognition with Deep Learning Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Lingual Dialogue Act Recognition with Deep Learning Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-7|PAPER Wed-P-8-E-7 — A Robust Framework for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Robust Framework for Acoustic Scene Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192050.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-6|PAPER Mon-P-2-B-6 — Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-1|PAPER Wed-P-7-B-1 — Acoustic Model Bootstrapping Using Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Bootstrapping Using Semi-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192264.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-4|PAPER Tue-O-3-2-4 — Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191746.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-6|PAPER Tue-O-3-2-6 — Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192486.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-3|PAPER Thu-P-9-A-3 — VAE-Based Regularization for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VAE-Based Regularization for Deep Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192529.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-4|PAPER Tue-P-3-D-4 — The Voicing Contrast in Stops and Affricates in the Western Armenian of Lebanon]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Voicing Contrast in Stops and Affricates in the Western Armenian of Lebanon</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-1|PAPER Mon-P-1-B-1 — Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192552.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-5|PAPER Mon-P-1-C-5 — Calibrating DNN Posterior Probability Estimates of HMM/DNN Models to Improve Social Signal Detection from Audio Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Calibrating DNN Posterior Probability Estimates of HMM/DNN Models to Improve Social Signal Detection from Audio Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-1|PAPER Wed-P-7-D-1 — Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192251.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-10|PAPER Tue-P-5-D-10 — A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191281.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-11|PAPER Wed-P-6-D-11 — R²SPIN: Re-Recording the Revised Speech Perception in Noise Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R²SPIN: Re-Recording the Revised Speech Perception in Noise Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-6|PAPER Wed-O-7-2-6 — Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191943.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-11|PAPER Mon-P-1-A-11 — Joint Speech Recognition and Speaker Diarization via Sequence Transduction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Speech Recognition and Speaker Diarization via Sequence Transduction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-8|PAPER Mon-P-2-A-8 — GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191333.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-1|PAPER Wed-O-8-4-1 — Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-2|PAPER Wed-O-8-4-2 — Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-11|PAPER Tue-P-4-D-11 — Recognition of Creaky Voice from Emergency Calls]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Creaky Voice from Emergency Calls</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-9|PAPER Tue-P-3-D-9 — Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191788.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-8|PAPER Mon-P-2-B-8 — Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192457.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-4|PAPER Wed-P-6-A-4 — Deep Hashing for Speaker Identification and Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hashing for Speaker Identification and Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-2|PAPER Tue-O-4-2-2 — A New GAN-Based End-to-End TTS Training Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New GAN-Based End-to-End TTS Training Algorithm</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191972.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-3|PAPER Tue-O-4-2-3 — Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-7|PAPER Thu-P-10-C-7 — Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193276.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-1|PAPER Thu-O-9-5-1 — Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192544.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-4|PAPER Mon-P-2-B-4 — Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-2|PAPER Tue-O-4-2-2 — A New GAN-Based End-to-End TTS Training Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New GAN-Based End-to-End TTS Training Algorithm</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193191.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-2|PAPER Tue-P-5-A-2 — Building a Mixed-Lingual Neural TTS System with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Mixed-Lingual Neural TTS System with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-11|PAPER Tue-P-5-B-11 — Towards Language-Universal Mandarin-English Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Language-Universal Mandarin-English Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192983.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-1|PAPER Thu-P-9-A-1 — Adversarial Regularization for End-to-End Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Regularization for End-to-End Robust Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-7|PAPER Thu-P-10-C-7 — Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192980.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192961.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-7|PAPER Mon-P-1-A-7 — Multi-PLDA Diarization on Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-PLDA Diarization on Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-2|PAPER Wed-O-6-1-2 — Dimensions of Prosodic Prominence in an Attractor Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dimensions of Prosodic Prominence in an Attractor Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193152.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-1|PAPER Thu-P-9-C-1 — Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191839.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-8|PAPER Mon-P-1-D-8 — EpaDB: A Database for Development of Pronunciation Assessment Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EpaDB: A Database for Development of Pronunciation Assessment Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-5|PAPER Thu-O-9-3-5 — Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191393.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-5|PAPER Wed-O-8-1-5 — LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-6|PAPER Tue-P-3-E-6 — KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192511.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-9|PAPER Wed-P-8-A-9 — A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-8|PAPER Tue-P-5-D-8 — Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-1|PAPER Tue-O-3-5-1 — Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191683.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-11|PAPER Tue-P-3-C-11 — Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-3|PAPER Mon-P-1-B-3 — Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193155.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-3|PAPER Mon-P-2-B-3 — Multi-Accent Adaptation Based on Gate Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Accent Adaptation Based on Gate Mechanism</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192264.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-4|PAPER Tue-O-3-2-4 — Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191746.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-6|PAPER Tue-O-3-2-6 — Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191614.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-11|PAPER Tue-P-3-A-11 — A Chinese Dataset for Identifying Speakers in Novels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Chinese Dataset for Identifying Speakers in Novels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191563.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-3|PAPER Wed-O-6-3-3 — Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191606.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-7|PAPER Thu-P-9-A-7 — An Effective Deep Embedding Learning Architecture for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Deep Embedding Learning Architecture for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-8|PAPER Thu-P-9-D-8 — Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191265.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-13|PAPER Mon-P-2-A-13 — Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191683.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-11|PAPER Tue-P-3-C-11 — Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-3|PAPER Wed-O-8-5-3 — Large Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Softmax Loss for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-4|PAPER Thu-O-9-4-4 — Multi-Scale Time-Frequency Attention for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Time-Frequency Attention for Acoustic Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191467.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-6|PAPER Wed-O-8-2-6 — Self-Teaching Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Teaching Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191344.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-6|PAPER Wed-O-7-5-6 — Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191487.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-5|PAPER Wed-O-7-2-5 — Multimodal Word Discovery and Retrieval with Phone Sequence and Image Concepts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Word Discovery and Retrieval with Phone Sequence and Image Concepts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-8|PAPER Tue-SS-4-4-8 — Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191704.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-5|PAPER Wed-O-8-5-5 — Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-5|PAPER Tue-O-4-5-5 — Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-5|PAPER Tue-P-4-B-5 — Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-4|PAPER Wed-P-6-D-4 — Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193233.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-1|PAPER Tue-P-5-A-1 — Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-1|PAPER Wed-O-6-3-1 — Unsupervised Singing Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Singing Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191674.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-11|PAPER Wed-P-8-D-11 — No Distributional Learning in Adults from Attended Listening to Non-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">No Distributional Learning in Adults from Attended Listening to Non-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-11|PAPER Wed-P-6-B-11 — CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Abad, Alberto|AUTHOR Alberto Abad]]|
|[[Abdelfattah, Mohamed S.|AUTHOR Mohamed S. Abdelfattah]]|
|[[Abdel-Hamid, Ossama|AUTHOR Ossama Abdel-Hamid]]|
|[[Abdul-Mageed, Muhammad|AUTHOR Muhammad Abdul-Mageed]]|
|[[Abraham, Basil|AUTHOR Basil Abraham]]|
|[[Abrol, Vinayak|AUTHOR Vinayak Abrol]]|
|[[Abujabal, Abdalghani|AUTHOR Abdalghani Abujabal]]|
|[[Acharya, Rajul|AUTHOR Rajul Acharya]]|
|[[Adank, Patti|AUTHOR Patti Adank]]|
|[[Adda-Decker, Martine|AUTHOR Martine Adda-Decker]]|
|[[Adeeba, Farah|AUTHOR Farah Adeeba]]|
|[[Adel, Heike|AUTHOR Heike Adel]]|
|[[Adiga, Nagaraj|AUTHOR Nagaraj Adiga]]|
|[[Afouras, Triantafyllos|AUTHOR Triantafyllos Afouras]]|
|[[Afshan, Amber|AUTHOR Amber Afshan]]|
|[[Agenbag, Wiehan|AUTHOR Wiehan Agenbag]]|
|[[Aggarwal, Mansi|AUTHOR Mansi Aggarwal]]|
|[[Aggarwal, Vatsal|AUTHOR Vatsal Aggarwal]]|
|[[Agrawal, Dharmeshkumar|AUTHOR Dharmeshkumar Agrawal]]|
|[[Agrawal, Purvi|AUTHOR Purvi Agrawal]]|
|[[Agurto, Carla|AUTHOR Carla Agurto]]|
|[[Ahlers, Wiebke|AUTHOR Wiebke Ahlers]]|
|[[Ahmed, Mohsin Y.|AUTHOR Mohsin Y. Ahmed]]|
|[[Ai, Wencheng|AUTHOR Wencheng Ai]]|
|[[Ai, Yang|AUTHOR Yang Ai]]|
|[[Aichinger, Philipp|AUTHOR Philipp Aichinger]]|
|[[Aithal, Venkataraja|AUTHOR Venkataraja Aithal]]|
|[[Akagi, Masato|AUTHOR Masato Akagi]]|
|[[Alam, Jahangir|AUTHOR Jahangir Alam]]|
|[[Albar, Rachel|AUTHOR Rachel Albar]]|
|[[Albuquerque, Luciana|AUTHOR Luciana Albuquerque]]|
|[[Al-Dahle, Ahmad|AUTHOR Ahmad Al-Dahle]]|
|[[Aldeneh, Zakaria|AUTHOR Zakaria Aldeneh]]|
|[[Aleksic, Petar|AUTHOR Petar Aleksic]]|
|[[Alex, Beatrice|AUTHOR Beatrice Alex]]|
|[[Algayres, Robin|AUTHOR Robin Algayres]]|
|[[Ali, Ahmed|AUTHOR Ahmed Ali]]|
|[[Ali, Hussnain|AUTHOR Hussnain Ali]]|
|[[Alku, Paavo|AUTHOR Paavo Alku]]|
|[[Allauzen, Cyril|AUTHOR Cyril Allauzen]]|
|[[Allouche, Omri|AUTHOR Omri Allouche]]|
|[[Alluri, K.N.R.K. Raju|AUTHOR K.N.R.K. Raju Alluri]]|
|[[Alm, Cecilia Ovesdotter|AUTHOR Cecilia Ovesdotter Alm]]|
|[[Almeida, Nuno|AUTHOR Nuno Almeida]]|
|[[Alok, Aman|AUTHOR Aman Alok]]|
|[[Aloni-Lavi, Ruth|AUTHOR Ruth Aloni-Lavi]]|
|[[Alowonou, Kowovi Comivi|AUTHOR Kowovi Comivi Alowonou]]|
|[[Al-Radhi, Mohammed Salah|AUTHOR Mohammed Salah Al-Radhi]]|
|[[Alumäe, Tanel|AUTHOR Tanel Alumäe]]|
|[[Alwan, Abeer|AUTHOR Abeer Alwan]]|
|[[Alzantot, Moustafa|AUTHOR Moustafa Alzantot]]|
|[[Ambikairajah, Eliathamby|AUTHOR Eliathamby Ambikairajah]]|
|[[Amir, Ido|AUTHOR Ido Amir]]|
|[[Amiriparian, Shahin|AUTHOR Shahin Amiriparian]]|
|[[Amorese, Terry|AUTHOR Terry Amorese]]|
|[[An, Guozhen|AUTHOR Guozhen An]]|
|[[An, Shounan|AUTHOR Shounan An]]|
|[[Ananthakrishnan, Shankar|AUTHOR Shankar Ananthakrishnan]]|
|[[Ando, Atsushi|AUTHOR Atsushi Ando]]|
|[[Ando, Shintaro|AUTHOR Shintaro Ando]]|
|[[André, Elisabeth|AUTHOR Elisabeth André]]|
|[[Andreeva, Bistra|AUTHOR Bistra Andreeva]]|
|[[Andre-Obrecht, Régine|AUTHOR Régine Andre-Obrecht]]|
|[[Andrés-Ferrer, Jesús|AUTHOR Jesús Andrés-Ferrer]]|
|[[Andrusenko, Andrei|AUTHOR Andrei Andrusenko]]|
|[[Ang, Brian|AUTHOR Brian Ang]]|
|[[Ang, Wilson|AUTHOR Wilson Ang]]|
|[[Angerbauer, Katrin|AUTHOR Katrin Angerbauer]]|
|[[Annand, Colin T.|AUTHOR Colin T. Annand]]|
|[[Antognini, Joe|AUTHOR Joe Antognini]]|
|[[Antoniou, Mark|AUTHOR Mark Antoniou]]|
|[[Aoki, Hitoshi|AUTHOR Hitoshi Aoki]]|
|[[Aono, Yushi|AUTHOR Yushi Aono]]|
|[[Apostoloff, Nicholas|AUTHOR Nicholas Apostoloff]]|
|[[Appan Kandala, Pujitha|AUTHOR Pujitha Appan Kandala]]|
|[[Appeltans, Pieter|AUTHOR Pieter Appeltans]]|
|[[Arai, Kenichi|AUTHOR Kenichi Arai]]|
|[[Araki, Shoko|AUTHOR Shoko Araki]]|
|[[Aralikatte, Rahul|AUTHOR Rahul Aralikatte]]|
|[[Aralikatti, Rohith Chandrashekar|AUTHOR Rohith Chandrashekar Aralikatti]]|
|[[Arantes, Pablo|AUTHOR Pablo Arantes]]|
|[[Arava, Radhika|AUTHOR Radhika Arava]]|
|[[Ardaillon, Luc|AUTHOR Luc Ardaillon]]|
|[[Ardulov, Victor|AUTHOR Victor Ardulov]]|
|[[Arias-Vergara, T.|AUTHOR T. Arias-Vergara]]|
|[[Arimoto, Yoshiko|AUTHOR Yoshiko Arimoto]]|
|[[Arsikere, Harish|AUTHOR Harish Arsikere]]|
|[[Artajew, Adam|AUTHOR Adam Artajew]]|
|[[Aryal, S.|AUTHOR S. Aryal]]|
|[[Asakawa, Satoshi|AUTHOR Satoshi Asakawa]]|
|[[Asgari, Meysam|AUTHOR Meysam Asgari]]|
|[[Ashby, Simone|AUTHOR Simone Ashby]]|
|[[Ashihara, Takanori|AUTHOR Takanori Ashihara]]|
|[[Ashour, Chamran|AUTHOR Chamran Ashour]]|
|[[Assael, Yannis|AUTHOR Yannis Assael]]|
|[[Astudillo, Ramon|AUTHOR Ramon Astudillo]]|
|[[Atchayaram, Nalini|AUTHOR Nalini Atchayaram]]|
|[[Athanasopoulou, Georgia|AUTHOR Georgia Athanasopoulou]]|
|[[Atkins, David C.|AUTHOR David C. Atkins]]|
|[[Aubin, Adèle|AUTHOR Adèle Aubin]]|
|[[Audhkhasi, Kartik|AUTHOR Kartik Audhkhasi]]|
|[[Audibert, Nicolas|AUTHOR Nicolas Audibert]]|
|[[Augustyniak, Łukasz|AUTHOR Łukasz Augustyniak]]|
|[[Auli, Michael|AUTHOR Michael Auli]]|
|[[Avdeeva, Anastasia|AUTHOR Anastasia Avdeeva]]|
|[[Avigal, Mireille|AUTHOR Mireille Avigal]]|
|[[Avila, Anderson R.|AUTHOR Anderson R. Avila]]|
|[[Aylett, Matthew P.|AUTHOR Matthew P. Aylett]]|
|[[Ayllón, David|AUTHOR David Ayllón]]|
|[[Azuh, Emmanuel|AUTHOR Emmanuel Azuh]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Bach, Jörg-Hendrik|AUTHOR Jörg-Hendrik Bach]]|
|[[Bach, Nguyen|AUTHOR Nguyen Bach]]|
|[[Backfried, Gerhard|AUTHOR Gerhard Backfried]]|
|[[Bäckström, Tom|AUTHOR Tom Bäckström]]|
|[[Badaskar, Sameer|AUTHOR Sameer Badaskar]]|
|[[Baevski, Alexei|AUTHOR Alexei Baevski]]|
|[[Bagheri, Saeed|AUTHOR Saeed Bagheri]]|
|[[Bahmaninezhad, Fahimeh|AUTHOR Fahimeh Bahmaninezhad]]|
|[[Bai, Xue|AUTHOR Xue Bai]]|
|[[Bai, Ye|AUTHOR Ye Bai]]|
|[[Bailey, Reynold|AUTHOR Reynold Bailey]]|
|[[Baird, Alice|AUTHOR Alice Baird]]|
|[[Balazs, Peter|AUTHOR Peter Balazs]]|
|[[Bampounis, Nikos|AUTHOR Nikos Bampounis]]|
|[[Bang, Jeong-Uk|AUTHOR Jeong-Uk Bang]]|
|[[Bangalore, Srinivas|AUTHOR Srinivas Bangalore]]|
|[[Bansal, Shubham|AUTHOR Shubham Bansal]]|
|[[Bao, Changchun|AUTHOR Changchun Bao]]|
|[[Bao, Fang|AUTHOR Fang Bao]]|
|[[Bao, Zhongtian|AUTHOR Zhongtian Bao]]|
|[[Bapna, Ankur|AUTHOR Ankur Bapna]]|
|[[Baran, Marcin|AUTHOR Marcin Baran]]|
|[[Barbe, Michael T.|AUTHOR Michael T. Barbe]]|
|[[Barbot, Nelly|AUTHOR Nelly Barbot]]|
|[[Barker, Jon|AUTHOR Jon Barker]]|
|[[Barra-Chicote, Roberto|AUTHOR Roberto Barra-Chicote]]|
|[[Barras, Claude|AUTHOR Claude Barras]]|
|[[Barreda, Santiago|AUTHOR Santiago Barreda]]|
|[[Barrett, Tyson S.|AUTHOR Tyson S. Barrett]]|
|[[Barriac, Vincent|AUTHOR Vincent Barriac]]|
|[[Barrios, Maria A.|AUTHOR Maria A. Barrios]]|
|[[Barrow, Joe|AUTHOR Joe Barrow]]|
|[[Barth, Volker|AUTHOR Volker Barth]]|
|[[Bartók, Márton|AUTHOR Márton Bartók]]|
|[[Baskar, Murali Karthick|AUTHOR Murali Karthick Baskar]]|
|[[Bataev, Vladimir|AUTHOR Vladimir Bataev]]|
|[[Batliner, Anton|AUTHOR Anton Batliner]]|
|[[Batushiren|AUTHOR Batushiren]]|
|[[Baucom, Brian R.W.|AUTHOR Brian R.W. Baucom]]|
|[[Baumeister, Harald|AUTHOR Harald Baumeister]]|
|[[Baxi, Anoosha|AUTHOR Anoosha Baxi]]|
|[[Bayerl, S.|AUTHOR S. Bayerl]]|
|[[Beack, Seungkwon|AUTHOR Seungkwon Beack]]|
|[[Bear, Helen L.|AUTHOR Helen L. Bear]]|
|[[Beaufays, Françoise|AUTHOR Françoise Beaufays]]|
|[[Béchet, Frédéric|AUTHOR Frédéric Béchet]]|
|[[Beck, Eugen|AUTHOR Eugen Beck]]|
|[[Beck, Janet|AUTHOR Janet Beck]]|
|[[Beeferman, Doug|AUTHOR Doug Beeferman]]|
|[[Beigman Klebanov, Beata|AUTHOR Beata Beigman Klebanov]]|
|[[Belinkov, Yonatan|AUTHOR Yonatan Belinkov]]|
|[[Belitz, Chelzy|AUTHOR Chelzy Belitz]]|
|[[Belkin, Mikhail|AUTHOR Mikhail Belkin]]|
|[[Bell, Peter|AUTHOR Peter Bell]]|
|[[Bellegarda, Jerome R.|AUTHOR Jerome R. Bellegarda]]|
|[[Bellet, Aurélien|AUTHOR Aurélien Bellet]]|
|[[Belur, Yamini|AUTHOR Yamini Belur]]|
|[[Benali, Habib|AUTHOR Habib Benali]]|
|[[Benetos, Emmanouil|AUTHOR Emmanouil Benetos]]|
|[[Bengio, Yoshua|AUTHOR Yoshua Bengio]]|
|[[Benjumea, Juan|AUTHOR Juan Benjumea]]|
|[[Benkelfat, Badr-Eddine|AUTHOR Badr-Eddine Benkelfat]]|
|[[Bennett, Lorrayne|AUTHOR Lorrayne Bennett]]|
|[[Bentum, M.|AUTHOR M. Bentum]]|
|[[Beňuš, Štefan|AUTHOR Štefan Beňuš]]|
|[[Bergelson, Elika|AUTHOR Elika Bergelson]]|
|[[Berger, Michael|AUTHOR Michael Berger]]|
|[[Berger, Serge|AUTHOR Serge Berger]]|
|[[Berger, Stephanie|AUTHOR Stephanie Berger]]|
|[[Bergler, Christian|AUTHOR Christian Bergler]]|
|[[Berisha, Visar|AUTHOR Visar Berisha]]|
|[[Bernard, Mathieu|AUTHOR Mathieu Bernard]]|
|[[Bernardo, Luís|AUTHOR Luís Bernardo]]|
|[[Berry, James D.|AUTHOR James D. Berry]]|
|[[Besacier, Laurent|AUTHOR Laurent Besacier]]|
|[[Beskow, Jonas|AUTHOR Jonas Beskow]]|
|[[Best, Catherine T.|AUTHOR Catherine T. Best]]|
|[[Betz, Simon|AUTHOR Simon Betz]]|
|[[Beyrami, Ebrahim|AUTHOR Ebrahim Beyrami]]|
|[[Bhat, Riyaz|AUTHOR Riyaz Bhat]]|
|[[Bhati, Saurabhchand|AUTHOR Saurabhchand Bhati]]|
|[[Bhatia, Deepti|AUTHOR Deepti Bhatia]]|
|[[Bhattacharya, Gautam|AUTHOR Gautam Bhattacharya]]|
|[[Bhosale, Swapnil|AUTHOR Swapnil Bhosale]]|
|[[Bi, Chongke|AUTHOR Chongke Bi]]|
|[[Biadsy, Fadi|AUTHOR Fadi Biadsy]]|
|[[Białobrzeski, Radosław|AUTHOR Radosław Białobrzeski]]|
|[[Biasutto--Lervat, Théo|AUTHOR Théo Biasutto--Lervat]]|
|[[Biemann, Chris|AUTHOR Chris Biemann]]|
|[[Biermann, Henrik|AUTHOR Henrik Biermann]]|
|[[Bilgrami, Zarina R.|AUTHOR Zarina R. Bilgrami]]|
|[[Birkholz, Peter|AUTHOR Peter Birkholz]]|
|[[Biswas, Arijit|AUTHOR Arijit Biswas]]|
|[[Biswas, Astik|AUTHOR Astik Biswas]]|
|[[Black, Alan W.|AUTHOR Alan W. Black]]|
|[[Blackburn, Daniel|AUTHOR Daniel Blackburn]]|
|[[Bleyan, Harry|AUTHOR Harry Bleyan]]|
|[[Blunck, Sharalee|AUTHOR Sharalee Blunck]]|
|[[B.N., Suhas|AUTHOR Suhas B.N.]]|
|[[Bocklet, Tobias|AUTHOR Tobias Bocklet]]|
|[[Boeddeker, Christoph|AUTHOR Christoph Boeddeker]]|
|[[Boldt, Jesper B.|AUTHOR Jesper B. Boldt]]|
|[[Bollepalli, Bajibabu|AUTHOR Bajibabu Bollepalli]]|
|[[Bonafonte, Antonio|AUTHOR Antonio Bonafonte]]|
|[[Bonastre, Jean-François|AUTHOR Jean-François Bonastre]]|
|[[Booker, Sue|AUTHOR Sue Booker]]|
|[[Borgstrom, Jonas|AUTHOR Jonas Borgstrom]]|
|[[Borowik, Bartosz|AUTHOR Bartosz Borowik]]|
|[[Borrie, Stephanie A.|AUTHOR Stephanie A. Borrie]]|
|[[Borský, Michal|AUTHOR Michal Borský]]|
|[[Bos, P.|AUTHOR P. Bos]]|
|[[Bost, Xavier|AUTHOR Xavier Bost]]|
|[[Botros, Rami|AUTHOR Rami Botros]]|
|[[Boulianne, Gilles|AUTHOR Gilles Boulianne]]|
|[[Bourlard, Hervé|AUTHOR Hervé Bourlard]]|
|[[Bousquet, Pierre-Michel|AUTHOR Pierre-Michel Bousquet]]|
|[[Boves, L.|AUTHOR L. Boves]]|
|[[Bowie, Christopher R.|AUTHOR Christopher R. Bowie]]|
|[[Boyce, Suzanne|AUTHOR Suzanne Boyce]]|
|[[Boyd-Graber, Jordan|AUTHOR Jordan Boyd-Graber]]|
|[[Bradlow, Ann R.|AUTHOR Ann R. Bradlow]]|
|[[Braich, Steve|AUTHOR Steve Braich]]|
|[[Braithwaite, D.T.|AUTHOR D.T. Braithwaite]]|
|[[Braley, McKenzie|AUTHOR McKenzie Braley]]|
|[[Brambilla, Leonardo|AUTHOR Leonardo Brambilla]]|
|[[Brannon, William|AUTHOR William Brannon]]|
|[[Brard, Benoît|AUTHOR Benoît Brard]]|
|[[Braude, David A.|AUTHOR David A. Braude]]|
|[[Braudo, Anna|AUTHOR Anna Braudo]]|
|[[Braun, Bettina|AUTHOR Bettina Braun]]|
|[[Bredin, Hervé|AUTHOR Hervé Bredin]]|
|[[Brenner, Michael|AUTHOR Michael Brenner]]|
|[[Broersma, Mirjam|AUTHOR Mirjam Broersma]]|
|[[Brouwer, Alex|AUTHOR Alex Brouwer]]|
|[[Bruguier, Antoine|AUTHOR Antoine Bruguier]]|
|[[Brumberg, Jonathan S.|AUTHOR Jonathan S. Brumberg]]|
|[[Brummer, Niko|AUTHOR Niko Brummer]]|
|[[Brunner, Jana|AUTHOR Jana Brunner]]|
|[[Brutti, Alessio|AUTHOR Alessio Brutti]]|
|[[Brutti, Richard|AUTHOR Richard Brutti]]|
|[[Bryhadyr, Nataliya|AUTHOR Nataliya Bryhadyr]]|
|[[B.T., Balamurali|AUTHOR Balamurali B.T.]]|
|[[Bu, Suliang|AUTHOR Suliang Bu]]|
|[[Buckeridge, Nicholas|AUTHOR Nicholas Buckeridge]]|
|[[Bucy, Erik P.|AUTHOR Erik P. Bucy]]|
|[[Bui, Trung|AUTHOR Trung Bui]]|
|[[Bunescu, Razvan|AUTHOR Razvan Bunescu]]|
|[[Bunn, Daniel|AUTHOR Daniel Bunn]]|
|[[Burget, Lukáš|AUTHOR Lukáš Burget]]|
|[[Busso, Carlos|AUTHOR Carlos Busso]]|
|[[Buyuktosunoglu, Alper|AUTHOR Alper Buyuktosunoglu]]|
|[[Byun, Hyeongmin|AUTHOR Hyeongmin Byun]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[C., Sujitha A.|AUTHOR Sujitha A. C.]]|
|[[Cai, Danwei|AUTHOR Danwei Cai]]|
|[[Cai, Weicheng|AUTHOR Weicheng Cai]]|
|[[Cai, Zexin|AUTHOR Zexin Cai]]|
|[[Callejas, Zoraida|AUTHOR Zoraida Callejas]]|
|[[Camelin, Nathalie|AUTHOR Nathalie Camelin]]|
|[[Campbell, Thomas F.|AUTHOR Thomas F. Campbell]]|
|[[Cangemi, Francesco|AUTHOR Francesco Cangemi]]|
|[[Cannings, Nigel|AUTHOR Nigel Cannings]]|
|[[Cao, Houwei|AUTHOR Houwei Cao]]|
|[[Cao, Miao|AUTHOR Miao Cao]]|
|[[Cao, Xuan-Nga|AUTHOR Xuan-Nga Cao]]|
|[[Cao, Yuanjiang|AUTHOR Yuanjiang Cao]]|
|[[Cao, Yuewen|AUTHOR Yuewen Cao]]|
|[[Cao, Yuhang|AUTHOR Yuhang Cao]]|
|[[Caraty, Marie-José|AUTHOR Marie-José Caraty]]|
|[[Carbajal, Maria Julia|AUTHOR Maria Julia Carbajal]]|
|[[Carmantini, Andrea|AUTHOR Andrea Carmantini]]|
|[[Carmi, Nehory|AUTHOR Nehory Carmi]]|
|[[Carmiel, Yishay|AUTHOR Yishay Carmiel]]|
|[[Carson-Berndsen, Julie|AUTHOR Julie Carson-Berndsen]]|
|[[Castan, Diego|AUTHOR Diego Castan]]|
|[[Cattiau, Julie|AUTHOR Julie Cattiau]]|
|[[Caubrière, Antoine|AUTHOR Antoine Caubrière]]|
|[[Cavaco, Sofia|AUTHOR Sofia Cavaco]]|
|[[Cecchi, Guillermo|AUTHOR Guillermo Cecchi]]|
|[[Cerisara, Christophe|AUTHOR Christophe Cerisara]]|
|[[Cernak, Milos|AUTHOR Milos Cernak]]|
|[[Černocký, Jan|AUTHOR Jan Černocký]]|
|[[Cerutti, Gianmarco|AUTHOR Gianmarco Cerutti]]|
|[[Cerva, Petr|AUTHOR Petr Cerva]]|
|[[Cervone, Alessandra|AUTHOR Alessandra Cervone]]|
|[[Chai, Li|AUTHOR Li Chai]]|
|[[Chakrabarti, Chaitali|AUTHOR Chaitali Chakrabarti]]|
|[[Chakraborty, Deep|AUTHOR Deep Chakraborty]]|
|[[Chakraborty, Rupayan|AUTHOR Rupayan Chakraborty]]|
|[[Chan, James|AUTHOR James Chan]]|
|[[Chan, Wai-Yip|AUTHOR Wai-Yip Chan]]|
|[[Chan, William|AUTHOR William Chan]]|
|[[Chandrasekhar, Vijay Ramaseshan|AUTHOR Vijay Ramaseshan Chandrasekhar]]|
|[[Chandra Shekhar, Meena|AUTHOR Meena Chandra Shekhar]]|
|[[Chang, Ching-Ting|AUTHOR Ching-Ting Chang]]|
|[[Chang, Chun-Min|AUTHOR Chun-Min Chang]]|
|[[Chang, Heng Fai|AUTHOR Heng Fai Chang]]|
|[[Chang, Joon-Hyuk|AUTHOR Joon-Hyuk Chang]]|
|[[Chang, Su-Yu|AUTHOR Su-Yu Chang]]|
|[[Chang, Wonil|AUTHOR Wonil Chang]]|
|[[Chang, Xuankai|AUTHOR Xuankai Chang]]|
|[[Chang, Yi|AUTHOR Yi Chang]]|
|[[Chao, Gao-Yi|AUTHOR Gao-Yi Chao]]|
|[[Chao, Guan-Lin|AUTHOR Guan-Lin Chao]]|
|[[Charbonneau, Taylor|AUTHOR Taylor Charbonneau]]|
|[[Charon, Nicolas|AUTHOR Nicolas Charon]]|
|[[Chatziagapi, Aggelina|AUTHOR Aggelina Chatziagapi]]|
|[[Chauhan, Surbhi|AUTHOR Surbhi Chauhan]]|
|[[Chebiyyam, Venkata|AUTHOR Venkata Chebiyyam]]|
|[[Chen, Chia-Ping|AUTHOR Chia-Ping Chen]]|
|[[Chen, Chieh-Yu|AUTHOR Chieh-Yu Chen]]|
|[[Chen, Fei|AUTHOR Fei Chen]]|
|[[Chen, Hangting|AUTHOR Hangting Chen]]|
|[[Chen, I-Fan|AUTHOR I-Fan Chen]]|
|[[Chen, Jer-Ming|AUTHOR Jer-Ming Chen]]|
|[[Chen, Jiani|AUTHOR Jiani Chen]]|
|[[Chen, Jia-Xiang|AUTHOR Jia-Xiang Chen]]|
|[[Chen, Jiaxu|AUTHOR Jiaxu Chen]]|
|[[Chen, Jing|AUTHOR Jing Chen]]|
|[[Chen, John|AUTHOR John Chen]]|
|[[Chen, Jun|AUTHOR Jun Chen]]|
|[[Chen, Juqiang|AUTHOR Juqiang Chen]]|
|[[Chen, Kai|AUTHOR Kai Chen]]|
|[[Chen, Kai|AUTHOR Kai Chen]]|
|[[Chen, Kai|AUTHOR Kai Chen]]|
|[[Chen, Ko-Chiang|AUTHOR Ko-Chiang Chen]]|
|[[Chen, Kuan-Lin|AUTHOR Kuan-Lin Chen]]|
|[[Chen, Kuan-Yu|AUTHOR Kuan-Yu Chen]]|
|[[Chen, Kuan-Yu|AUTHOR Kuan-Yu Chen]]|
|[[Chen, Langzhou|AUTHOR Langzhou Chen]]|
|[[Chen, Lei|AUTHOR Lei Chen]]|
|[[Chen, Lianwu|AUTHOR Lianwu Chen]]|
|[[Chen, Liming|AUTHOR Liming Chen]]|
|[[Chen, Liu|AUTHOR Liu Chen]]|
|[[Chen, Li-Wei|AUTHOR Li-Wei Chen]]|
|[[Chen, Mengnan|AUTHOR Mengnan Chen]]|
|[[Chen, Minchuan|AUTHOR Minchuan Chen]]|
|[[Chen, Nanxin|AUTHOR Nanxin Chen]]|
|[[Chén, Oliver Y.|AUTHOR Oliver Y. Chén]]|
|[[Chen, Qifeng|AUTHOR Qifeng Chen]]|
|[[Chen, Qinlang|AUTHOR Qinlang Chen]]|
|[[Chen, Shizhe|AUTHOR Shizhe Chen]]|
|[[Chen, S.T.|AUTHOR S.T. Chen]]|
|[[Chen, Wei|AUTHOR Wei Chen]]|
|[[Chen, Wenhu|AUTHOR Wenhu Chen]]|
|[[Chen, Wenjun|AUTHOR Wenjun Chen]]|
|[[Chen, Xi C.|AUTHOR Xi C. Chen]]|
|[[Chen, Yanping|AUTHOR Yanping Chen]]|
|[[Chen, Yuan-Jui|AUTHOR Yuan-Jui Chen]]|
|[[Chen, Zhehuai|AUTHOR Zhehuai Chen]]|
|[[Chen, Zhengyang|AUTHOR Zhengyang Chen]]|
|[[Chen, Zhi|AUTHOR Zhi Chen]]|
|[[Chen, Zhifeng|AUTHOR Zhifeng Chen]]|
|[[Chen, Zhuo|AUTHOR Zhuo Chen]]|
|[[Cheng, Bridget|AUTHOR Bridget Cheng]]|
|[[Cheng, Gaofeng|AUTHOR Gaofeng Cheng]]|
|[[Cheng, Rachael Xi|AUTHOR Rachael Xi Cheng]]|
|[[Cherian, Anoop|AUTHOR Anoop Cherian]]|
|[[Chermaz, Carol|AUTHOR Carol Chermaz]]|
|[[Chettri, Bhusan|AUTHOR Bhusan Chettri]]|
|[[Chevelu, Jonathan|AUTHOR Jonathan Chevelu]]|
|[[Chien, Jen-Tzung|AUTHOR Jen-Tzung Chien]]|
|[[Chien, Yu-Ren|AUTHOR Yu-Ren Chien]]|
|[[Chignoli, Gabriele|AUTHOR Gabriele Chignoli]]|
|[[Chinen, Michael|AUTHOR Michael Chinen]]|
|[[Ching, P.C.|AUTHOR P.C. Ching]]|
|[[Chiu, Chung-Cheng|AUTHOR Chung-Cheng Chiu]]|
|[[Chng, Eng Siong|AUTHOR Eng Siong Chng]]|
|[[Cho, JaeJin|AUTHOR JaeJin Cho]]|
|[[Cho, Janghoon|AUTHOR Janghoon Cho]]|
|[[Cho, Sunghye|AUTHOR Sunghye Cho]]|
|[[Chodroff, Eleanor|AUTHOR Eleanor Chodroff]]|
|[[Choi, Hyeong-Seok|AUTHOR Hyeong-Seok Choi]]|
|[[Choi, Mu-Yeol|AUTHOR Mu-Yeol Choi]]|
|[[Choi, Seungwoo|AUTHOR Seungwoo Choi]]|
|[[Choi, Yeunju|AUTHOR Yeunju Choi]]|
|[[Chollet, Gérard|AUTHOR Gérard Chollet]]|
|[[Chong, Tze Yuang|AUTHOR Tze Yuang Chong]]|
|[[Chorowski, Jan|AUTHOR Jan Chorowski]]|
|[[Chou, Huang-Cheng|AUTHOR Huang-Cheng Chou]]|
|[[Chou, Ju-chieh|AUTHOR Ju-chieh Chou]]|
|[[Christensen, Heidi|AUTHOR Heidi Christensen]]|
|[[Christensen, Mads G.|AUTHOR Mads G. Christensen]]|
|[[Chua, Yansong|AUTHOR Yansong Chua]]|
|[[Chuang, Fu-Kai|AUTHOR Fu-Kai Chuang]]|
|[[Chuang, Shun-Po|AUTHOR Shun-Po Chuang]]|
|[[Chung, Joon Son|AUTHOR Joon Son Chung]]|
|[[Chung, Lau Wing|AUTHOR Lau Wing Chung]]|
|[[Chung, Minhwa|AUTHOR Minhwa Chung]]|
|[[Chung, Yu-An|AUTHOR Yu-An Chung]]|
|[[Church, Kenneth|AUTHOR Kenneth Church]]|
|[[Chýlek, Adam|AUTHOR Adam Chýlek]]|
|[[Ciccarelli, Gregory|AUTHOR Gregory Ciccarelli]]|
|[[Cieri, Christopher|AUTHOR Christopher Cieri]]|
|[[Civera, Jorge|AUTHOR Jorge Civera]]|
|[[Clark, Rob|AUTHOR Rob Clark]]|
|[[Clarke, Georgia|AUTHOR Georgia Clarke]]|
|[[Cohen, Azaria|AUTHOR Azaria Cohen]]|
|[[Cohen, Jonathan M.|AUTHOR Jonathan M. Cohen]]|
|[[Cohen, Samuel|AUTHOR Samuel Cohen]]|
|[[Cohn, Jeffrey|AUTHOR Jeffrey Cohn]]|
|[[Cohn, Michelle|AUTHOR Michelle Cohn]]|
|[[Cola, Meredith|AUTHOR Meredith Cola]]|
|[[Cole, Jennifer S.|AUTHOR Jennifer S. Cole]]|
|[[Collins, Ella|AUTHOR Ella Collins]]|
|[[Collobert, Ronan|AUTHOR Ronan Collobert]]|
|[[Colotte, Vincent|AUTHOR Vincent Colotte]]|
|[[Coman, Andrei C.|AUTHOR Andrei C. Coman]]|
|[[Comstock, Lindy|AUTHOR Lindy Comstock]]|
|[[Connaghan, Kathryn P.|AUTHOR Kathryn P. Connaghan]]|
|[[Cooper, Sarah|AUTHOR Sarah Cooper]]|
|[[Coppin, Ben|AUTHOR Ben Coppin]]|
|[[Corcoran, Cheryl|AUTHOR Cheryl Corcoran]]|
|[[Cordasco, Gennaro|AUTHOR Gennaro Cordasco]]|
|[[Cortes, Elísabet Eir|AUTHOR Elísabet Eir Cortes]]|
|[[Corvol, Jean-Christophe|AUTHOR Jean-Christophe Corvol]]|
|[[Costello, Charles|AUTHOR Charles Costello]]|
|[[Coucheiro-Limeres, Alejandro|AUTHOR Alejandro Coucheiro-Limeres]]|
|[[Coutinho, Eduardo|AUTHOR Eduardo Coutinho]]|
|[[Cox, Felicity|AUTHOR Felicity Cox]]|
|[[Cristia, Alejandrina|AUTHOR Alejandrina Cristia]]|
|[[Crookes, Danny|AUTHOR Danny Crookes]]|
|[[Crow, Dwight|AUTHOR Dwight Crow]]|
|[[Csapó, Tamás Gábor|AUTHOR Tamás Gábor Csapó]]|
|[[Cubuk, Ekin D.|AUTHOR Ekin D. Cubuk]]|
|[[Cuciniello, Marialucia|AUTHOR Marialucia Cuciniello]]|
|[[Cucu, Horia|AUTHOR Horia Cucu]]|
|[[Cui, Jia|AUTHOR Jia Cui]]|
|[[Cui, Xiaodong|AUTHOR Xiaodong Cui]]|
|[[Cullen, Charlie|AUTHOR Charlie Cullen]]|
|[[Cumani, Sandro|AUTHOR Sandro Cumani]]|
|[[Cummins, Nicholas|AUTHOR Nicholas Cummins]]|
|[[Cunha, Conceição|AUTHOR Conceição Cunha]]|
|[[Cutler, Ross|AUTHOR Ross Cutler]]|
|[[Cychosz, Margaret|AUTHOR Margaret Cychosz]]|
|[[Czarnowski, Krzysztof|AUTHOR Krzysztof Czarnowski]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Dahmani, Sara|AUTHOR Sara Dahmani]]|
|[[Dai, Dongyang|AUTHOR Dongyang Dai]]|
|[[Dai, Li-Rong|AUTHOR Li-Rong Dai]]|
|[[Dalmia, Siddharth|AUTHOR Siddharth Dalmia]]|
|[[Damnati, Géraldine|AUTHOR Géraldine Damnati]]|
|[[Dandapat, S.|AUTHOR S. Dandapat]]|
|[[Dang, Jianwu|AUTHOR Jianwu Dang]]|
|[[Dang, Ting|AUTHOR Ting Dang]]|
|[[Dang, Viet|AUTHOR Viet Dang]]|
|[[d’Apolito, Sonia|AUTHOR Sonia d’Apolito]]|
|[[Darwish, Kareem|AUTHOR Kareem Darwish]]|
|[[Das, Partha Pratim|AUTHOR Partha Pratim Das]]|
|[[Das, Rohan Kumar|AUTHOR Rohan Kumar Das]]|
|[[Das, Sneha|AUTHOR Sneha Das]]|
|[[Dash, Debadatta|AUTHOR Debadatta Dash]]|
|[[Datta, Arindrima|AUTHOR Arindrima Datta]]|
|[[David, Jean-Pierre|AUTHOR Jean-Pierre David]]|
|[[Davis, Chris|AUTHOR Chris Davis]]|
|[[de Freitas, Nando|AUTHOR Nando de Freitas]]|
|[[Dehak, Najim|AUTHOR Najim Dehak]]|
|[[Dehak, Réda|AUTHOR Réda Dehak]]|
|[[Delcroix, Marc|AUTHOR Marc Delcroix]]|
|[[de Leeuw, Esther|AUTHOR Esther de Leeuw]]|
|[[Deleforge, Antoine|AUTHOR Antoine Deleforge]]|
|[[Delgado, Héctor|AUTHOR Héctor Delgado]]|
|[[Dellwo, Volker|AUTHOR Volker Dellwo]]|
|[[Delpech, Estelle|AUTHOR Estelle Delpech]]|
|[[Deme, Andrea|AUTHOR Andrea Deme]]|
|[[De Mori, Renato|AUTHOR Renato De Mori]]|
|[[Deng, Jun|AUTHOR Jun Deng]]|
|[[Deng, Shiwen|AUTHOR Shiwen Deng]]|
|[[Deng, Yan|AUTHOR Yan Deng]]|
|[[Denil, Misha|AUTHOR Misha Denil]]|
|[[Denisov, Pavel|AUTHOR Pavel Denisov]]|
|[[Dentel, Laure|AUTHOR Laure Dentel]]|
|[[De Pasquale, Carolina|AUTHOR Carolina De Pasquale]]|
|[[Derinel, Adem|AUTHOR Adem Derinel]]|
|[[Dernoncourt, Franck|AUTHOR Franck Dernoncourt]]|
|[[De Vos, Maarten|AUTHOR Maarten De Vos]]|
|[[de Wet, Febe|AUTHOR Febe de Wet]]|
|[[Dey, Subhadeep|AUTHOR Subhadeep Dey]]|
|[[d’Hereuse, Christian|AUTHOR Christian d’Hereuse]]|
|[[Dhiman, Jitendra Kumar|AUTHOR Jitendra Kumar Dhiman]]|
|[[Diamant, Noa|AUTHOR Noa Diamant]]|
|[[Dietz, Michael|AUTHOR Michael Dietz]]|
|[[Diez, Mireia|AUTHOR Mireia Diez]]|
|[[Di Fabbrizio, Giuseppe|AUTHOR Giuseppe Di Fabbrizio]]|
|[[Di Gangi, Mattia A.|AUTHOR Mattia A. Di Gangi]]|
|[[Dikici, Erinc|AUTHOR Erinc Dikici]]|
|[[Dimas, Paulo|AUTHOR Paulo Dimas]]|
|[[Dimitriadis, Dimitrios|AUTHOR Dimitrios Dimitriadis]]|
|[[Ding, Chenchen|AUTHOR Chenchen Ding]]|
|[[Ding, Guohong|AUTHOR Guohong Ding]]|
|[[Ding, Haisong|AUTHOR Haisong Ding]]|
|[[Ding, Jiande|AUTHOR Jiande Ding]]|
|[[Ding, Shaojin|AUTHOR Shaojin Ding]]|
|[[Ding, Wenhao|AUTHOR Wenhao Ding]]|
|[[Dinh, Tuan|AUTHOR Tuan Dinh]]|
|[[Dinkel, Heinrich|AUTHOR Heinrich Dinkel]]|
|[[Dinkov, Yoan|AUTHOR Yoan Dinkov]]|
|[[Ditter, David|AUTHOR David Ditter]]|
|[[Dodsworth, Robin|AUTHOR Robin Dodsworth]]|
|[[Dolata, Jill|AUTHOR Jill Dolata]]|
|[[Domínguez, Mónica|AUTHOR Mónica Domínguez]]|
|[[Donahue, Chris|AUTHOR Chris Donahue]]|
|[[Dong, Linhao|AUTHOR Linhao Dong]]|
|[[Dong, Mingye|AUTHOR Mingye Dong]]|
|[[Dong, Yuanjie|AUTHOR Yuanjie Dong]]|
|[[Doulaty, Mortaza|AUTHOR Mortaza Doulaty]]|
|[[Douros, Ioannis K.|AUTHOR Ioannis K. Douros]]|
|[[Drechsel, Susanne|AUTHOR Susanne Drechsel]]|
|[[Drioli, Carlo|AUTHOR Carlo Drioli]]|
|[[Dromey, Anita|AUTHOR Anita Dromey]]|
|[[Dromey, Christopher|AUTHOR Christopher Dromey]]|
|[[Drude, Lukas|AUTHOR Lukas Drude]]|
|[[Drugman, Thomas|AUTHOR Thomas Drugman]]|
|[[Du, Dan|AUTHOR Dan Du]]|
|[[Du, Jun|AUTHOR Jun Du]]|
|[[Du, Wenchao|AUTHOR Wenchao Du]]|
|[[Du, Zhihao|AUTHOR Zhihao Du]]|
|[[Dubagunta, S. Pavankumar|AUTHOR S. Pavankumar Dubagunta]]|
|[[Dubey, Akhilesh Kumar|AUTHOR Akhilesh Kumar Dubey]]|
|[[Dubey, Harishchandra|AUTHOR Harishchandra Dubey]]|
|[[Dubnov, Shlomo|AUTHOR Shlomo Dubnov]]|
|[[Dudziak, Łukasz|AUTHOR Łukasz Dudziak]]|
|[[Dufour, Richard|AUTHOR Richard Dufour]]|
|[[Dugan, Sarah|AUTHOR Sarah Dugan]]|
|[[Dugrain, Charlotte|AUTHOR Charlotte Dugrain]]|
|[[Dumpala, Sri Harsha|AUTHOR Sri Harsha Dumpala]]|
|[[Dunbar, Ewan|AUTHOR Ewan Dunbar]]|
|[[Dupoux, Emmanuel|AUTHOR Emmanuel Dupoux]]|
|[[Duran, Daniel|AUTHOR Daniel Duran]]|
|[[Dutoit, Thierry|AUTHOR Thierry Dutoit]]|
|[[Dzodzo, Borislav|AUTHOR Borislav Dzodzo]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Eary, Kathryn J.|AUTHOR Kathryn J. Eary]]|
|[[Ebbers, Janek|AUTHOR Janek Ebbers]]|
|[[Ebersberg, Miriam|AUTHOR Miriam Ebersberg]]|
|[[Edlund, Jens|AUTHOR Jens Edlund]]|
|[[Edraki, Amin|AUTHOR Amin Edraki]]|
|[[Egas López, José Vicente|AUTHOR José Vicente Egas López]]|
|[[Egorow, Olga|AUTHOR Olga Egorow]]|
|[[Eldesouki, Mohamed|AUTHOR Mohamed Eldesouki]]|
|[[El Haddad, Kevin|AUTHOR Kevin El Haddad]]|
|[[Elhilali, Mounya|AUTHOR Mounya Elhilali]]|
|[[Elibol, Oguz H.|AUTHOR Oguz H. Elibol]]|
|[[El-Khamy, Mostafa|AUTHOR Mostafa El-Khamy]]|
|[[Ellinas, Nikolaos|AUTHOR Nikolaos Ellinas]]|
|[[Eloff, Ryan|AUTHOR Ryan Eloff]]|
|[[El Shafey, Laurent|AUTHOR Laurent El Shafey]]|
|[[Elsner, Daniel|AUTHOR Daniel Elsner]]|
|[[El Zarka, Dina|AUTHOR Dina El Zarka]]|
|[[Emanuel, Dotan|AUTHOR Dotan Emanuel]]|
|[[Epps, Julien|AUTHOR Julien Epps]]|
|[[Eriksson, Anders|AUTHOR Anders Eriksson]]|
|[[Ernestus, Mirjam|AUTHOR Mirjam Ernestus]]|
|[[Erzin, Engin|AUTHOR Engin Erzin]]|
|[[Escobar-Grisales, D.|AUTHOR D. Escobar-Grisales]]|
|[[Eshghi, Marziye|AUTHOR Marziye Eshghi]]|
|[[Eshky, Aciel|AUTHOR Aciel Eshky]]|
|[[Eskofier, B.|AUTHOR B. Eskofier]]|
|[[Esposito, Anna|AUTHOR Anna Esposito]]|
|[[Esposito, Antonietta M.|AUTHOR Antonietta M. Esposito]]|
|[[Espy-Wilson, Carol|AUTHOR Carol Espy-Wilson]]|
|[[Estève, Yannick|AUTHOR Yannick Estève]]|
|[[Etemad, Ali|AUTHOR Ali Etemad]]|
|[[Eulitz, Carsten|AUTHOR Carsten Eulitz]]|
|[[Eum, Jungyun|AUTHOR Jungyun Eum]]|
|[[Evanini, Keelan|AUTHOR Keelan Evanini]]|
|[[Evans, Nicholas|AUTHOR Nicholas Evans]]|
|[[Eyigoz, Elif|AUTHOR Elif Eyigoz]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Fainberg, Joachim|AUTHOR Joachim Fainberg]]|
|[[Falk, Tiago H.|AUTHOR Tiago H. Falk]]|
|[[Fallgren, Per|AUTHOR Per Fallgren]]|
|[[Fan, Cunhang|AUTHOR Cunhang Fan]]|
|[[Fan, Lei|AUTHOR Lei Fan]]|
|[[Fan, Ruchao|AUTHOR Ruchao Fan]]|
|[[Fang, Fuming|AUTHOR Fuming Fang]]|
|[[Fang, Shih-Hau|AUTHOR Shih-Hau Fang]]|
|[[Fanioudakis, Eleftherios|AUTHOR Eleftherios Fanioudakis]]|
|[[Farella, Elisabetta|AUTHOR Elisabetta Farella]]|
|[[Farinas, Jérôme|AUTHOR Jérôme Farinas]]|
|[[Farooq, Muhammad Umar|AUTHOR Muhammad Umar Farooq]]|
|[[Farrar, David Scott|AUTHOR David Scott Farrar]]|
|[[Farrús, Mireia|AUTHOR Mireia Farrús]]|
|[[Fazel, Amin|AUTHOR Amin Fazel]]|
|[[Federmeier, Kara D.|AUTHOR Kara D. Federmeier]]|
|[[Felblinger, Jacques|AUTHOR Jacques Felblinger]]|
|[[Felker, E.|AUTHOR E. Felker]]|
|[[Fels, Sidney|AUTHOR Sidney Fels]]|
|[[Feng, Gang|AUTHOR Gang Feng]]|
|[[Feng, Junlan|AUTHOR Junlan Feng]]|
|[[Feng, Siyuan|AUTHOR Siyuan Feng]]|
|[[Fenu, Gianni|AUTHOR Gianni Fenu]]|
|[[Ferenc Segedin, Bruno|AUTHOR Bruno Ferenc Segedin]]|
|[[Fernández-Martínez, Fernando|AUTHOR Fernando Fernández-Martínez]]|
|[[Ferragne, Emmanuel|AUTHOR Emmanuel Ferragne]]|
|[[Ferrari, Paul|AUTHOR Paul Ferrari]]|
|[[Ferreiros-López, Javier|AUTHOR Javier Ferreiros-López]]|
|[[Ferrer, Luciana|AUTHOR Luciana Ferrer]]|
|[[Figueiredo, Daniela|AUTHOR Daniela Figueiredo]]|
|[[Figueroa, Carol|AUTHOR Carol Figueroa]]|
|[[Filimonov, Denis|AUTHOR Denis Filimonov]]|
|[[Finkler, Ulrich|AUTHOR Ulrich Finkler]]|
|[[Fischer, Kerstin|AUTHOR Kerstin Fischer]]|
|[[Fischer, Volker|AUTHOR Volker Fischer]]|
|[[Flemotomos, Nikolaos|AUTHOR Nikolaos Flemotomos]]|
|[[Flynn, Michael|AUTHOR Michael Flynn]]|
|[[Fogerty, Daniel|AUTHOR Daniel Fogerty]]|
|[[Foley, Ben|AUTHOR Ben Foley]]|
|[[Foltz, Anouschka|AUTHOR Anouschka Foltz]]|
|[[Fong, Jason|AUTHOR Jason Fong]]|
|[[Fong, Judy Y.|AUTHOR Judy Y. Fong]]|
|[[Foo, Chuan-Sheng|AUTHOR Chuan-Sheng Foo]]|
|[[Fookes, C.|AUTHOR C. Fookes]]|
|[[Ford, Logan|AUTHOR Logan Ford]]|
|[[Foresti, Gian Luca|AUTHOR Gian Luca Foresti]]|
|[[Fougeron, Cécile|AUTHOR Cécile Fougeron]]|
|[[Frahm, Jens|AUTHOR Jens Frahm]]|
|[[Francesca, Gianpiero|AUTHOR Gianpiero Francesca]]|
|[[Frank, Stefan L.|AUTHOR Stefan L. Frank]]|
|[[Fried, Itzhak|AUTHOR Itzhak Fried]]|
|[[Fu, Szu-Wei|AUTHOR Szu-Wei Fu]]|
|[[Fu, Zhen|AUTHOR Zhen Fu]]|
|[[Fuchs, Guillaume|AUTHOR Guillaume Fuchs]]|
|[[Fuchs, Robert|AUTHOR Robert Fuchs]]|
|[[Fuchs, Susanne|AUTHOR Susanne Fuchs]]|
|[[Fuchs, Tzeviya Sylvia|AUTHOR Tzeviya Sylvia Fuchs]]|
|[[Fuegen, Christian|AUTHOR Christian Fuegen]]|
|[[Fujie, Shinya|AUTHOR Shinya Fujie]]|
|[[Fujimoto, Masakiyo|AUTHOR Masakiyo Fujimoto]]|
|[[Fujimura, Hiroshi|AUTHOR Hiroshi Fujimura]]|
|[[Fujita, Yusuke|AUTHOR Yusuke Fujita]]|
|[[Fukuda, Takashi|AUTHOR Takashi Fukuda]]|
|[[Fukunaga, Daisuke|AUTHOR Daisuke Fukunaga]]|
|[[Fukutomi, Takaaki|AUTHOR Takaaki Fukutomi]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[G., Haritha U.|AUTHOR Haritha U. G.]]|
|[[Gabriel, Raefer|AUTHOR Raefer Gabriel]]|
|[[Gadde, Ravi Teja|AUTHOR Ravi Teja Gadde]]|
|[[Gale, Robert|AUTHOR Robert Gale]]|
|[[Gales, Mark J.F.|AUTHOR Mark J.F. Gales]]|
|[[Gallagher, I.|AUTHOR I. Gallagher]]|
|[[Gallardo-Antolín, Ascensión|AUTHOR Ascensión Gallardo-Antolín]]|
|[[Gan, Jun-Wei|AUTHOR Jun-Wei Gan]]|
|[[Ganapathy, Sriram|AUTHOR Sriram Ganapathy]]|
|[[Gangashetty, Suryakanth V.|AUTHOR Suryakanth V. Gangashetty]]|
|[[Gao, Jian|AUTHOR Jian Gao]]|
|[[Gao, Qiang|AUTHOR Qiang Gao]]|
|[[Gao, Xiaoxue|AUTHOR Xiaoxue Gao]]|
|[[Gao, Yingming|AUTHOR Yingming Gao]]|
|[[Gao, Yingying|AUTHOR Yingying Gao]]|
|[[Gao, Yixin|AUTHOR Yixin Gao]]|
|[[Gao, Zhifu|AUTHOR Zhifu Gao]]|
|[[García-Ospina, N.|AUTHOR N. García-Ospina]]|
|[[García-Perera, Leibny Paola|AUTHOR Leibny Paola García-Perera]]|
|[[Garcia-Romero, Daniel|AUTHOR Daniel Garcia-Romero]]|
|[[Garg, Abhinav|AUTHOR Abhinav Garg]]|
|[[Garg, Ashish|AUTHOR Ashish Garg]]|
|[[Garimella, Sri|AUTHOR Sri Garimella]]|
|[[Garner, Philip N.|AUTHOR Philip N. Garner]]|
|[[Garudadri, Harinath|AUTHOR Harinath Garudadri]]|
|[[Gaspers, Judith|AUTHOR Judith Gaspers]]|
|[[Gaur, Yashesh|AUTHOR Yashesh Gaur]]|
|[[Ge, Meng|AUTHOR Meng Ge]]|
|[[Gehrke, Johannes|AUTHOR Johannes Gehrke]]|
|[[Geng, Mengzhe|AUTHOR Mengzhe Geng]]|
|[[Georges, Munir|AUTHOR Munir Georges]]|
|[[Georgeton, Laurianne|AUTHOR Laurianne Georgeton]]|
|[[Georgiou, Efthymios|AUTHOR Efthymios Georgiou]]|
|[[Georgiou, Panayiotis|AUTHOR Panayiotis Georgiou]]|
|[[Gerber, Silvain|AUTHOR Silvain Gerber]]|
|[[Gerkmann, Timo|AUTHOR Timo Gerkmann]]|
|[[Germain, François G.|AUTHOR François G. Germain]]|
|[[Gessinger, Iona|AUTHOR Iona Gessinger]]|
|[[Ghahabi, Omid|AUTHOR Omid Ghahabi]]|
|[[Ghahremani, Pegah|AUTHOR Pegah Ghahremani]]|
|[[Gharsellaoui, Soumaya|AUTHOR Soumaya Gharsellaoui]]|
|[[Ghosh, Prasanta Kumar|AUTHOR Prasanta Kumar Ghosh]]|
|[[Ghosh, Ria|AUTHOR Ria Ghosh]]|
|[[Giacobello, Daniele|AUTHOR Daniele Giacobello]]|
|[[Giakoumis, Dimitrios|AUTHOR Dimitrios Giakoumis]]|
|[[Giannakopoulos, Theodoros|AUTHOR Theodoros Giannakopoulos]]|
|[[Gibson, J.|AUTHOR J. Gibson]]|
|[[Gideon, John|AUTHOR John Gideon]]|
|[[Gili Fivela, Barbara|AUTHOR Barbara Gili Fivela]]|
|[[Gilmartin, Emer|AUTHOR Emer Gilmartin]]|
|[[Giménez, Adrià|AUTHOR Adrià Giménez]]|
|[[Gimeno, Pablo|AUTHOR Pablo Gimeno]]|
|[[Ginsburg, Boris|AUTHOR Boris Ginsburg]]|
|[[Giquel, Mathieu|AUTHOR Mathieu Giquel]]|
|[[Girard, Valérian|AUTHOR Valérian Girard]]|
|[[Gjoreski, Aleksandar|AUTHOR Aleksandar Gjoreski]]|
|[[Gjoreski, Kristijan|AUTHOR Kristijan Gjoreski]]|
|[[Gkinis, Ioannis|AUTHOR Ioannis Gkinis]]|
|[[Glackin, Cornelius|AUTHOR Cornelius Glackin]]|
|[[Glass, James|AUTHOR James Glass]]|
|[[Glembek, Ondřej|AUTHOR Ondřej Glembek]]|
|[[Głowski, Robert|AUTHOR Robert Głowski]]|
|[[Gobl, Christer|AUTHOR Christer Gobl]]|
|[[Goel, Nagendra Kumar|AUTHOR Nagendra Kumar Goel]]|
|[[Goel, Rahul|AUTHOR Rahul Goel]]|
|[[Gök, Alican|AUTHOR Alican Gök]]|
|[[Gokcen, Ajda|AUTHOR Ajda Gokcen]]|
|[[Goldberg, Simon B.|AUTHOR Simon B. Goldberg]]|
|[[Goldrick, Matthew|AUTHOR Matthew Goldrick]]|
|[[Golik, Pavel|AUTHOR Pavel Golik]]|
|[[Gollwitzer, S.|AUTHOR S. Gollwitzer]]|
|[[Golshtein, Eduard|AUTHOR Eduard Golshtein]]|
|[[Gomez, Angel M.|AUTHOR Angel M. Gomez]]|
|[[Gomez-Alanis, Alejandro|AUTHOR Alejandro Gomez-Alanis]]|
|[[Gong, Yifan|AUTHOR Yifan Gong]]|
|[[Gong, Yuan|AUTHOR Yuan Gong]]|
|[[Gonzalez, Jose A.|AUTHOR Jose A. Gonzalez]]|
|[[Gonzalvo, Xavi|AUTHOR Xavi Gonzalvo]]|
|[[Gopalakrishnan, Karthik|AUTHOR Karthik Gopalakrishnan]]|
|[[Gope, Dipanjan|AUTHOR Dipanjan Gope]]|
|[[Gopee, Naassih|AUTHOR Naassih Gopee]]|
|[[Gorlanov, Artem|AUTHOR Artem Gorlanov]]|
|[[Gorman, Kyle|AUTHOR Kyle Gorman]]|
|[[Gorrostieta, Cristina|AUTHOR Cristina Gorrostieta]]|
|[[Goswami, Nabarun|AUTHOR Nabarun Goswami]]|
|[[Gosztolya, Gábor|AUTHOR Gábor Gosztolya]]|
|[[Goto, Tsubasa|AUTHOR Tsubasa Goto]]|
|[[Gottardi, Anna|AUTHOR Anna Gottardi]]|
|[[Gottimukkala, Sravani|AUTHOR Sravani Gottimukkala]]|
|[[Govender, Avashna|AUTHOR Avashna Govender]]|
|[[Gowda, Dhananjaya|AUTHOR Dhananjaya Gowda]]|
|[[Goyal, Mohit|AUTHOR Mohit Goyal]]|
|[[Grabner, Helmut|AUTHOR Helmut Grabner]]|
|[[Gráczi, Tekla Etelka|AUTHOR Tekla Etelka Gráczi]]|
|[[Graham, Calbert|AUTHOR Calbert Graham]]|
|[[Gravano, Agustín|AUTHOR Agustín Gravano]]|
|[[Green, Jordan R.|AUTHOR Jordan R. Green]]|
|[[Greenberg, Craig|AUTHOR Craig Greenberg]]|
|[[Griol, David|AUTHOR David Griol]]|
|[[Grondin, François|AUTHOR François Grondin]]|
|[[Grósz, Tamás|AUTHOR Tamás Grósz]]|
|[[Grůber, Martin|AUTHOR Martin Grůber]]|
|[[Gruenstein, Alexander|AUTHOR Alexander Gruenstein]]|
|[[Gu, Rongzhi|AUTHOR Rongzhi Gu]]|
|[[Gu, Wentao|AUTHOR Wentao Gu]]|
|[[Gu, Zhaoyi|AUTHOR Zhaoyi Gu]]|
|[[Guasch, Oriol|AUTHOR Oriol Guasch]]|
|[[Gubian, Michele|AUTHOR Michele Gubian]]|
|[[Guðnason, Jón|AUTHOR Jón Guðnason]]|
|[[Guha, Satarupa|AUTHOR Satarupa Guha]]|
|[[Guitard-Ivent, Fanny|AUTHOR Fanny Guitard-Ivent]]|
|[[Gundogdu, Batuhan|AUTHOR Batuhan Gundogdu]]|
|[[Gunendradasan, Tharshini|AUTHOR Tharshini Gunendradasan]]|
|[[Guo, Haohan|AUTHOR Haohan Guo]]|
|[[Guo, Jiaqi|AUTHOR Jiaqi Guo]]|
|[[Guo, Ling|AUTHOR Ling Guo]]|
|[[Guo, Pengcheng|AUTHOR Pengcheng Guo]]|
|[[Guo, Wu|AUTHOR Wu Guo]]|
|[[Guo, Ya’nan|AUTHOR Ya’nan Guo]]|
|[[Guo, Yi|AUTHOR Yi Guo]]|
|[[Gupta, Chitralekha|AUTHOR Chitralekha Gupta]]|
|[[Gupta, Deepika|AUTHOR Deepika Gupta]]|
|[[Gupta, Rahul|AUTHOR Rahul Gupta]]|
|[[Gupta, Vishwa|AUTHOR Vishwa Gupta]]|
|[[Gurevich, Tanya|AUTHOR Tanya Gurevich]]|
|[[Gurunath Shivakumar, Prashanth|AUTHOR Prashanth Gurunath Shivakumar]]|
|[[Gusafsson, Joakim|AUTHOR Joakim Gusafsson]]|
|[[Gusev, Aleksei|AUTHOR Aleksei Gusev]]|
|[[Gustafson, Joakim|AUTHOR Joakim Gustafson]]|
|[[Gustavsson, Lisa|AUTHOR Lisa Gustavsson]]|
|[[Gutierrez-Osuna, Ricardo|AUTHOR Ricardo Gutierrez-Osuna]]|
|[[Gutkin, Alexander|AUTHOR Alexander Gutkin]]|
|[[Gutz, Sarah E.|AUTHOR Sarah E. Gutz]]|
|[[Gyawali, Binod|AUTHOR Binod Gyawali]]|
|[[Gyires-Tóth, Bálint|AUTHOR Bálint Gyires-Tóth]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Ha, Sungjoo|AUTHOR Sungjoo Ha]]|
|[[Haake, Kristin|AUTHOR Kristin Haake]]|
|[[Habash, Nizar|AUTHOR Nizar Habash]]|
|[[Haderlein, Tino|AUTHOR Tino Haderlein]]|
|[[Haeb-Umbach, Reinhold|AUTHOR Reinhold Haeb-Umbach]]|
|[[Hagihara, Yuya|AUTHOR Yuya Hagihara]]|
|[[Haider, Fasih|AUTHOR Fasih Haider]]|
|[[Hain, Thomas|AUTHOR Thomas Hain]]|
|[[Hajavi, Amirhossein|AUTHOR Amirhossein Hajavi]]|
|[[Hakkani-Tür, Dilek|AUTHOR Dilek Hakkani-Tür]]|
|[[Hall, Kathleen Currie|AUTHOR Kathleen Currie Hall]]|
|[[Hamanaka, Sayaka|AUTHOR Sayaka Hamanaka]]|
|[[Hamann, Silke|AUTHOR Silke Hamann]]|
|[[Hamilton, William L.|AUTHOR William L. Hamilton]]|
|[[Hamzaoui, Raouf|AUTHOR Raouf Hamzaoui]]|
|[[Han, David K.|AUTHOR David K. Han]]|
|[[Han, Icksang|AUTHOR Icksang Han]]|
|[[Han, Jiqing|AUTHOR Jiqing Han]]|
|[[Han, Kun|AUTHOR Kun Han]]|
|[[Han, Kyu J.|AUTHOR Kyu J. Han]]|
|[[Hannemann, Mirko|AUTHOR Mirko Hannemann]]|
|[[Hannun, Awni|AUTHOR Awni Hannun]]|
|[[Hansen, John H.L.|AUTHOR John H.L. Hansen]]|
|[[Hantke, Simone|AUTHOR Simone Hantke]]|
|[[Hao, Jing|AUTHOR Jing Hao]]|
|[[Hao, Xiang|AUTHOR Xiang Hao]]|
|[[Hara, Kohei|AUTHOR Kohei Hara]]|
|[[Harada, Shouji|AUTHOR Shouji Harada]]|
|[[Harati, Amir|AUTHOR Amir Harati]]|
|[[Härmä, Aki|AUTHOR Aki Härmä]]|
|[[Harrington, Jonathan|AUTHOR Jonathan Harrington]]|
|[[harris, fred|AUTHOR fred harris]]|
|[[Harwath, David|AUTHOR David Harwath]]|
|[[Hasegawa-Johnson, Mark A.|AUTHOR Mark A. Hasegawa-Johnson]]|
|[[Hassidim, Avinatan|AUTHOR Avinatan Hassidim]]|
|[[Hauptman, Yermiyahu|AUTHOR Yermiyahu Hauptman]]|
|[[Hautamäki, Ville|AUTHOR Ville Hautamäki]]|
|[[Hayakawa, Shoji|AUTHOR Shoji Hayakawa]]|
|[[Hayashi, Tomoki|AUTHOR Tomoki Hayashi]]|
|[[Hazan, Valerie|AUTHOR Valerie Hazan]]|
|[[He, Ke-Xin|AUTHOR Ke-Xin He]]|
|[[He, Lei|AUTHOR Lei He]]|
|[[He, Liang|AUTHOR Liang He]]|
|[[He, Mutian|AUTHOR Mutian He]]|
|[[He, Xiaodong|AUTHOR Xiaodong He]]|
|[[He, Yanzhang|AUTHOR Yanzhang He]]|
|[[He, Youjue|AUTHOR Youjue He]]|
|[[He, Zhongjun|AUTHOR Zhongjun He]]|
|[[Heba, Abdelwahab|AUTHOR Abdelwahab Heba]]|
|[[Hedayatnia, Behnam|AUTHOR Behnam Hedayatnia]]|
|[[Hegde, Monica|AUTHOR Monica Hegde]]|
|[[Hegde, Pradyoth|AUTHOR Pradyoth Hegde]]|
|[[Hei, To Ka|AUTHOR To Ka Hei]]|
|[[Heitkaemper, Jens|AUTHOR Jens Heitkaemper]]|
|[[Heldner, Mattias|AUTHOR Mattias Heldner]]|
|[[Helgadóttir, Inga R.|AUTHOR Inga R. Helgadóttir]]|
|[[Henter, Gustav Eje|AUTHOR Gustav Eje Henter]]|
|[[Heo, Hee-Soo|AUTHOR Hee-Soo Heo]]|
|[[Hermansky, Hynek|AUTHOR Hynek Hermansky]]|
|[[Hermes, Anne|AUTHOR Anne Hermes]]|
|[[Hernaez, Inma|AUTHOR Inma Hernaez]]|
|[[Hernandez, Nicolas|AUTHOR Nicolas Hernandez]]|
|[[Hernandez-Cordero, Jaime|AUTHOR Jaime Hernandez-Cordero]]|
|[[Hernando, Javier|AUTHOR Javier Hernando]]|
|[[Hershey, John R.|AUTHOR John R. Hershey]]|
|[[Heylen, Dirk|AUTHOR Dirk Heylen]]|
|[[Heymann, Jahn|AUTHOR Jahn Heymann]]|
|[[Higuchi, Yosuke|AUTHOR Yosuke Higuchi]]|
|[[Hillis, Steven|AUTHOR Steven Hillis]]|
|[[Hinger, Barbara|AUTHOR Barbara Hinger]]|
|[[Hinthorn, William|AUTHOR William Hinthorn]]|
|[[Hirschberg, Julia|AUTHOR Julia Hirschberg]]|
|[[Hirschfeld, Diane|AUTHOR Diane Hirschfeld]]|
|[[Hodari, Zack|AUTHOR Zack Hodari]]|
|[[Hodoshima, Nao|AUTHOR Nao Hodoshima]]|
|[[Hodson, Jeff|AUTHOR Jeff Hodson]]|
|[[Hofer, Gregor|AUTHOR Gregor Hofer]]|
|[[Hoffman, Matthew W.|AUTHOR Matthew W. Hoffman]]|
|[[Hoffmeister, Björn|AUTHOR Björn Hoffmeister]]|
|[[Hogan, Kevin|AUTHOR Kevin Hogan]]|
|[[Hojo, Nobukatsu|AUTHOR Nobukatsu Hojo]]|
|[[Holt, Millie|AUTHOR Millie Holt]]|
|[[Holube, Inga|AUTHOR Inga Holube]]|
|[[Honda, Hiroshi|AUTHOR Hiroshi Honda]]|
|[[Honda, Kiyoshi|AUTHOR Kiyoshi Honda]]|
|[[Hong, Hui-Ting|AUTHOR Hui-Ting Hong]]|
|[[Hong, Qingyang|AUTHOR Qingyang Hong]]|
|[[Hoole, Phil|AUTHOR Phil Hoole]]|
|[[Hoory, Ron|AUTHOR Ron Hoory]]|
|[[Hori, Chiori|AUTHOR Chiori Hori]]|
|[[Hori, Takaaki|AUTHOR Takaaki Hori]]|
|[[Hori, Tatsuro|AUTHOR Tatsuro Hori]]|
|[[Horiguchi, Shota|AUTHOR Shota Horiguchi]]|
|[[Hou, Leijing|AUTHOR Leijing Hou]]|
|[[Hough, Julian|AUTHOR Julian Hough]]|
|[[Hrúz, Marek|AUTHOR Marek Hrúz]]|
|[[Hsieh, Han-Chi|AUTHOR Han-Chi Hsieh]]|
|[[Hsieh, Hsi-Wei|AUTHOR Hsi-Wei Hsieh]]|
|[[Hsu, Aliyah R.|AUTHOR Aliyah R. Hsu]]|
|[[Hsu, I-Hung|AUTHOR I-Hung Hsu]]|
|[[Hsu, Po-chun|AUTHOR Po-chun Hsu]]|
|[[Hsu, Wei-Ning|AUTHOR Wei-Ning Hsu]]|
|[[Hsu, Yi-Te|AUTHOR Yi-Te Hsu]]|
|[[Hsu, Yu-Yin|AUTHOR Yu-Yin Hsu]]|
|[[Hu, Bingyan|AUTHOR Bingyan Hu]]|
|[[Hu, Bojie|AUTHOR Bojie Hu]]|
|[[Hu, Fang|AUTHOR Fang Hu]]|
|[[Hu, Ke|AUTHOR Ke Hu]]|
|[[Hu, Pengfei|AUTHOR Pengfei Hu]]|
|[[Hu, Shoukang|AUTHOR Shoukang Hu]]|
|[[Huang, Che-Wei|AUTHOR Che-Wei Huang]]|
|[[Huang, Chih-Hsiang|AUTHOR Chih-Hsiang Huang]]|
|[[Huang, Fei|AUTHOR Fei Huang]]|
|[[Huang, Feng|AUTHOR Feng Huang]]|
|[[Huang, Jian|AUTHOR Jian Huang]]|
|[[Huang, Jing|AUTHOR Jing Huang]]|
|[[Huang, Jonathan|AUTHOR Jonathan Huang]]|
|[[Huang, Peijie|AUTHOR Peijie Huang]]|
|[[Huang, Peisong|AUTHOR Peisong Huang]]|
|[[Huang, Pin-Tuan|AUTHOR Pin-Tuan Huang]]|
|[[Huang, Qiang|AUTHOR Qiang Huang]]|
|[[Huang, Shan|AUTHOR Shan Huang]]|
|[[Huang, Shen|AUTHOR Shen Huang]]|
|[[Huang, Wen-Chin|AUTHOR Wen-Chin Huang]]|
|[[Huang, Xuedong|AUTHOR Xuedong Huang]]|
|[[Huang, Yinghui|AUTHOR Yinghui Huang]]|
|[[Huang, Yiteng|AUTHOR Yiteng Huang]]|
|[[Huang, Yu-Lin|AUTHOR Yu-Lin Huang]]|
|[[Huang, Zhiqiang|AUTHOR Zhiqiang Huang]]|
|[[Huang, Zhiying|AUTHOR Zhiying Huang]]|
|[[Huber, Jacob|AUTHOR Jacob Huber]]|
|[[Huber, Tobias|AUTHOR Tobias Huber]]|
|[[Huenerfauth, Matt|AUTHOR Matt Huenerfauth]]|
|[[Hughes, Cían|AUTHOR Cían Hughes]]|
|[[Hui, J.|AUTHOR J. Hui]]|
|[[Hui, Like|AUTHOR Like Hui]]|
|[[Hung, Jeih-weih|AUTHOR Jeih-weih Hung]]|
|[[Hunt, Melvyn|AUTHOR Melvyn Hunt]]|
|[[Huo, Qiang|AUTHOR Qiang Huo]]|
|[[Hussain, Nusrah|AUTHOR Nusrah Hussain]]|
|[[Hussain, Sarmad|AUTHOR Sarmad Hussain]]|
|[[Hussain, Shehzeen|AUTHOR Shehzeen Hussain]]|
|[[Hwang, Kyuwoong|AUTHOR Kyuwoong Hwang]]|
|[[Hwang, Mei-Yuh|AUTHOR Mei-Yuh Hwang]]|
|[[Hwang, Min-Jae|AUTHOR Min-Jae Hwang]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Ibrahim, Omnia|AUTHOR Omnia Ibrahim]]|
|[[Ignoto, Patrick|AUTHOR Patrick Ignoto]]|
|[[Ijima, Yusuke|AUTHOR Yusuke Ijima]]|
|[[Illa, Aravind|AUTHOR Aravind Illa]]|
|[[Illium, Steffen|AUTHOR Steffen Illium]]|
|[[Imel, Zac E.|AUTHOR Zac E. Imel]]|
|[[Imran, Ali Shariq|AUTHOR Ali Shariq Imran]]|
|[[İnan, Berkay|AUTHOR Berkay İnan]]|
|[[India, Miquel|AUTHOR Miquel India]]|
|[[Inoue, Koji|AUTHOR Koji Inoue]]|
|[[Iqbal, Zikra|AUTHOR Zikra Iqbal]]|
|[[Iranzo-Sánchez, Javier|AUTHOR Javier Iranzo-Sánchez]]|
|[[Irie, Kazuki|AUTHOR Kazuki Irie]]|
|[[Irino, Toshio|AUTHOR Toshio Irino]]|
|[[Isaieva, Karyna|AUTHOR Karyna Isaieva]]|
|[[Ishi, Carlos|AUTHOR Carlos Ishi]]|
|[[Ishiguro, Hiroshi|AUTHOR Hiroshi Ishiguro]]|
|[[Iturralde Zurita, Alvaro Martin|AUTHOR Alvaro Martin Iturralde Zurita]]|
|[[Ivanov, Artem|AUTHOR Artem Ivanov]]|
|[[Iwata, Kenji|AUTHOR Kenji Iwata]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Jackson, Stephanie|AUTHOR Stephanie Jackson]]|
|[[Jaggi, Martin|AUTHOR Martin Jaggi]]|
|[[Jain, Abhinav|AUTHOR Abhinav Jain]]|
|[[Jain, Mahaveer|AUTHOR Mahaveer Jain]]|
|[[Jaiswal, Ayush|AUTHOR Ayush Jaiswal]]|
|[[Jaiswal, Mimansa|AUTHOR Mimansa Jaiswal]]|
|[[Jalal, Md. Asif|AUTHOR Md. Asif Jalal]]|
|[[Jamakovic, Nisad|AUTHOR Nisad Jamakovic]]|
|[[Janbakhshi, Parvaneh|AUTHOR Parvaneh Janbakhshi]]|
|[[Jankowski, Charles|AUTHOR Charles Jankowski]]|
|[[Jannetts, Stephen|AUTHOR Stephen Jannetts]]|
|[[Jansche, Martin|AUTHOR Martin Jansche]]|
|[[Janse, Esther|AUTHOR Esther Janse]]|
|[[Jansen, Michel-Pierre|AUTHOR Michel-Pierre Jansen]]|
|[[Janson, Johanna|AUTHOR Johanna Janson]]|
|[[Jasserand, Catherine|AUTHOR Catherine Jasserand]]|
|[[Jati, Arindam|AUTHOR Arindam Jati]]|
|[[Jatteau, Adèle|AUTHOR Adèle Jatteau]]|
|[[Jeancolas, Laetitia|AUTHOR Laetitia Jeancolas]]|
|[[Jelil, Sarfaraz|AUTHOR Sarfaraz Jelil]]|
|[[Jenne, Sabrina|AUTHOR Sabrina Jenne]]|
|[[Jensen, Jesper|AUTHOR Jesper Jensen]]|
|[[Jeon, Chang-Bin|AUTHOR Chang-Bin Jeon]]|
|[[Jia, Chen|AUTHOR Chen Jia]]|
|[[Jia, Jia|AUTHOR Jia Jia]]|
|[[Jia, Ye|AUTHOR Ye Jia]]|
|[[Jia, Yuan|AUTHOR Yuan Jia]]|
|[[Jian, Meng|AUTHOR Meng Jian]]|
|[[Jiang, Di|AUTHOR Di Jiang]]|
|[[Jiang, Qing-Yuan|AUTHOR Qing-Yuan Jiang]]|
|[[Jiang, Tao|AUTHOR Tao Jiang]]|
|[[Jiang, Yiheng|AUTHOR Yiheng Jiang]]|
|[[Jin, Hongxia|AUTHOR Hongxia Jin]]|
|[[Jin, Qin|AUTHOR Qin Jin]]|
|[[Jin, Zhenghao|AUTHOR Zhenghao Jin]]|
|[[Joglekar, Aditya|AUTHOR Aditya Joglekar]]|
|[[Johnson, Melvin|AUTHOR Melvin Johnson]]|
|[[Johny, Cibu|AUTHOR Cibu Johny]]|
|[[Jonsson, Ing-Marie|AUTHOR Ing-Marie Jonsson]]|
|[[Jorge, Javier|AUTHOR Javier Jorge]]|
|[[Jorschick, Annett|AUTHOR Annett Jorschick]]|
|[[Joseph, Arun A.|AUTHOR Arun A. Joseph]]|
|[[Joshi, Sonal|AUTHOR Sonal Joshi]]|
|[[Ju, Qi|AUTHOR Qi Ju]]|
|[[Juan, Alfons|AUTHOR Alfons Juan]]|
|[[Jung, Jee-weon|AUTHOR Jee-weon Jung]]|
|[[Jung, Youngmoon|AUTHOR Youngmoon Jung]]|
|[[Jurdak, Raja|AUTHOR Raja Jurdak]]|
|[[Juvela, Lauri|AUTHOR Lauri Juvela]]|
|[[Jůzová, Markéta|AUTHOR Markéta Jůzová]]|
|[[Jyothi, Preethi|AUTHOR Preethi Jyothi]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[K., Mohamed Ismail Yasar Arafath|AUTHOR Mohamed Ismail Yasar Arafath K.]]|
|[[Kaburagi, Tokihiko|AUTHOR Tokihiko Kaburagi]]|
|[[Kachkovskaia, Tatiana|AUTHOR Tatiana Kachkovskaia]]|
|[[Kadiri, Sudarsana Reddy|AUTHOR Sudarsana Reddy Kadiri]]|
|[[Kafle, Sushant|AUTHOR Sushant Kafle]]|
|[[Kageyama, Yuichi|AUTHOR Yuichi Kageyama]]|
|[[Kain, Alexander|AUTHOR Alexander Kain]]|
|[[Kakouros, Sofoklis|AUTHOR Sofoklis Kakouros]]|
|[[Kalinli, Ozlem|AUTHOR Ozlem Kalinli]]|
|[[Kalita, Sishir|AUTHOR Sishir Kalita]]|
|[[Kaltenbacher, Manfred|AUTHOR Manfred Kaltenbacher]]|
|[[Kameoka, Hirokazu|AUTHOR Hirokazu Kameoka]]|
|[[Kaminishi, Ryota|AUTHOR Ryota Kaminishi]]|
|[[Kamiyama, Hosana|AUTHOR Hosana Kamiyama]]|
|[[Kamper, Herman|AUTHOR Herman Kamper]]|
|[[Kamyshev, Pasha|AUTHOR Pasha Kamyshev]]|
|[[Kanagasundaram, A.|AUTHOR A. Kanagasundaram]]|
|[[Kanda, Naoyuki|AUTHOR Naoyuki Kanda]]|
|[[Kane, Benjamin|AUTHOR Benjamin Kane]]|
|[[Kane, John|AUTHOR John Kane]]|
|[[Kaneko, Takuhiro|AUTHOR Takuhiro Kaneko]]|
|[[Kanervisto, Anssi|AUTHOR Anssi Kanervisto]]|
|[[Kang, Hong-Goo|AUTHOR Hong-Goo Kang]]|
|[[Kang, Jian|AUTHOR Jian Kang]]|
|[[Kang, Jintao|AUTHOR Jintao Kang]]|
|[[Kang, Shiyin|AUTHOR Shiyin Kang]]|
|[[Kang, Woo Hyun|AUTHOR Woo Hyun Kang]]|
|[[Kannan, Anjuli|AUTHOR Anjuli Kannan]]|
|[[Kanu, John D.|AUTHOR John D. Kanu]]|
|[[Kanvesky, Dimitri|AUTHOR Dimitri Kanvesky]]|
|[[Kao, Chieh-Chi|AUTHOR Chieh-Chi Kao]]|
|[[Kao, Justine T.|AUTHOR Justine T. Kao]]|
|[[Karadayi, Julien|AUTHOR Julien Karadayi]]|
|[[Karafiát, Martin|AUTHOR Martin Karafiát]]|
|[[Karaulov, Ievgen|AUTHOR Ievgen Karaulov]]|
|[[Karbar, Raj|AUTHOR Raj Karbar]]|
|[[Karhila, Reima|AUTHOR Reima Karhila]]|
|[[Karita, Shigeki|AUTHOR Shigeki Karita]]|
|[[Kashiwagi, Yosuke|AUTHOR Yosuke Kashiwagi]]|
|[[Kaszás, Valér|AUTHOR Valér Kaszás]]|
|[[Katsamanis, Athanasios|AUTHOR Athanasios Katsamanis]]|
|[[Katsos, N.|AUTHOR N. Katsos]]|
|[[Kaur, Preeti|AUTHOR Preeti Kaur]]|
|[[Kaushik, Lakshmish|AUTHOR Lakshmish Kaushik]]|
|[[Kawahara, Hideki|AUTHOR Hideki Kawahara]]|
|[[Kawahara, Tatsuya|AUTHOR Tatsuya Kawahara]]|
|[[Kawai, Hisashi|AUTHOR Hisashi Kawai]]|
|[[Kayi, Abdullah|AUTHOR Abdullah Kayi]]|
|[[Kelly, Niamh E.|AUTHOR Niamh E. Kelly]]|
|[[Kelterer, Anneliese|AUTHOR Anneliese Kelterer]]|
|[[Kenny, Patrick|AUTHOR Patrick Kenny]]|
|[[Kenyon, Robert V.|AUTHOR Robert V. Kenyon]]|
|[[Keren, Gil|AUTHOR Gil Keren]]|
|[[Kersner, Martin|AUTHOR Martin Kersner]]|
|[[Keshet, Joseph|AUTHOR Joseph Keshet]]|
|[[Keshishian, Lara|AUTHOR Lara Keshishian]]|
|[[Keskin, Gokce|AUTHOR Gokce Keskin]]|
|[[Kessler, Lucas|AUTHOR Lucas Kessler]]|
|[[Khalifa, Salam|AUTHOR Salam Khalifa]]|
|[[Khalifa, Sara|AUTHOR Sara Khalifa]]|
|[[Khan, Umair|AUTHOR Umair Khan]]|
|[[Kharaman, Mariya|AUTHOR Mariya Kharaman]]|
|[[Khare, Shreya|AUTHOR Shreya Khare]]|
|[[Khassanov, Yerbolat|AUTHOR Yerbolat Khassanov]]|
|[[Khokhlov, Yuri|AUTHOR Yuri Khokhlov]]|
|[[Khoo, Kevin|AUTHOR Kevin Khoo]]|
|[[Khorram, Soheil|AUTHOR Soheil Khorram]]|
|[[Khorrami, Khazar|AUTHOR Khazar Khorrami]]|
|[[Khoury, Elie|AUTHOR Elie Khoury]]|
|[[Khudanpur, Sanjeev|AUTHOR Sanjeev Khudanpur]]|
|[[Kida, Yusuke|AUTHOR Yusuke Kida]]|
|[[Kilgour, Kevin|AUTHOR Kevin Kilgour]]|
|[[Kim, Beomsu|AUTHOR Beomsu Kim]]|
|[[Kim, Changmin|AUTHOR Changmin Kim]]|
|[[Kim, Chanwoo|AUTHOR Chanwoo Kim]]|
|[[Kim, Dongyoung|AUTHOR Dongyoung Kim]]|
|[[Kim, Heejin|AUTHOR Heejin Kim]]|
|[[Kim, Hoirin|AUTHOR Hoirin Kim]]|
|[[Kim, Hong Kook|AUTHOR Hong Kook Kim]]|
|[[Kim, Hyung Yong|AUTHOR Hyung Yong Kim]]|
|[[Kim, Jae-Min|AUTHOR Jae-Min Kim]]|
|[[Kim, Jeesun|AUTHOR Jeesun Kim]]|
|[[Kim, Jeunghun|AUTHOR Jeunghun Kim]]|
|[[Kim, Ji-Hwan|AUTHOR Ji-Hwan Kim]]|
|[[Kim, Ju-ho|AUTHOR Ju-ho Kim]]|
|[[Kim, Kwangyoun|AUTHOR Kwangyoun Kim]]|
|[[Kim, Minje|AUTHOR Minje Kim]]|
|[[Kim, Nam Soo|AUTHOR Nam Soo Kim]]|
|[[Kim, Sang-Hun|AUTHOR Sang-Hun Kim]]|
|[[Kim, Seong Ju|AUTHOR Seong Ju Kim]]|
|[[Kim, Suyoun|AUTHOR Suyoun Kim]]|
|[[Kim, Tae-Ho|AUTHOR Tae-Ho Kim]]|
|[[Kim, Taehwan|AUTHOR Taehwan Kim]]|
|[[Kim, Taesu|AUTHOR Taesu Kim]]|
|[[Kim, Younggwan|AUTHOR Younggwan Kim]]|
|[[Kim, Youngsoo|AUTHOR Youngsoo Kim]]|
|[[Kindt, Els|AUTHOR Els Kindt]]|
|[[King, Hannah|AUTHOR Hannah King]]|
|[[King, Simon|AUTHOR Simon King]]|
|[[Kingsbury, Brian|AUTHOR Brian Kingsbury]]|
|[[Kinnunen, Tomi H.|AUTHOR Tomi H. Kinnunen]]|
|[[Kinoshita, Keisuke|AUTHOR Keisuke Kinoshita]]|
|[[Kirchhoff, Katrin|AUTHOR Katrin Kirchhoff]]|
|[[Kisler, Thomas|AUTHOR Thomas Kisler]]|
|[[Kiss, Gábor|AUTHOR Gábor Kiss]]|
|[[Kitamura, Tatsuya|AUTHOR Tatsuya Kitamura]]|
|[[Kitaoka, Norihide|AUTHOR Norihide Kitaoka]]|
|[[Kitza, Markus|AUTHOR Markus Kitza]]|
|[[Kiya, Hitoshi|AUTHOR Hitoshi Kiya]]|
|[[Kjaran, Róbert|AUTHOR Róbert Kjaran]]|
|[[Kleijn, W. Bastiaan|AUTHOR W. Bastiaan Kleijn]]|
|[[Klein, Christopher|AUTHOR Christopher Klein]]|
|[[Klein, Eugen|AUTHOR Eugen Klein]]|
|[[Kleinlein, Ricardo|AUTHOR Ricardo Kleinlein]]|
|[[Klejch, Ondřej|AUTHOR Ondřej Klejch]]|
|[[Klimkov, Viacheslav|AUTHOR Viacheslav Klimkov]]|
|[[Klingler, Nicola|AUTHOR Nicola Klingler]]|
|[[Klumpp, Philipp|AUTHOR Philipp Klumpp]]|
|[[Knill, Kate M.|AUTHOR Kate M. Knill]]|
|[[Ko, Tom|AUTHOR Tom Ko]]|
|[[Kobashikawa, Satoshi|AUTHOR Satoshi Kobashikawa]]|
|[[Kobayashi, Kazuhiro|AUTHOR Kazuhiro Kobayashi]]|
|[[Kobayashi, Takao|AUTHOR Takao Kobayashi]]|
|[[Kobayashi, Tetsunori|AUTHOR Tetsunori Kobayashi]]|
|[[Kobayashi, Yuka|AUTHOR Yuka Kobayashi]]|
|[[Koch, Philipp|AUTHOR Philipp Koch]]|
|[[Kocharov, Daniil|AUTHOR Daniil Kocharov]]|
|[[Kochetov, Alexei|AUTHOR Alexei Kochetov]]|
|[[Kodrasi, Ina|AUTHOR Ina Kodrasi]]|
|[[Koemans, Jiska|AUTHOR Jiska Koemans]]|
|[[Koh, Jia Xin|AUTHOR Jia Xin Koh]]|
|[[Koishida, Kazuhito|AUTHOR Kazuhito Koishida]]|
|[[Koltun, Vladlen|AUTHOR Vladlen Koltun]]|
|[[Komatsu, Tatsuya|AUTHOR Tatsuya Komatsu]]|
|[[Kong, Anthony Pak Hin|AUTHOR Anthony Pak Hin Kong]]|
|[[Kons, Zvi|AUTHOR Zvi Kons]]|
|[[Koo, Junghyun|AUTHOR Junghyun Koo]]|
|[[Koolagudi, Shashidhar G.|AUTHOR Shashidhar G. Koolagudi]]|
|[[Kopparapu, Sunil Kumar|AUTHOR Sunil Kumar Kopparapu]]|
|[[Koppelmann, Timm|AUTHOR Timm Koppelmann]]|
|[[Korenevskaya, Mariya|AUTHOR Mariya Korenevskaya]]|
|[[Koriyama, Tomoki|AUTHOR Tomoki Koriyama]]|
|[[Korpusik, Mandy|AUTHOR Mandy Korpusik]]|
|[[Korus, Paweł|AUTHOR Paweł Korus]]|
|[[Korzekwa, Daniel|AUTHOR Daniel Korzekwa]]|
|[[Kose, Oyku Deniz|AUTHOR Oyku Deniz Kose]]|
|[[Koshinaka, Takafumi|AUTHOR Takafumi Koshinaka]]|
|[[Kośmider, Michał|AUTHOR Michał Kośmider]]|
|[[Kostek, Bozena|AUTHOR Bozena Kostek]]|
|[[Kostka, Bartosz|AUTHOR Bartosz Kostka]]|
|[[Kothapally, Vinay|AUTHOR Vinay Kothapally]]|
|[[Koumparoulis, Alexandros|AUTHOR Alexandros Koumparoulis]]|
|[[Koushanfar, Farinaz|AUTHOR Farinaz Koushanfar]]|
|[[Kovács, György|AUTHOR György Kovács]]|
|[[Koychev, Ivan|AUTHOR Ivan Koychev]]|
|[[Kozlov, Alexandr|AUTHOR Alexandr Kozlov]]|
|[[Kracun, Aleks|AUTHOR Aleks Kracun]]|
|[[Krajewski, Jarek|AUTHOR Jarek Krajewski]]|
|[[Král, Pavel|AUTHOR Pavel Král]]|
|[[Kraljevski, Ivan|AUTHOR Ivan Kraljevski]]|
|[[Krishnamurthy, Karthik|AUTHOR Karthik Krishnamurthy]]|
|[[Krishnan, Sridhar|AUTHOR Sridhar Krishnan]]|
|[[Kuang, Jilong|AUTHOR Jilong Kuang]]|
|[[Kubasova, Uliyana|AUTHOR Uliyana Kubasova]]|
|[[Kuchaiev, Oleksii|AUTHOR Oleksii Kuchaiev]]|
|[[Küderle, A.|AUTHOR A. Küderle]]|
|[[Kukanov, Ivan|AUTHOR Ivan Kukanov]]|
|[[Kumakura, Toshiyuki|AUTHOR Toshiyuki Kumakura]]|
|[[Kumar, Anushree Prasanna|AUTHOR Anushree Prasanna Kumar]]|
|[[Kumar, Manoj|AUTHOR Manoj Kumar]]|
|[[Kumar, Mehul|AUTHOR Mehul Kumar]]|
|[[Kumar, Naveen|AUTHOR Naveen Kumar]]|
|[[Kumar, Shashi|AUTHOR Shashi Kumar]]|
|[[Kumar, Yaman|AUTHOR Yaman Kumar]]|
|[[Kunešová, Marie|AUTHOR Marie Kunešová]]|
|[[Kung, David|AUTHOR David Kung]]|
|[[Kuo, F.-Y.|AUTHOR F.-Y. Kuo]]|
|[[Kuo, Tei-Wei|AUTHOR Tei-Wei Kuo]]|
|[[Kurata, Gakuto|AUTHOR Gakuto Kurata]]|
|[[Kurimo, Mikko|AUTHOR Mikko Kurimo]]|
|[[Kurita, Yusuke|AUTHOR Yusuke Kurita]]|
|[[Kuswah, Tejendra Singh|AUTHOR Tejendra Singh Kuswah]]|
|[[Kwatra, Sanjeev|AUTHOR Sanjeev Kwatra]]|
|[[Kwiatkowski, Cezary|AUTHOR Cezary Kwiatkowski]]|
|[[Kwon, Oh-Wook|AUTHOR Oh-Wook Kwon]]|
|[[Kyriakopoulos, Konstantinos|AUTHOR Konstantinos Kyriakopoulos]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Ladkat, Pranav|AUTHOR Pranav Ladkat]]|
|[[Lahiri, Rimita|AUTHOR Rimita Lahiri]]|
|[[Lai, Catherine|AUTHOR Catherine Lai]]|
|[[Lai, Cheng-I|AUTHOR Cheng-I Lai]]|
|[[Lai, Ying-Hui|AUTHOR Ying-Hui Lai]]|
|[[Lajszczak, Mateusz|AUTHOR Mateusz Lajszczak]]|
|[[Lakhdhar, Khaled|AUTHOR Khaled Lakhdhar]]|
|[[Lakomkin, Egor|AUTHOR Egor Lakomkin]]|
|[[Lala, Divesh|AUTHOR Divesh Lala]]|
|[[Lalhminghlui, Wendy|AUTHOR Wendy Lalhminghlui]]|
|[[Lalonde, Kaylah|AUTHOR Kaylah Lalonde]]|
|[[Lam, Max W.Y.|AUTHOR Max W.Y. Lam]]|
|[[Lamb, Maurice|AUTHOR Maurice Lamb]]|
|[[Lambourne, Nicholas|AUTHOR Nicholas Lambourne]]|
|[[Lamel, Lori|AUTHOR Lori Lamel]]|
|[[Lammert, Adam C.|AUTHOR Adam C. Lammert]]|
|[[Lan, Guitang|AUTHOR Guitang Lan]]|
|[[Lan, Xiang|AUTHOR Xiang Lan]]|
|[[Lancelot, François|AUTHOR François Lancelot]]|
|[[Lanchantin, Pierre|AUTHOR Pierre Lanchantin]]|
|[[Łańcucki, Adrian|AUTHOR Adrian Łańcucki]]|
|[[Lane, Ian|AUTHOR Ian Lane]]|
|[[Lane, Nicholas D.|AUTHOR Nicholas D. Lane]]|
|[[Lang, Oran|AUTHOR Oran Lang]]|
|[[Lange, Patrick|AUTHOR Patrick Lange]]|
|[[Langer, Stefan|AUTHOR Stefan Langer]]|
|[[Laoide-Kemp, Caoimhín|AUTHOR Caoimhín Laoide-Kemp]]|
|[[Lapata, Mirella|AUTHOR Mirella Lapata]]|
|[[Lapidot, Itshak|AUTHOR Itshak Lapidot]]|
|[[Laprie, Yves|AUTHOR Yves Laprie]]|
|[[Larcher, Anthony|AUTHOR Anthony Larcher]]|
|[[Laskaridis, Stefanos|AUTHOR Stefanos Laskaridis]]|
|[[Latif, Siddique|AUTHOR Siddique Latif]]|
|[[Latorre, Javier|AUTHOR Javier Latorre]]|
|[[Laurent, Antoine|AUTHOR Antoine Laurent]]|
|[[Laurie, Ben|AUTHOR Ben Laurie]]|
|[[Lavrentyeva, Galina|AUTHOR Galina Lavrentyeva]]|
|[[Lavrukhin, Vitaly|AUTHOR Vitaly Lavrukhin]]|
|[[Lawson, Aaron|AUTHOR Aaron Lawson]]|
|[[Lazaridis, Alexandros|AUTHOR Alexandros Lazaridis]]|
|[[Le, Quoc V.|AUTHOR Quoc V. Le]]|
|[[Leary, Ryan|AUTHOR Ryan Leary]]|
|[[Lebensold, Jonathan|AUTHOR Jonathan Lebensold]]|
|[[Lecomte, Pauline|AUTHOR Pauline Lecomte]]|
|[[Lee, Ann|AUTHOR Ann Lee]]|
|[[Lee, Bong-Jin|AUTHOR Bong-Jin Lee]]|
|[[Lee, Cheng-Kuang|AUTHOR Cheng-Kuang Lee]]|
|[[Lee, Chi-Chun|AUTHOR Chi-Chun Lee]]|
|[[Lee, Ching-Hua|AUTHOR Ching-Hua Lee]]|
|[[Lee, Chin-Hui|AUTHOR Chin-Hui Lee]]|
|[[Lee, Chong Min|AUTHOR Chong Min Lee]]|
|[[Lee, Donghyun|AUTHOR Donghyun Lee]]|
|[[Lee, Geon Woo|AUTHOR Geon Woo Lee]]|
|[[Lee, Grandee|AUTHOR Grandee Lee]]|
|[[Lee, Hung-Shin|AUTHOR Hung-Shin Lee]]|
|[[Lee, Hung-Yi|AUTHOR Hung-Yi Lee]]|
|[[Lee, Hyeonseung|AUTHOR Hyeonseung Lee]]|
|[[Lee, Jinwoo|AUTHOR Jinwoo Lee]]|
|[[Lee, Juheon|AUTHOR Juheon Lee]]|
|[[Lee, Jung Hyuk|AUTHOR Jung Hyuk Lee]]|
|[[Lee, Jungwon|AUTHOR Jungwon Lee]]|
|[[Lee, Kong Aik|AUTHOR Kong Aik Lee]]|
|[[Lee, Kyogu|AUTHOR Kyogu Lee]]|
|[[Lee, Lin-shan|AUTHOR Lin-shan Lee]]|
|[[Lee, Mi Suk|AUTHOR Mi Suk Lee]]|
|[[Lee, Myungwoo|AUTHOR Myungwoo Lee]]|
|[[Lee, Robert|AUTHOR Robert Lee]]|
|[[Lee, Seungji|AUTHOR Seungji Lee]]|
|[[Lee, Soo-Young|AUTHOR Soo-Young Lee]]|
|[[Lee, Tan|AUTHOR Tan Lee]]|
|[[Lee, Yong-cheol|AUTHOR Yong-cheol Lee]]|
|[[Lee, Younggun|AUTHOR Younggun Lee]]|
|[[Lehéricy, Stéphane|AUTHOR Stéphane Lehéricy]]|
|[[Lei, Ming|AUTHOR Ming Lei]]|
|[[Lei, Yun|AUTHOR Yun Lei]]|
|[[Leimkötter, Sarah|AUTHOR Sarah Leimkötter]]|
|[[Lenc, Ladislav|AUTHOR Ladislav Lenc]]|
|[[Lerner, Anat|AUTHOR Anat Lerner]]|
|[[Le Roux, Jonathan|AUTHOR Jonathan Le Roux]]|
|[[Leutnant, Volker|AUTHOR Volker Leutnant]]|
|[[Lev-Ari, Shiri|AUTHOR Shiri Lev-Ari]]|
|[[Levitan, Rivka|AUTHOR Rivka Levitan]]|
|[[Levy, Golan|AUTHOR Golan Levy]]|
|[[Lewandowski, Natalie|AUTHOR Natalie Lewandowski]]|
|[[Lewis, Scott|AUTHOR Scott Lewis]]|
|[[Leykum, Hannah|AUTHOR Hannah Leykum]]|
|[[Li, Aijun|AUTHOR Aijun Li]]|
|[[Li, Bin|AUTHOR Bin Li]]|
|[[Li, Bo|AUTHOR Bo Li]]|
|[[Li, Changliang|AUTHOR Changliang Li]]|
|[[Li, Chenda|AUTHOR Chenda Li]]|
|[[Li, Guangzhi|AUTHOR Guangzhi Li]]|
|[[Li, Guanjun|AUTHOR Guanjun Li]]|
|[[Li, Haizhou|AUTHOR Haizhou Li]]|
|[[Li, Haoqi|AUTHOR Haoqi Li]]|
|[[Li, Jason|AUTHOR Jason Li]]|
|[[Li, Jeng-Lin|AUTHOR Jeng-Lin Li]]|
|[[Li, Jianze|AUTHOR Jianze Li]]|
|[[Li, Jingbei|AUTHOR Jingbei Li]]|
|[[Li, Jingyang|AUTHOR Jingyang Li]]|
|[[Li, Jinyu|AUTHOR Jinyu Li]]|
|[[Li, Lantian|AUTHOR Lantian Li]]|
|[[Li, Lin|AUTHOR Lin Li]]|
|[[Li, Meng|AUTHOR Meng Li]]|
|[[Li, Ming|AUTHOR Ming Li]]|
|[[Li, Mohan|AUTHOR Mohan Li]]|
|[[Li, Nan|AUTHOR Nan Li]]|
|[[Li, Pengcheng|AUTHOR Pengcheng Li]]|
|[[Li, Rongjin|AUTHOR Rongjin Li]]|
|[[Li, Ruizhi|AUTHOR Ruizhi Li]]|
|[[Li, Runnan|AUTHOR Runnan Li]]|
|[[Li, Sarah R.|AUTHOR Sarah R. Li]]|
|[[Li, Sheng|AUTHOR Sheng Li]]|
|[[Li, Ta|AUTHOR Ta Li]]|
|[[Li, Tony Y.|AUTHOR Tony Y. Li]]|
|[[Li, Wei|AUTHOR Wei Li]]|
|[[Li, Wenjie|AUTHOR Wenjie Li]]|
|[[Li, Wu-Jun|AUTHOR Wu-Jun Li]]|
|[[Li, Xiangang|AUTHOR Xiangang Li]]|
|[[Li, Xiaoqi|AUTHOR Xiaoqi Li]]|
|[[Li, Xingfeng|AUTHOR Xingfeng Li]]|
|[[Li, Xinjian|AUTHOR Xinjian Li]]|
|[[Li, Xinwei|AUTHOR Xinwei Li]]|
|[[Li, Xinyu|AUTHOR Xinyu Li]]|
|[[Li, Xiulin|AUTHOR Xiulin Li]]|
|[[Li, Xu|AUTHOR Xu Li]]|
|[[Li, Yaxing|AUTHOR Yaxing Li]]|
|[[Li, Yingjie|AUTHOR Yingjie Li]]|
|[[Li, Yuanchao|AUTHOR Yuanchao Li]]|
|[[Li, Zheng|AUTHOR Zheng Li]]|
|[[Li, Zhixuan|AUTHOR Zhixuan Li]]|
|[[Liakata, Maria|AUTHOR Maria Liakata]]|
|[[Lian, Chongyuan|AUTHOR Chongyuan Lian]]|
|[[Lian, Zheng|AUTHOR Zheng Lian]]|
|[[Liang, Jingjun|AUTHOR Jingjun Liang]]|
|[[Liang, Qiao|AUTHOR Qiao Liang]]|
|[[Liang, Shan|AUTHOR Shan Liang]]|
|[[Liang, Shuang|AUTHOR Shuang Liang]]|
|[[Liang, Yulong|AUTHOR Yulong Liang]]|
|[[Liao, Chien-Feng|AUTHOR Chien-Feng Liao]]|
|[[Liao, Hank|AUTHOR Hank Liao]]|
|[[Liberman, Mark|AUTHOR Mark Liberman]]|
|[[Lieow, Wei Xiang|AUTHOR Wei Xiang Lieow]]|
|[[Likhomanenko, Tatiana|AUTHOR Tatiana Likhomanenko]]|
|[[Lim, Felicia S.C.|AUTHOR Felicia S.C. Lim]]|
|[[Lim, Hyungjun|AUTHOR Hyungjun Lim]]|
|[[Lim, Minkyu|AUTHOR Minkyu Lim]]|
|[[Lin, Huibin|AUTHOR Huibin Lin]]|
|[[Lin, Ju|AUTHOR Ju Lin]]|
|[[Lin, Meng-Han|AUTHOR Meng-Han Lin]]|
|[[Lin, Qingjian|AUTHOR Qingjian Lin]]|
|[[Lin, Ruixi|AUTHOR Ruixi Lin]]|
|[[Lin, Yonghua|AUTHOR Yonghua Lin]]|
|[[Lin, Yu-Chen|AUTHOR Yu-Chen Lin]]|
|[[Lin, Yun-Shao|AUTHOR Yun-Shao Lin]]|
|[[Linarès, Georges|AUTHOR Georges Linarès]]|
|[[Ling, Zhen-Hua|AUTHOR Zhen-Hua Ling]]|
|[[Lingenfelser, Florian|AUTHOR Florian Lingenfelser]]|
|[[Liss, Julie M.|AUTHOR Julie M. Liss]]|
|[[Litman, Diane|AUTHOR Diane Litman]]|
|[[Liu, Andy T.|AUTHOR Andy T. Liu]]|
|[[Liu, Bin|AUTHOR Bin Liu]]|
|[[Liu, Chang|AUTHOR Chang Liu]]|
|[[Liu, Chang|AUTHOR Chang Liu]]|
|[[Liu, Chaoran|AUTHOR Chaoran Liu]]|
|[[Liu, Chunxi|AUTHOR Chunxi Liu]]|
|[[Liu, Da-Rong|AUTHOR Da-Rong Liu]]|
|[[Liu, Diyuan|AUTHOR Diyuan Liu]]|
|[[Liu, Gang|AUTHOR Gang Liu]]|
|[[Liu, Gang|AUTHOR Gang Liu]]|
|[[Liu, Hongzhi|AUTHOR Hongzhi Liu]]|
|[[Liu, Jia|AUTHOR Jia Liu]]|
|[[Liu, Lei|AUTHOR Lei Liu]]|
|[[Liu, Li|AUTHOR Li Liu]]|
|[[Liu, Liu|AUTHOR Liu Liu]]|
|[[Liu, Min|AUTHOR Min Liu]]|
|[[Liu, Rujie|AUTHOR Rujie Liu]]|
|[[Liu, Shan|AUTHOR Shan Liu]]|
|[[Liu, Shansong|AUTHOR Shansong Liu]]|
|[[Liu, Songxiang|AUTHOR Songxiang Liu]]|
|[[Liu, Tianchi|AUTHOR Tianchi Liu]]|
|[[Liu, Tie-Yan|AUTHOR Tie-Yan Liu]]|
|[[Liu, Wenju|AUTHOR Wenju Liu]]|
|[[Liu, Xunying|AUTHOR Xunying Liu]]|
|[[Liu, Yang|AUTHOR Yang Liu]]|
|[[Liu, Yi|AUTHOR Yi Liu]]|
|[[Liu, Yi-Ching|AUTHOR Yi-Ching Liu]]|
|[[Liu, Ying|AUTHOR Ying Liu]]|
|[[Liu, Yi-Wen|AUTHOR Yi-Wen Liu]]|
|[[Liu, Yuan|AUTHOR Yuan Liu]]|
|[[Liu, Yuchen|AUTHOR Yuchen Liu]]|
|[[Liu, Yun|AUTHOR Yun Liu]]|
|[[Liu, Zhicheng|AUTHOR Zhicheng Liu]]|
|[[Liu, Zoe|AUTHOR Zoe Liu]]|
|[[Livescu, Karen|AUTHOR Karen Livescu]]|
|[[Liwicki, Marcus|AUTHOR Marcus Liwicki]]|
|[[Lleida, Eduardo|AUTHOR Eduardo Lleida]]|
|[[Llombart, Jorge|AUTHOR Jorge Llombart]]|
|[[Lluís, Francesc|AUTHOR Francesc Lluís]]|
|[[Lo, Chen-Chou|AUTHOR Chen-Chou Lo]]|
|[[Lo, Roger Yu-Hsiang|AUTHOR Roger Yu-Hsiang Lo]]|
|[[Lolive, Damien|AUTHOR Damien Lolive]]|
|[[Lopes, Vanessa|AUTHOR Vanessa Lopes]]|
|[[López-Espejo, Iván|AUTHOR Iván López-Espejo]]|
|[[Lopez Moreno, Ignacio|AUTHOR Ignacio Lopez Moreno]]|
|[[Lorenzo-Trueba, Jaime|AUTHOR Jaime Lorenzo-Trueba]]|
|[[Lorré, Jean-Pierre|AUTHOR Jean-Pierre Lorré]]|
|[[Lotfian, Reza|AUTHOR Reza Lotfian]]|
|[[Loukina, Anastassia|AUTHOR Anastassia Loukina]]|
|[[Loweimi, Erfan|AUTHOR Erfan Loweimi]]|
|[[Lozo, Carina|AUTHOR Carina Lozo]]|
|[[Lu, Hui|AUTHOR Hui Lu]]|
|[[Lu, Jing|AUTHOR Jing Lu]]|
|[[Lu, Kangkang|AUTHOR Kangkang Lu]]|
|[[Lu, Liang|AUTHOR Liang Lu]]|
|[[Lu, Wenhuan|AUTHOR Wenhuan Lu]]|
|[[Lu, Xugang|AUTHOR Xugang Lu]]|
|[[Lu, Y.|AUTHOR Y. Lu]]|
|[[Lu, Yang|AUTHOR Yang Lu]]|
|[[Lu, Zheng-Chi|AUTHOR Zheng-Chi Lu]]|
|[[Luan, Jian|AUTHOR Jian Luan]]|
|[[Lubold, Nichola|AUTHOR Nichola Lubold]]|
|[[Ludusan, Bogdan|AUTHOR Bogdan Ludusan]]|
|[[Lugosch, Loren|AUTHOR Loren Lugosch]]|
|[[Łukaszewicz, Anna|AUTHOR Anna Łukaszewicz]]|
|[[Łukaszewicz, Beata|AUTHOR Beata Łukaszewicz]]|
|[[Lumban Tobing, Patrick|AUTHOR Patrick Lumban Tobing]]|
|[[Luna Jiménez, Cristina|AUTHOR Cristina Luna Jiménez]]|
|[[Luo, Hongyin|AUTHOR Hongyin Luo]]|
|[[Luo, Hui|AUTHOR Hui Luo]]|
|[[Luo, Shan|AUTHOR Shan Luo]]|
|[[Luong, Hieu-Thi|AUTHOR Hieu-Thi Luong]]|
|[[Lüscher, Christoph|AUTHOR Christoph Lüscher]]|
|[[Luttenberger, Jan|AUTHOR Jan Luttenberger]]|
|[[Luz, Saturnino|AUTHOR Saturnino Luz]]|
|[[Lv, Zhiqiang|AUTHOR Zhiqiang Lv]]|
|[[Lyons, Terry|AUTHOR Terry Lyons]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[M., Gurunath Reddy|AUTHOR Gurunath Reddy M.]]|
|[[M., Rejisha T.|AUTHOR Rejisha T. M.]]|
|[[Ma, Bin|AUTHOR Bin Ma]]|
|[[Ma, Feng|AUTHOR Feng Ma]]|
|[[M.A., Harsha Vardhan|AUTHOR Harsha Vardhan M.A.]]|
|[[Ma, Jun|AUTHOR Jun Ma]]|
|[[Ma, Pingchuan|AUTHOR Pingchuan Ma]]|
|[[Ma, Siyuan|AUTHOR Siyuan Ma]]|
|[[Ma, Tao|AUTHOR Tao Ma]]|
|[[Ma, Yide|AUTHOR Yide Ma]]|
|[[Ma, Yong|AUTHOR Yong Ma]]|
|[[Maas, Roland|AUTHOR Roland Maas]]|
|[[Mac, Khoi-Nguyen C.|AUTHOR Khoi-Nguyen C. Mac]]|
|[[Macherey, Wolfgang|AUTHOR Wolfgang Macherey]]|
|[[MacKnight, Mitchell|AUTHOR Mitchell MacKnight]]|
|[[Madhavi, Maulik|AUTHOR Maulik Madhavi]]|
|[[Madnani, Nitin|AUTHOR Nitin Madnani]]|
|[[Maekaku, Takashi|AUTHOR Takashi Maekaku]]|
|[[Maekawa, Kikuo|AUTHOR Kikuo Maekawa]]|
|[[Magalhães, João|AUTHOR João Magalhães]]|
|[[Magimai-Doss, Mathew|AUTHOR Mathew Magimai-Doss]]|
|[[Mahalingam, Neeraja|AUTHOR Neeraja Mahalingam]]|
|[[Mahata, Debanjan|AUTHOR Debanjan Mahata]]|
|[[Mahon, Eoin|AUTHOR Eoin Mahon]]|
|[[Maier, Andreas|AUTHOR Andreas Maier]]|
|[[Mak, Brian|AUTHOR Brian Mak]]|
|[[Mak, Man-Wai|AUTHOR Man-Wai Mak]]|
|[[Malhotra, Karan|AUTHOR Karan Malhotra]]|
|[[Malisz, Zofia|AUTHOR Zofia Malisz]]|
|[[Mallela, Jhansi|AUTHOR Jhansi Mallela]]|
|[[Mallidi, Sri Harish|AUTHOR Sri Harish Mallidi]]|
|[[Mallol-Ragolta, Adria|AUTHOR Adria Mallol-Ragolta]]|
|[[Mamun, Nursadul|AUTHOR Nursadul Mamun]]|
|[[Manakul, P.|AUTHOR P. Manakul]]|
|[[Mangone, Graziella|AUTHOR Graziella Mangone]]|
|[[Mani, Senthil|AUTHOR Senthil Mani]]|
|[[Manilow, Ethan|AUTHOR Ethan Manilow]]|
|[[Mannem, Renuka|AUTHOR Renuka Mannem]]|
|[[Manocha, Dinesh|AUTHOR Dinesh Manocha]]|
|[[Manohar, Vimal|AUTHOR Vimal Manohar]]|
|[[Manor, Yael|AUTHOR Yael Manor]]|
|[[Mansfield, Courtney|AUTHOR Courtney Mansfield]]|
|[[Mantena, Gautam|AUTHOR Gautam Mantena]]|
|[[Mao, Shuiyang|AUTHOR Shuiyang Mao]]|
|[[Marcel, Sébastien|AUTHOR Sébastien Marcel]]|
|[[Marchi, Erik|AUTHOR Erik Marchi]]|
|[[Margam, Dilip Kumar|AUTHOR Dilip Kumar Margam]]|
|[[Marinelli, Federico|AUTHOR Federico Marinelli]]|
|[[Marklund, Ellen|AUTHOR Ellen Marklund]]|
|[[Markó, Alexandra|AUTHOR Alexandra Markó]]|
|[[Marks, Tim K.|AUTHOR Tim K. Marks]]|
|[[Marras, Mirko|AUTHOR Mirko Marras]]|
|[[Martin, Rainer|AUTHOR Rainer Martin]]|
|[[Martín-Doñas, Juan M.|AUTHOR Juan M. Martín-Doñas]]|
|[[Martínek, Jiří|AUTHOR Jiří Martínek]]|
|[[Martinez, Victor R.|AUTHOR Victor R. Martinez]]|
|[[Martínez Ramírez, Marco A.|AUTHOR Marco A. Martínez Ramírez]]|
|[[Martins, Paula|AUTHOR Paula Martins]]|
|[[Marzinotto, Gabriel|AUTHOR Gabriel Marzinotto]]|
|[[Mason, Celeste|AUTHOR Celeste Mason]]|
|[[Mason, Lisa|AUTHOR Lisa Mason]]|
|[[Mast, T. Douglas|AUTHOR T. Douglas Mast]]|
|[[Masterson, Jack A.|AUTHOR Jack A. Masterson]]|
|[[Masumura, Ryo|AUTHOR Ryo Masumura]]|
|[[Masuyama, Yoshiki|AUTHOR Yoshiki Masuyama]]|
|[[Matějka, Pavel|AUTHOR Pavel Matějka]]|
|[[Mateju, Lukas|AUTHOR Lukas Mateju]]|
|[[Matias, Yossi|AUTHOR Yossi Matias]]|
|[[Matoušek, Jindřich|AUTHOR Jindřich Matoušek]]|
|[[Matsoukas, Spyros|AUTHOR Spyros Matsoukas]]|
|[[Matsui, Kiyoaki|AUTHOR Kiyoaki Matsui]]|
|[[Matsuyama, Yoichi|AUTHOR Yoichi Matsuyama]]|
|[[Matton, Katie|AUTHOR Katie Matton]]|
|[[Matuszewski, Mateusz|AUTHOR Mateusz Matuszewski]]|
|[[Maurer, Dieter|AUTHOR Dieter Maurer]]|
|[[Mayle, Alex|AUTHOR Alex Mayle]]|
|[[Mazuka, Reiko|AUTHOR Reiko Mazuka]]|
|[[Mazzawi, Hanna|AUTHOR Hanna Mazzawi]]|
|[[McAllaster, Don|AUTHOR Don McAllaster]]|
|[[McAuley, Julian|AUTHOR Julian McAuley]]|
|[[McCree, Alan|AUTHOR Alan McCree]]|
|[[McGraw, Ian|AUTHOR Ian McGraw]]|
|[[McInnis, Melvin G.|AUTHOR Melvin G. McInnis]]|
|[[McKelvey, Tamsin M.|AUTHOR Tamsin M. McKelvey]]|
|[[McLaren, Mitchell|AUTHOR Mitchell McLaren]]|
|[[McLoughlin, Ian|AUTHOR Ian McLoughlin]]|
|[[McNally, Maeve|AUTHOR Maeve McNally]]|
|[[McQuinn, Emmett|AUTHOR Emmett McQuinn]]|
|[[Mdhaffar, Salima|AUTHOR Salima Mdhaffar]]|
|[[Mead, Rebecca|AUTHOR Rebecca Mead]]|
|[[Medennikov, Ivan|AUTHOR Ivan Medennikov]]|
|[[Meer, Philipp|AUTHOR Philipp Meer]]|
|[[Mefferd, Antje S.|AUTHOR Antje S. Mefferd]]|
|[[Mehrabi, Adib|AUTHOR Adib Mehrabi]]|
|[[Mehta, Anuj|AUTHOR Anuj Mehta]]|
|[[Mehta, Daryush D.|AUTHOR Daryush D. Mehta]]|
|[[Meier, Moritz|AUTHOR Moritz Meier]]|
|[[Memon, Nasir|AUTHOR Nasir Memon]]|
|[[Ménard, Pierre-André|AUTHOR Pierre-André Ménard]]|
|[[Mendes, Carlos|AUTHOR Carlos Mendes]]|
|[[Meng, Helen|AUTHOR Helen Meng]]|
|[[Meng, Zhong|AUTHOR Zhong Meng]]|
|[[Menne, Tobias|AUTHOR Tobias Menne]]|
|[[Menon, Raghav|AUTHOR Raghav Menon]]|
|[[Merboldt, André|AUTHOR André Merboldt]]|
|[[Merkx, Danny|AUTHOR Danny Merkx]]|
|[[Merritt, Thomas|AUTHOR Thomas Merritt]]|
|[[Mertins, Alfred|AUTHOR Alfred Mertins]]|
|[[Messner, Eva-Maria|AUTHOR Eva-Maria Messner]]|
|[[Metcalf, Katherine|AUTHOR Katherine Metcalf]]|
|[[Metze, Florian|AUTHOR Florian Metze]]|
|[[Meyer, Bernd T.|AUTHOR Bernd T. Meyer]]|
|[[Meyer, Julien|AUTHOR Julien Meyer]]|
|[[Miao, Haoran|AUTHOR Haoran Miao]]|
|[[Miao, Xiaoxiao|AUTHOR Xiaoxiao Miao]]|
|[[Michael, Thilo|AUTHOR Thilo Michael]]|
|[[Michalsky, Jan|AUTHOR Jan Michalsky]]|
|[[Michel, Wilfried|AUTHOR Wilfried Michel]]|
|[[Mielke, Jeff|AUTHOR Jeff Mielke]]|
|[[Miguel, Antonio|AUTHOR Antonio Miguel]]|
|[[Milanovic, Melissa|AUTHOR Melissa Milanovic]]|
|[[Milde, Benjamin|AUTHOR Benjamin Milde]]|
|[[Minematsu, Nobuaki|AUTHOR Nobuaki Minematsu]]|
|[[Ming, Ji|AUTHOR Ji Ming]]|
|[[Mingote, Victoria|AUTHOR Victoria Mingote]]|
|[[Mirheidari, Bahman|AUTHOR Bahman Mirheidari]]|
|[[Mirshekarian, Sadegh|AUTHOR Sadegh Mirshekarian]]|
|[[Misaki, Masayuki|AUTHOR Masayuki Misaki]]|
|[[Miskic, Lucie|AUTHOR Lucie Miskic]]|
|[[Mislan, Aqilah|AUTHOR Aqilah Mislan]]|
|[[Misra, Abhinav|AUTHOR Abhinav Misra]]|
|[[Mitra, Vikramjit|AUTHOR Vikramjit Mitra]]|
|[[Mitrofanov, Anton|AUTHOR Anton Mitrofanov]]|
|[[Mitsufuji, Yuki|AUTHOR Yuki Mitsufuji]]|
|[[Mittag, Gabriel|AUTHOR Gabriel Mittag]]|
|[[Miwardelli, A.|AUTHOR A. Miwardelli]]|
|[[Miyamoto, Haruna|AUTHOR Haruna Miyamoto]]|
|[[Miyazaki, Noboru|AUTHOR Noboru Miyazaki]]|
|[[Mizera, Petr|AUTHOR Petr Mizera]]|
|[[Mizgajski, Jan|AUTHOR Jan Mizgajski]]|
|[[Möbius, Bernd|AUTHOR Bernd Möbius]]|
|[[Mochary, Ran|AUTHOR Ran Mochary]]|
|[[Mohammadi, Seyed Hamidreza|AUTHOR Seyed Hamidreza Mohammadi]]|
|[[Mohan, Anand|AUTHOR Anand Mohan]]|
|[[Mohapatra, Debasish Ray|AUTHOR Debasish Ray Mohapatra]]|
|[[Mohtarami, Mitra|AUTHOR Mitra Mohtarami]]|
|[[Moinet, Alexis|AUTHOR Alexis Moinet]]|
|[[Mołczanow, Janina|AUTHOR Janina Mołczanow]]|
|[[Möller, Sebastian|AUTHOR Sebastian Möller]]|
|[[Moniz, Helena|AUTHOR Helena Moniz]]|
|[[Montacié, Claude|AUTHOR Claude Montacié]]|
|[[Monteiro, João|AUTHOR João Monteiro]]|
|[[Montero, Juan Manuel|AUTHOR Juan Manuel Montero]]|
|[[Moore, Johanna D.|AUTHOR Johanna D. Moore]]|
|[[Moore, Meredith|AUTHOR Meredith Moore]]|
|[[Moore, Roger K.|AUTHOR Roger K. Moore]]|
|[[Mooshammer, Christine|AUTHOR Christine Mooshammer]]|
|[[Morchid, Mohamed|AUTHOR Mohamed Morchid]]|
|[[Morency, Louis-Philippe|AUTHOR Louis-Philippe Morency]]|
|[[Moreno, Pedro J.|AUTHOR Pedro J. Moreno]]|
|[[Morfi, Veronica|AUTHOR Veronica Morfi]]|
|[[Mori, Hiroki|AUTHOR Hiroki Mori]]|
|[[Morin, Emmanuel|AUTHOR Emmanuel Morin]]|
|[[Morita, Tomoki|AUTHOR Tomoki Morita]]|
|[[Moritz, Niko|AUTHOR Niko Moritz]]|
|[[Moriya, Takafumi|AUTHOR Takafumi Moriya]]|
|[[Moro-Velazquez, Laureano|AUTHOR Laureano Moro-Velazquez]]|
|[[Mortensen, Jonas Fromseier|AUTHOR Jonas Fromseier Mortensen]]|
|[[Morzy, Mikołaj|AUTHOR Mikołaj Morzy]]|
|[[Mošner, Ladislav|AUTHOR Ladislav Mošner]]|
|[[Motlicek, Petr|AUTHOR Petr Motlicek]]|
|[[Mou, Zhiwei|AUTHOR Zhiwei Mou]]|
|[[Mower Provost, Emily|AUTHOR Emily Mower Provost]]|
|[[Mowlaee, Pejman|AUTHOR Pejman Mowlaee]]|
|[[Mrech, Tarik|AUTHOR Tarik Mrech]]|
|[[Mruthyunjaya, Vishwas|AUTHOR Vishwas Mruthyunjaya]]|
|[[Mücke, Doris|AUTHOR Doris Mücke]]|
|[[Muckenhirn, Hannah|AUTHOR Hannah Muckenhirn]]|
|[[Mueller, Robert|AUTHOR Robert Mueller]]|
|[[Mulc, Thomas|AUTHOR Thomas Mulc]]|
|[[Mulder, K.|AUTHOR K. Mulder]]|
|[[Mulimani, Manjunath|AUTHOR Manjunath Mulimani]]|
|[[Müller, Markus|AUTHOR Markus Müller]]|
|[[Mulville, Marie|AUTHOR Marie Mulville]]|
|[[Murase, Yukitoshi|AUTHOR Yukitoshi Murase]]|
|[[Murphy, Andy|AUTHOR Andy Murphy]]|
|[[Murray, Gabriel|AUTHOR Gabriel Murray]]|
|[[Murthy, Hema A.|AUTHOR Hema A. Murthy]]|
|[[Murty, K. Sri Rama|AUTHOR K. Sri Rama Murty]]|
|[[Mustafa, Ahmed|AUTHOR Ahmed Mustafa]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Nachmani, Eliya|AUTHOR Eliya Nachmani]]|
|[[Nagamatsu, Kenji|AUTHOR Kenji Nagamatsu]]|
|[[Nagarsheth, Parav|AUTHOR Parav Nagarsheth]]|
|[[Nagata, Tomohiro|AUTHOR Tomohiro Nagata]]|
|[[Naik, Devang|AUTHOR Devang Naik]]|
|[[Naini, Abinay Reddy|AUTHOR Abinay Reddy Naini]]|
|[[Nakamura, Satoshi|AUTHOR Satoshi Nakamura]]|
|[[Nakamura, Shizuka|AUTHOR Shizuka Nakamura]]|
|[[Nakatani, Tomohiro|AUTHOR Tomohiro Nakatani]]|
|[[Nakov, Preslav|AUTHOR Preslav Nakov]]|
|[[Nallan Chakravarthula, Sandeep|AUTHOR Sandeep Nallan Chakravarthula]]|
|[[Nallanthighal, Venkata Srikanth|AUTHOR Venkata Srikanth Nallanthighal]]|
|[[Nanchen, Alexandre|AUTHOR Alexandre Nanchen]]|
|[[Nandwana, Mahesh Kumar|AUTHOR Mahesh Kumar Nandwana]]|
|[[Naor, Stav|AUTHOR Stav Naor]]|
|[[Narayanan, Shrikanth|AUTHOR Shrikanth Narayanan]]|
|[[Narisetty, Chaitanya|AUTHOR Chaitanya Narisetty]]|
|[[Nasir, Md.|AUTHOR Md. Nasir]]|
|[[Natarajan, Premkumar|AUTHOR Premkumar Natarajan]]|
|[[Nathwani, Karan|AUTHOR Karan Nathwani]]|
|[[Nautsch, Andreas|AUTHOR Andreas Nautsch]]|
|[[Navas, Eva|AUTHOR Eva Navas]]|
|[[Nayak, Shekhar|AUTHOR Shekhar Nayak]]|
|[[Nazareth, Deniece S.|AUTHOR Deniece S. Nazareth]]|
|[[Neekhara, Paarth|AUTHOR Paarth Neekhara]]|
|[[Negri, Matteo|AUTHOR Matteo Negri]]|
|[[Nellore, Bhanu Teja|AUTHOR Bhanu Teja Nellore]]|
|[[Nelson, Hanna|AUTHOR Hanna Nelson]]|
|[[Nelus, Alexandru|AUTHOR Alexandru Nelus]]|
|[[Németh, Géza|AUTHOR Géza Németh]]|
|[[Neto, João Paulo|AUTHOR João Paulo Neto]]|
|[[Neubig, Graham|AUTHOR Graham Neubig]]|
|[[Neumann, Michael|AUTHOR Michael Neumann]]|
|[[Nevado-Holgado, Alejo J.|AUTHOR Alejo J. Nevado-Holgado]]|
|[[Ney, Hermann|AUTHOR Hermann Ney]]|
|[[Ng, Charmaine|AUTHOR Charmaine Ng]]|
|[[Ng, Chip-Jin|AUTHOR Chip-Jin Ng]]|
|[[Ng, Manwa L.|AUTHOR Manwa L. Ng]]|
|[[Nguyen, Huyen|AUTHOR Huyen Nguyen]]|
|[[Nguyen, Patrick|AUTHOR Patrick Nguyen]]|
|[[Nguyen, Thai-Son|AUTHOR Thai-Son Nguyen]]|
|[[Nguyen, Truc|AUTHOR Truc Nguyen]]|
|[[Nguyen, Trung Hieu|AUTHOR Trung Hieu Nguyen]]|
|[[Ni, Chongjia|AUTHOR Chongjia Ni]]|
|[[Ni, Hao|AUTHOR Hao Ni]]|
|[[Ni, Jinfu|AUTHOR Jinfu Ni]]|
|[[Ní Chasaide, Ailbhe|AUTHOR Ailbhe Ní Chasaide]]|
|[[Nidadavolu, Phani Sankar|AUTHOR Phani Sankar Nidadavolu]]|
|[[Nie, Shuai|AUTHOR Shuai Nie]]|
|[[Niebuhr, Oliver|AUTHOR Oliver Niebuhr]]|
|[[Niehues, Jan|AUTHOR Jan Niehues]]|
|[[Niesler, Thomas|AUTHOR Thomas Niesler]]|
|[[Nijveld, Annika|AUTHOR Annika Nijveld]]|
|[[Nikandrou, Malvina|AUTHOR Malvina Nikandrou]]|
|[[Nikulásdóttir, Anna Björk|AUTHOR Anna Björk Nikulásdóttir]]|
|[[Nishida, Masafumi|AUTHOR Masafumi Nishida]]|
|[[Nishimura, Masafumi|AUTHOR Masafumi Nishimura]]|
|[[Nishimura, Ryota|AUTHOR Ryota Nishimura]]|
|[[Nishizaki, Hiromitsu|AUTHOR Hiromitsu Nishizaki]]|
|[[Nishizawa, Nobuyuki|AUTHOR Nobuyuki Nishizawa]]|
|[[Nissen, Rebecca|AUTHOR Rebecca Nissen]]|
|[[Nissen, Shawn|AUTHOR Shawn Nissen]]|
|[[Niu, Mingyue|AUTHOR Mingyue Niu]]|
|[[Niu, Sufeng|AUTHOR Sufeng Niu]]|
|[[Nolan, Francis|AUTHOR Francis Nolan]]|
|[[Nolasco, Inês|AUTHOR Inês Nolasco]]|
|[[Noll, Anton|AUTHOR Anton Noll]]|
|[[Nollstadt, Melissa|AUTHOR Melissa Nollstadt]]|
|[[Norel, Raquel|AUTHOR Raquel Norel]]|
|[[Nortje, André|AUTHOR André Nortje]]|
|[[Nortje, Leanne|AUTHOR Leanne Nortje]]|
|[[Nota, Yukiko|AUTHOR Yukiko Nota]]|
|[[Nöth, Elmar|AUTHOR Elmar Nöth]]|
|[[Noufi, Camille|AUTHOR Camille Noufi]]|
|[[Novak III, John S.|AUTHOR John S. Novak III]]|
|[[Novitasari, Sashi|AUTHOR Sashi Novitasari]]|
|[[Novoselov, Sergey|AUTHOR Sergey Novoselov]]|
|[[Novotný, Ondřej|AUTHOR Ondřej Novotný]]|
|[[Nozaki, Kazunori|AUTHOR Kazunori Nozaki]]|
|[[Nyberg, Eric|AUTHOR Eric Nyberg]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Oates, Christopher|AUTHOR Christopher Oates]]|
|[[Oba, Takanobu|AUTHOR Takanobu Oba]]|
|[[Ochiai, Tsubasa|AUTHOR Tsubasa Ochiai]]|
|[[Odille, Freddy|AUTHOR Freddy Odille]]|
|[[Ogata, Jun|AUTHOR Jun Ogata]]|
|[[Ogawa, Atsunori|AUTHOR Atsunori Ogawa]]|
|[[Ogawa, Tetsuji|AUTHOR Tetsuji Ogawa]]|
|[[Oh, Insoo|AUTHOR Insoo Oh]]|
|[[Oh, Junseok|AUTHOR Junseok Oh]]|
|[[Okabe, Koji|AUTHOR Koji Okabe]]|
|[[Okamoto, Takuma|AUTHOR Takuma Okamoto]]|
|[[Okawa, Masaki|AUTHOR Masaki Okawa]]|
|[[Öktem, Alp|AUTHOR Alp Öktem]]|
|[[Olaleye, Olaitan|AUTHOR Olaitan Olaleye]]|
|[[Olfati, Negar|AUTHOR Negar Olfati]]|
|[[Oliveira, Catarina|AUTHOR Catarina Oliveira]]|
|[[Ondel, Lucas|AUTHOR Lucas Ondel]]|
|[[Oneață, Dan|AUTHOR Dan Oneață]]|
|[[O’Neill, Emma|AUTHOR Emma O’Neill]]|
|[[Onnela, J.P.|AUTHOR J.P. Onnela]]|
|[[Onu, Charles C.|AUTHOR Charles C. Onu]]|
|[[Ooster, Jasper|AUTHOR Jasper Ooster]]|
|[[Oparin, Ilya|AUTHOR Ilya Oparin]]|
|[[Opher, Irit|AUTHOR Irit Opher]]|
|[[Opitz, Andreas|AUTHOR Andreas Opitz]]|
|[[Oplustil Gallegos, Pilar|AUTHOR Pilar Oplustil Gallegos]]|
|[[Ó Raghallaigh, Brian|AUTHOR Brian Ó Raghallaigh]]|
|[[Orozco-Arroyave, Juan Rafael|AUTHOR Juan Rafael Orozco-Arroyave]]|
|[[Ortega, Alfonso|AUTHOR Alfonso Ortega]]|
|[[O’Shaughnessy, Douglas|AUTHOR Douglas O’Shaughnessy]]|
|[[Ostendorf, Mari|AUTHOR Mari Ostendorf]]|
|[[Oualil, Youssef|AUTHOR Youssef Oualil]]|
|[[Ouni, Slim|AUTHOR Slim Ouni]]|
|[[Ouyang, Anda|AUTHOR Anda Ouyang]]|
|[[Ouyang, I.C.|AUTHOR I.C. Ouyang]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[P., Prathosh A.|AUTHOR Prathosh A. P.]]|
|[[Padi, Bharat|AUTHOR Bharat Padi]]|
|[[Paganoni, Sabrina|AUTHOR Sabrina Paganoni]]|
|[[Pagel, Lena|AUTHOR Lena Pagel]]|
|[[Pagel, Vincent|AUTHOR Vincent Pagel]]|
|[[Paine, Thomas|AUTHOR Thomas Paine]]|
|[[Pal, Monisankha|AUTHOR Monisankha Pal]]|
|[[Palaniappan, Ramaswamy|AUTHOR Ramaswamy Palaniappan]]|
|[[Palaz, Dimitri|AUTHOR Dimitri Palaz]]|
|[[Pan, Huashan|AUTHOR Huashan Pan]]|
|[[Pan, Jingshen|AUTHOR Jingshen Pan]]|
|[[Pan, Shifeng|AUTHOR Shifeng Pan]]|
|[[Pan, Xin|AUTHOR Xin Pan]]|
|[[Pan, Yilin|AUTHOR Yilin Pan]]|
|[[Pan, Zihan|AUTHOR Zihan Pan]]|
|[[Panchanathan, Sethuraman|AUTHOR Sethuraman Panchanathan]]|
|[[Panda, Ashish|AUTHOR Ashish Panda]]|
|[[Pandey, Prakhar|AUTHOR Prakhar Pandey]]|
|[[Pandharipande, Meghna|AUTHOR Meghna Pandharipande]]|
|[[Pandia D. S., Karthik|AUTHOR Karthik Pandia D. S.]]|
|[[Pang, Ruoming|AUTHOR Ruoming Pang]]|
|[[Pantazis, Yannis|AUTHOR Yannis Pantazis]]|
|[[Pantazopoulos, Georgios|AUTHOR Georgios Pantazopoulos]]|
|[[Pantic, Maja|AUTHOR Maja Pantic]]|
|[[Papadimitriou, Katerina|AUTHOR Katerina Papadimitriou]]|
|[[Papaioannou, Charilaos|AUTHOR Charilaos Papaioannou]]|
|[[Paradis, Matthew|AUTHOR Matthew Paradis]]|
|[[Paraskevopoulos, Georgios|AUTHOR Georgios Paraskevopoulos]]|
|[[Parcollet, Titouan|AUTHOR Titouan Parcollet]]|
|[[Parhammer, Sandra I.|AUTHOR Sandra I. Parhammer]]|
|[[Pariente, Manuel|AUTHOR Manuel Pariente]]|
|[[Parish-Morris, Julia|AUTHOR Julia Parish-Morris]]|
|[[Park, Daniel S.|AUTHOR Daniel S. Park]]|
|[[Park, Hosung|AUTHOR Hosung Park]]|
|[[Park, Hyun Jin|AUTHOR Hyun Jin Park]]|
|[[Park, Junho|AUTHOR Junho Park]]|
|[[Park, Kyubyong|AUTHOR Kyubyong Park]]|
|[[Park, Sangwook|AUTHOR Sangwook Park]]|
|[[Park, Soo Jin|AUTHOR Soo Jin Park]]|
|[[Park, Tae Jin|AUTHOR Tae Jin Park]]|
|[[Parmonangan, Ivan Halim|AUTHOR Ivan Halim Parmonangan]]|
|[[Parra-Gallego, L.F.|AUTHOR L.F. Parra-Gallego]]|
|[[Parry, Jack|AUTHOR Jack Parry]]|
|[[Parthasaarathy, Sudarsanam|AUTHOR Sudarsanam Parthasaarathy]]|
|[[Parthasarathi, Sree Hari Krishnan|AUTHOR Sree Hari Krishnan Parthasarathi]]|
|[[Pasad, Ankita|AUTHOR Ankita Pasad]]|
|[[Pascual, Santiago|AUTHOR Santiago Pascual]]|
|[[Pastätter, Manfred|AUTHOR Manfred Pastätter]]|
|[[Patel, Deep|AUTHOR Deep Patel]]|
|[[Patil, Ankur T.|AUTHOR Ankur T. Patil]]|
|[[Patil, Hemant A.|AUTHOR Hemant A. Patil]]|
|[[Patino, Jose|AUTHOR Jose Patino]]|
|[[Paul, Dipjyoti|AUTHOR Dipjyoti Paul]]|
|[[Paul, Shachi|AUTHOR Shachi Paul]]|
|[[Paulus, Maximillian|AUTHOR Maximillian Paulus]]|
|[[Peic Tukuljac, Helena|AUTHOR Helena Peic Tukuljac]]|
|[[Peinado, Antonio M.|AUTHOR Antonio M. Peinado]]|
|[[Peitz, Ute Dorothea|AUTHOR Ute Dorothea Peitz]]|
|[[Pekhovsky, Timur|AUTHOR Timur Pekhovsky]]|
|[[Pellegrini, Thomas|AUTHOR Thomas Pellegrini]]|
|[[Pena, Rodrigo C.G.|AUTHOR Rodrigo C.G. Pena]]|
|[[Peng, Shouye|AUTHOR Shouye Peng]]|
|[[Peng, Yiping|AUTHOR Yiping Peng]]|
|[[Peng, Zhiyuan|AUTHOR Zhiyuan Peng]]|
|[[Penn, Gerald|AUTHOR Gerald Penn]]|
|[[Peperkamp, Sharon|AUTHOR Sharon Peperkamp]]|
|[[Percival, Maida|AUTHOR Maida Percival]]|
|[[Perez-Toro, P.A.|AUTHOR P.A. Perez-Toro]]|
|[[Pérez Zarazaga, Pablo|AUTHOR Pablo Pérez Zarazaga]]|
|[[Peri, Raghuveer|AUTHOR Raghuveer Peri]]|
|[[Pernkopf, Franz|AUTHOR Franz Pernkopf]]|
|[[Perrotin, Olivier|AUTHOR Olivier Perrotin]]|
|[[Peskov, Denis|AUTHOR Denis Peskov]]|
|[[Petridis, Stavros|AUTHOR Stavros Petridis]]|
|[[Petrov, Oleg|AUTHOR Oleg Petrov]]|
|[[Petrovska-Delacrétaz, Dijana|AUTHOR Dijana Petrovska-Delacrétaz]]|
|[[Peyser, Cal|AUTHOR Cal Peyser]]|
|[[Pham, Lam|AUTHOR Lam Pham]]|
|[[Pham, Ngoc-Quan|AUTHOR Ngoc-Quan Pham]]|
|[[Pham, Van Tung|AUTHOR Van Tung Pham]]|
|[[Phan, Huy|AUTHOR Huy Phan]]|
|[[Picheny, Michael|AUTHOR Michael Picheny]]|
|[[Pienaar, Wikus|AUTHOR Wikus Pienaar]]|
|[[Pierucci, Piero|AUTHOR Piero Pierucci]]|
|[[Pietrowicz, Mary|AUTHOR Mary Pietrowicz]]|
|[[Pietrzak, Piotr|AUTHOR Piotr Pietrzak]]|
|[[Pirhosseinloo, Shadi|AUTHOR Shadi Pirhosseinloo]]|
|[[Piunova, Anna|AUTHOR Anna Piunova]]|
|[[Plata, Marcin|AUTHOR Marcin Plata]]|
|[[Plchot, Oldřich|AUTHOR Oldřich Plchot]]|
|[[Poellabauer, Christian|AUTHOR Christian Poellabauer]]|
|[[Pokorny, Florian B.|AUTHOR Florian B. Pokorny]]|
|[[Pons, Jordi|AUTHOR Jordi Pons]]|
|[[Pool, Jamie|AUTHOR Jamie Pool]]|
|[[Porysek Moreta, Pia Nancy|AUTHOR Pia Nancy Porysek Moreta]]|
|[[Potamianos, Alexandros|AUTHOR Alexandros Potamianos]]|
|[[Potamianos, Gerasimos|AUTHOR Gerasimos Potamianos]]|
|[[Potamitis, Ilyas|AUTHOR Ilyas Potamitis]]|
|[[Pouplier, Marianne|AUTHOR Marianne Pouplier]]|
|[[Povey, Daniel|AUTHOR Daniel Povey]]|
|[[Prabhavalkar, Rohit|AUTHOR Rohit Prabhavalkar]]|
|[[Prabhu, Utsav|AUTHOR Utsav Prabhu]]|
|[[Prachi, S.|AUTHOR S. Prachi]]|
|[[Prasad, Manasa|AUTHOR Manasa Prasad]]|
|[[Prasad, Rahul|AUTHOR Rahul Prasad]]|
|[[Prasad, Rashmi|AUTHOR Rashmi Prasad]]|
|[[Prasanna, S.R. Mahadeva|AUTHOR S.R. Mahadeva Prasanna]]|
|[[Precup, Doina|AUTHOR Doina Precup]]|
|[[Pretorius, Arnu|AUTHOR Arnu Pretorius]]|
|[[Prieto, Ramon|AUTHOR Ramon Prieto]]|
|[[Prisyach, Tatiana|AUTHOR Tatiana Prisyach]]|
|[[Proctor, Michael|AUTHOR Michael Proctor]]|
|[[Pu, Shiliang|AUTHOR Shiliang Pu]]|
|[[Pucher, Michael|AUTHOR Michael Pucher]]|
|[[Puckette, Miller|AUTHOR Miller Puckette]]|
|[[Pulman, Stephen|AUTHOR Stephen Pulman]]|
|[[Pundak, Golan|AUTHOR Golan Pundak]]|
|[[Purver, Matthew|AUTHOR Matthew Purver]]|
|[[Pusateri, Ernest|AUTHOR Ernest Pusateri]]|
|[[Putrycz, Bartosz|AUTHOR Bartosz Putrycz]]|
|[[Putze, Felix|AUTHOR Felix Putze]]|
|[[P.V., Muhammed Shifas|AUTHOR Muhammed Shifas P.V.]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Qi, Xiaoke|AUTHOR Xiaoke Qi]]|
|[[Qian, Qian|AUTHOR Qian Qian]]|
|[[Qian, Yanmin|AUTHOR Yanmin Qian]]|
|[[Qian, Yao|AUTHOR Yao Qian]]|
|[[Qin, Tao|AUTHOR Tao Qin]]|
|[[Qin, Xiaoyi|AUTHOR Xiaoyi Qin]]|
|[[Qin, Ying|AUTHOR Ying Qin]]|
|[[Qin, Yong|AUTHOR Yong Qin]]|
|[[Qu, Leyuan|AUTHOR Leyuan Qu]]|
|[[Quatieri, Thomas F.|AUTHOR Thomas F. Quatieri]]|
|[[Quiniou, Solen|AUTHOR Solen Quiniou]]|
|[[Quinn, John|AUTHOR John Quinn]]|
|[[Quintas, Sebastião|AUTHOR Sebastião Quintas]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Rabiee, Azam|AUTHOR Azam Rabiee]]|
|[[Rabinovitz, Carmel|AUTHOR Carmel Rabinovitz]]|
|[[Radostev, Vasiliy|AUTHOR Vasiliy Radostev]]|
|[[Rahman, Md. Mahbubur|AUTHOR Md. Mahbubur Rahman]]|
|[[Raj, Dabre|AUTHOR Dabre Raj]]|
|[[Rajan, Rajeev|AUTHOR Rajeev Rajan]]|
|[[Raju, Anirudh|AUTHOR Anirudh Raju]]|
|[[Rajwadi, Marvin|AUTHOR Marvin Rajwadi]]|
|[[Rakhi, Alina|AUTHOR Alina Rakhi]]|
|[[Rakowski, Alexander|AUTHOR Alexander Rakowski]]|
|[[Rallabandi, SaiKrishna|AUTHOR SaiKrishna Rallabandi]]|
|[[Ramabhadran, Bhuvana|AUTHOR Bhuvana Ramabhadran]]|
|[[Raman, Sneha|AUTHOR Sneha Raman]]|
|[[Ramanathi, Manoj Kumar|AUTHOR Manoj Kumar Ramanathi]]|
|[[Ramsay, David B.|AUTHOR David B. Ramsay]]|
|[[Ramteke, Pravin Bhaskar|AUTHOR Pravin Bhaskar Ramteke]]|
|[[Rana, Rajib|AUTHOR Rajib Rana]]|
|[[Rao, Bhaskar D.|AUTHOR Bhaskar D. Rao]]|
|[[Rao, Kanishka|AUTHOR Kanishka Rao]]|
|[[Rao, K. Sreenivasa|AUTHOR K. Sreenivasa Rao]]|
|[[Rao, Nithin|AUTHOR Nithin Rao]]|
|[[Rao, Wei|AUTHOR Wei Rao]]|
|[[Rao M.V., Achuth|AUTHOR Achuth Rao M.V.]]|
|[[Räsänen, Okko|AUTHOR Okko Räsänen]]|
|[[Rasskazova, Oksana|AUTHOR Oksana Rasskazova]]|
|[[Rastrow, Ariya|AUTHOR Ariya Rastrow]]|
|[[Rath, Shakti P.|AUTHOR Shakti P. Rath]]|
|[[Ratko, Louise|AUTHOR Louise Ratko]]|
|[[Rauf, Sahar|AUTHOR Sahar Rauf]]|
|[[Ravanelli, Mirco|AUTHOR Mirco Ravanelli]]|
|[[Raveh, Eran|AUTHOR Eran Raveh]]|
|[[Ravi, Vijay|AUTHOR Vijay Ravi]]|
|[[Ray, Avik|AUTHOR Avik Ray]]|
|[[Raymond, Christian|AUTHOR Christian Raymond]]|
|[[Razavi, S. Zahra|AUTHOR S. Zahra Razavi]]|
|[[Reblin, Maija|AUTHOR Maija Reblin]]|
|[[Rebout, Lise|AUTHOR Lise Rebout]]|
|[[Rech, Silas|AUTHOR Silas Rech]]|
|[[Reddy, Chandan K.A.|AUTHOR Chandan K.A. Reddy]]|
|[[Reddy, Pradeep|AUTHOR Pradeep Reddy]]|
|[[Rehr, Robert|AUTHOR Robert Rehr]]|
|[[Reichart, Roi|AUTHOR Roi Reichart]]|
|[[Reinhold, Jacob|AUTHOR Jacob Reinhold]]|
|[[Ren, Zongze|AUTHOR Zongze Ren]]|
|[[Renals, Steve|AUTHOR Steve Renals]]|
|[[Renduchintala, Adithya|AUTHOR Adithya Renduchintala]]|
|[[Reshef, Eilon|AUTHOR Eilon Reshef]]|
|[[Reuber, Markus|AUTHOR Markus Reuber]]|
|[[Reynolds, Douglas|AUTHOR Douglas Reynolds]]|
|[[Ribas, Dayana|AUTHOR Dayana Ribas]]|
|[[Ribeiro, Manuel Sam|AUTHOR Manuel Sam Ribeiro]]|
|[[Ricaud, Benjamin|AUTHOR Benjamin Ricaud]]|
|[[Riccardi, Giuseppe|AUTHOR Giuseppe Riccardi]]|
|[[Richardson, Brigitte|AUTHOR Brigitte Richardson]]|
|[[Richardson, Fred|AUTHOR Fred Richardson]]|
|[[Richburg, Brian|AUTHOR Brian Richburg]]|
|[[Richey, Colleen|AUTHOR Colleen Richey]]|
|[[Richmond, Korin|AUTHOR Korin Richmond]]|
|[[Ridouane, Rachid|AUTHOR Rachid Ridouane]]|
|[[Riedler, Jürgen|AUTHOR Jürgen Riedler]]|
|[[Riley, Michael A.|AUTHOR Michael A. Riley]]|
|[[Rim, Daniel Jun|AUTHOR Daniel Jun Rim]]|
|[[Rios-Urrego, Cristian David|AUTHOR Cristian David Rios-Urrego]]|
|[[Ritchie, Sandy|AUTHOR Sandy Ritchie]]|
|[[Ritz, Fabian|AUTHOR Fabian Ritz]]|
|[[Riviello, Alexandre|AUTHOR Alexandre Riviello]]|
|[[Riviello, Maria Teresa|AUTHOR Maria Teresa Riviello]]|
|[[Roa Dabike, Gerardo|AUTHOR Gerardo Roa Dabike]]|
|[[Robinson, Catherine|AUTHOR Catherine Robinson]]|
|[[Roblek, Dominik|AUTHOR Dominik Roblek]]|
|[[Rodriguez, Pedro|AUTHOR Pedro Rodriguez]]|
|[[Roebel, Axel|AUTHOR Axel Roebel]]|
|[[Roelen, Sonja-Dana|AUTHOR Sonja-Dana Roelen]]|
|[[Roessig, Simon|AUTHOR Simon Roessig]]|
|[[Rohanian, Morteza|AUTHOR Morteza Rohanian]]|
|[[Rohdin, Johan|AUTHOR Johan Rohdin]]|
|[[Rohleder, Nicolas|AUTHOR Nicolas Rohleder]]|
|[[Rohlin, Tracy|AUTHOR Tracy Rohlin]]|
|[[Rohnke, Jonas|AUTHOR Jonas Rohnke]]|
|[[Rohrer, Patrick Louis|AUTHOR Patrick Louis Rohrer]]|
|[[Romanenko, Aleksei|AUTHOR Aleksei Romanenko]]|
|[[Ronanki, Srikanth|AUTHOR Srikanth Ronanki]]|
|[[Rondon, Pat|AUTHOR Pat Rondon]]|
|[[Rong, Panying|AUTHOR Panying Rong]]|
|[[Rosenberg, Andrew|AUTHOR Andrew Rosenberg]]|
|[[Rossi, Sonja|AUTHOR Sonja Rossi]]|
|[[Roth, N.|AUTHOR N. Roth]]|
|[[Routray, Aurobinda|AUTHOR Aurobinda Routray]]|
|[[Rouvier, Mickael|AUTHOR Mickael Rouvier]]|
|[[Rowe, Hannah P.|AUTHOR Hannah P. Rowe]]|
|[[Rownicka, Joanna|AUTHOR Joanna Rownicka]]|
|[[Roy, Deb|AUTHOR Deb Roy]]|
|[[Roy, Sharad|AUTHOR Sharad Roy]]|
|[[Rozgic, Viktor|AUTHOR Viktor Rozgic]]|
|[[Rueda, Alice|AUTHOR Alice Rueda]]|
|[[Rúnarsdóttir, Anna V.|AUTHOR Anna V. Rúnarsdóttir]]|
|[[Rutowski, Tomasz|AUTHOR Tomasz Rutowski]]|
|[[Ryant, Neville|AUTHOR Neville Ryant]]|
|[[Rybach, David|AUTHOR David Rybach]]|
|[[Rybakov, Oleg|AUTHOR Oleg Rybakov]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Sabatini, John|AUTHOR John Sabatini]]|
|[[Sabzi Shahrebabaki, Abdolreza|AUTHOR Abdolreza Sabzi Shahrebabaki]]|
|[[Sacchi, Niccolò|AUTHOR Niccolò Sacchi]]|
|[[Sa-Couto, Pedro|AUTHOR Pedro Sa-Couto]]|
|[[Sadhu, Samik|AUTHOR Samik Sadhu]]|
|[[Sadjadi, Seyed Omid|AUTHOR Seyed Omid Sadjadi]]|
|[[Safari, Pooyan|AUTHOR Pooyan Safari]]|
|[[Sagar, Adithya|AUTHOR Adithya Sagar]]|
|[[Sager, Jacob|AUTHOR Jacob Sager]]|
|[[Saha, Atreyee|AUTHOR Atreyee Saha]]|
|[[Saha, Pramit|AUTHOR Pramit Saha]]|
|[[Sahidullah, Md.|AUTHOR Md. Sahidullah]]|
|[[Sahrawat, Dhruva|AUTHOR Dhruva Sahrawat]]|
|[[Sahu, Saurabh|AUTHOR Saurabh Sahu]]|
|[[Sai, Pulikonda Aditya|AUTHOR Pulikonda Aditya Sai]]|
|[[Sailor, Hardik B.|AUTHOR Hardik B. Sailor]]|
|[[Sainath, Tara N.|AUTHOR Tara N. Sainath]]|
|[[Saito, Daisuke|AUTHOR Daisuke Saito]]|
|[[Saito, Takuya|AUTHOR Takuya Saito]]|
|[[Sak, Hasim|AUTHOR Hasim Sak]]|
|[[Sakakibara, Ken-Ichi|AUTHOR Ken-Ichi Sakakibara]]|
|[[Sakti, Sakriani|AUTHOR Sakriani Sakti]]|
|[[Salvati, Daniele|AUTHOR Daniele Salvati]]|
|[[Sánchez-Hevia, Héctor A.|AUTHOR Héctor A. Sánchez-Hevia]]|
|[[Sanchis, Albert|AUTHOR Albert Sanchis]]|
|[[Sangwan, Abhijeet|AUTHOR Abhijeet Sangwan]]|
|[[San-Segundo, Rubén|AUTHOR Rubén San-Segundo]]|
|[[Saon, George|AUTHOR George Saon]]|
|[[Sapru, Ashtosh|AUTHOR Ashtosh Sapru]]|
|[[Saraclar, Murat|AUTHOR Murat Saraclar]]|
|[[Sarı, Leda|AUTHOR Leda Sarı]]|
|[[Sarma, Kandarpa Kumar|AUTHOR Kandarpa Kumar Sarma]]|
|[[Sarma, Mousmita|AUTHOR Mousmita Sarma]]|
|[[Sarma, Prathusha K.|AUTHOR Prathusha K. Sarma]]|
|[[Sarmah, Priyankoo|AUTHOR Priyankoo Sarmah]]|
|[[Sato, Hiroshi|AUTHOR Hiroshi Sato]]|
|[[Saunders, Kate|AUTHOR Kate Saunders]]|
|[[Saurous, Rif A.|AUTHOR Rif A. Saurous]]|
|[[Sawada, Naoki|AUTHOR Naoki Sawada]]|
|[[Saxena, Astitwa|AUTHOR Astitwa Saxena]]|
|[[Saxon, Michael|AUTHOR Michael Saxon]]|
|[[Schaeffler, Felix|AUTHOR Felix Schaeffler]]|
|[[Schallhart, Christian|AUTHOR Christian Schallhart]]|
|[[Scharenborg, Odette|AUTHOR Odette Scharenborg]]|
|[[Schatten, Heather T.|AUTHOR Heather T. Schatten]]|
|[[Schepker, Henning|AUTHOR Henning Schepker]]|
|[[Scheran, Daniel|AUTHOR Daniel Scheran]]|
|[[Schiel, Florian|AUTHOR Florian Schiel]]|
|[[Schiller, Dominik|AUTHOR Dominik Schiller]]|
|[[Schimke, Sarah|AUTHOR Sarah Schimke]]|
|[[Schjoedt, Uffe|AUTHOR Uffe Schjoedt]]|
|[[Schlüter, Ralf|AUTHOR Ralf Schlüter]]|
|[[Schmid, Carolin|AUTHOR Carolin Schmid]]|
|[[Schmitt, Manuel|AUTHOR Manuel Schmitt]]|
|[[Schmitt, Maximilian|AUTHOR Maximilian Schmitt]]|
|[[Schneider, Steffen|AUTHOR Steffen Schneider]]|
|[[Schneider, Thomas|AUTHOR Thomas Schneider]]|
|[[Schnieder, Sebastian|AUTHOR Sebastian Schnieder]]|
|[[Schoentgen, Jean|AUTHOR Jean Schoentgen]]|
|[[Schoormann, Heike|AUTHOR Heike Schoormann]]|
|[[Schottenhamml, Julia|AUTHOR Julia Schottenhamml]]|
|[[Schröer, Marin|AUTHOR Marin Schröer]]|
|[[Schubert, Lenhart K.|AUTHOR Lenhart K. Schubert]]|
|[[Schuller, Björn W.|AUTHOR Björn W. Schuller]]|
|[[Schultz, Robert T.|AUTHOR Robert T. Schultz]]|
|[[Schultz, Tanja|AUTHOR Tanja Schultz]]|
|[[Schultze, Thomas|AUTHOR Thomas Schultze]]|
|[[Schuppler, Barbara|AUTHOR Barbara Schuppler]]|
|[[Schuster, M.|AUTHOR M. Schuster]]|
|[[Scott, Kristen M.|AUTHOR Kristen M. Scott]]|
|[[Sebastian, Jilt|AUTHOR Jilt Sebastian]]|
|[[Seelamantula, Chandra Sekhar|AUTHOR Chandra Sekhar Seelamantula]]|
|[[Segal, Yael|AUTHOR Yael Segal]]|
|[[Seiderer, Andreas|AUTHOR Andreas Seiderer]]|
|[[Seidl, Amanda|AUTHOR Amanda Seidl]]|
|[[Seki, Hiroshi|AUTHOR Hiroshi Seki]]|
|[[Sell, Gregory|AUTHOR Gregory Sell]]|
|[[Selouani, Sid Ahmed|AUTHOR Sid Ahmed Selouani]]|
|[[Seltzer, Michael L.|AUTHOR Michael L. Seltzer]]|
|[[Seneviratne, Nadee|AUTHOR Nadee Seneviratne]]|
|[[Senior, Andrew|AUTHOR Andrew Senior]]|
|[[Sennema, Anke|AUTHOR Anke Sennema]]|
|[[Seo, Seokjun|AUTHOR Seokjun Seo]]|
|[[Seo, Soonshin|AUTHOR Soonshin Seo]]|
|[[Serrà, Joan|AUTHOR Joan Serrà]]|
|[[Serra, Xavier|AUTHOR Xavier Serra]]|
|[[Serrano, Luis|AUTHOR Luis Serrano]]|
|[[Serrino, Jack|AUTHOR Jack Serrino]]|
|[[Seshadri, Shreyas|AUTHOR Shreyas Seshadri]]|
|[[Sethares, William|AUTHOR William Sethares]]|
|[[Sethu, Vidhyasaharan|AUTHOR Vidhyasaharan Sethu]]|
|[[Seward, Reneé|AUTHOR Reneé Seward]]|
|[[Sezgin, T. Metin|AUTHOR T. Metin Sezgin]]|
|[[Sgouropoulos, Dimitris|AUTHOR Dimitris Sgouropoulos]]|
|[[Shabestary, Turaj Z.|AUTHOR Turaj Z. Shabestary]]|
|[[Shafran, Izhak|AUTHOR Izhak Shafran]]|
|[[Shah, Nirmesh J.|AUTHOR Nirmesh J. Shah]]|
|[[Shah, Rajiv Ratn|AUTHOR Rajiv Ratn Shah]]|
|[[Shamsi, Meysam|AUTHOR Meysam Shamsi]]|
|[[Shankar, Ravi|AUTHOR Ravi Shankar]]|
|[[Sharifi, Matthew|AUTHOR Matthew Sharifi]]|
|[[Sharma, Bidisha|AUTHOR Bidisha Sharma]]|
|[[Sharma, Dravyansh|AUTHOR Dravyansh Sharma]]|
|[[Sharma, Tanay|AUTHOR Tanay Sharma]]|
|[[Shechtman, Slava|AUTHOR Slava Shechtman]]|
|[[Sheikh, Imran|AUTHOR Imran Sheikh]]|
|[[Shekhawat, Hanumant Singh|AUTHOR Hanumant Singh Shekhawat]]|
|[[Shen, Peng|AUTHOR Peng Shen]]|
|[[Shen, Yi|AUTHOR Yi Shen]]|
|[[Shen, Yilin|AUTHOR Yilin Shen]]|
|[[Shen, Yu-Han|AUTHOR Yu-Han Shen]]|
|[[Sheth, Janaki|AUTHOR Janaki Sheth]]|
|[[Shi, Anyan|AUTHOR Anyan Shi]]|
|[[Shi, Bowen|AUTHOR Bowen Shi]]|
|[[Shi, Hao|AUTHOR Hao Shi]]|
|[[Shi, Jing|AUTHOR Jing Shi]]|
|[[Shi, Qiuying|AUTHOR Qiuying Shi]]|
|[[Shi, Shuju|AUTHOR Shuju Shi]]|
|[[Shi, Ziqiang|AUTHOR Ziqiang Shi]]|
|[[Shiga, Yoshinori|AUTHOR Yoshinori Shiga]]|
|[[Shih, Chilin|AUTHOR Chilin Shih]]|
|[[Shillingford, Brendan|AUTHOR Brendan Shillingford]]|
|[[Shim, Hye-jin|AUTHOR Hye-jin Shim]]|
|[[Shimodaira, Hiroshi|AUTHOR Hiroshi Shimodaira]]|
|[[Shin, Beomjun|AUTHOR Beomjun Shin]]|
|[[Shin, Minkyu|AUTHOR Minkyu Shin]]|
|[[Shinoda, Koichi|AUTHOR Koichi Shinoda]]|
|[[Shinohara, Yusuke|AUTHOR Yusuke Shinohara]]|
|[[Shiota, Sayaka|AUTHOR Sayaka Shiota]]|
|[[Shirley, Ben|AUTHOR Ben Shirley]]|
|[[Shon, Suwon|AUTHOR Suwon Shon]]|
|[[Shor, Joel|AUTHOR Joel Shor]]|
|[[Shrem, Yosi|AUTHOR Yosi Shrem]]|
|[[Shriberg, Elizabeth|AUTHOR Elizabeth Shriberg]]|
|[[Shrivastava, Abhishek|AUTHOR Abhishek Shrivastava]]|
|[[Shrivastava, Nilay|AUTHOR Nilay Shrivastava]]|
|[[Shulipa, Andrey|AUTHOR Andrey Shulipa]]|
|[[Si, Yuke|AUTHOR Yuke Si]]|
|[[Sidi Yakoub, Mohammed|AUTHOR Mohammed Sidi Yakoub]]|
|[[Siegert, Ingo|AUTHOR Ingo Siegert]]|
|[[Silnova, Anna|AUTHOR Anna Silnova]]|
|[[Silva, Samuel|AUTHOR Samuel Silva]]|
|[[Sim, Khe Chai|AUTHOR Khe Chai Sim]]|
|[[Šimko, Juraj|AUTHOR Juraj Šimko]]|
|[[Singer, Elliot|AUTHOR Elliot Singer]]|
|[[Singh, Mittul|AUTHOR Mittul Singh]]|
|[[Singh, Prachi|AUTHOR Prachi Singh]]|
|[[Singh, Vishwanath P.|AUTHOR Vishwanath P. Singh]]|
|[[Sinha, Rohit|AUTHOR Rohit Sinha]]|
|[[Siniscalchi, Sabato Marco|AUTHOR Sabato Marco Siniscalchi]]|
|[[Sisman, Berrak|AUTHOR Berrak Sisman]]|
|[[Sitman, Raquel|AUTHOR Raquel Sitman]]|
|[[Sivaraman, Ganesh|AUTHOR Ganesh Sivaraman]]|
|[[Sjons, Johan|AUTHOR Johan Sjons]]|
|[[Skantze, Gabriel|AUTHOR Gabriel Skantze]]|
|[[Skerry-Ryan, R.J.|AUTHOR R.J. Skerry-Ryan]]|
|[[Skidmore, Lucy|AUTHOR Lucy Skidmore]]|
|[[Skilling, Adrian|AUTHOR Adrian Skilling]]|
|[[Sklyar, Ilya|AUTHOR Ilya Sklyar]]|
|[[Skoglund, Jan|AUTHOR Jan Skoglund]]|
|[[Skrelin, Pavel|AUTHOR Pavel Skrelin]]|
|[[Smeele, L.E.|AUTHOR L.E. Smeele]]|
|[[Šmídl, Luboš|AUTHOR Luboš Šmídl]]|
|[[Smit, Peter|AUTHOR Peter Smit]]|
|[[Smith, Cybelle|AUTHOR Cybelle Smith]]|
|[[Smith, Melissa C.|AUTHOR Melissa C. Smith]]|
|[[Smoczyk, Daniel|AUTHOR Daniel Smoczyk]]|
|[[Smolander, Anna-Riikka|AUTHOR Anna-Riikka Smolander]]|
|[[Snyder, Cathryn|AUTHOR Cathryn Snyder]]|
|[[Snyder, David|AUTHOR David Snyder]]|
|[[So, R.H.Y.|AUTHOR R.H.Y. So]]|
|[[Sokolov, Alex|AUTHOR Alex Sokolov]]|
|[[Soler-Company, Juan|AUTHOR Juan Soler-Company]]|
|[[Soltau, Hagen|AUTHOR Hagen Soltau]]|
|[[Somandepalli, Krishna|AUTHOR Krishna Somandepalli]]|
|[[Song, Eunwoo|AUTHOR Eunwoo Song]]|
|[[Song, Hongwei|AUTHOR Hongwei Song]]|
|[[Song, Wei|AUTHOR Wei Song]]|
|[[Song, Yan|AUTHOR Yan Song]]|
|[[Song, Yuanfeng|AUTHOR Yuanfeng Song]]|
|[[Soni, Meet|AUTHOR Meet Soni]]|
|[[Soomro, Bilal|AUTHOR Bilal Soomro]]|
|[[Soong, Frank K.|AUTHOR Frank K. Soong]]|
|[[Sørensen, Charlotte|AUTHOR Charlotte Sørensen]]|
|[[Sorin, Alex|AUTHOR Alex Sorin]]|
|[[Sorokin, Ivan|AUTHOR Ivan Sorokin]]|
|[[Soto, Victor|AUTHOR Victor Soto]]|
|[[Speier, William|AUTHOR William Speier]]|
|[[Spencer, Caroline|AUTHOR Caroline Spencer]]|
|[[Spinu, Laura|AUTHOR Laura Spinu]]|
|[[Springenberg, Sebastian|AUTHOR Sebastian Springenberg]]|
|[[Sproat, Richard|AUTHOR Richard Sproat]]|
|[[Sridhar, Kusha|AUTHOR Kusha Sridhar]]|
|[[Sridhar, Prashant|AUTHOR Prashant Sridhar]]|
|[[Sridharan, S.|AUTHOR S. Sridharan]]|
|[[Srinivasan, Aparna|AUTHOR Aparna Srinivasan]]|
|[[Srinivasan, Sriram|AUTHOR Sriram Srinivasan]]|
|[[Sriram, G.|AUTHOR G. Sriram]]|
|[[Srivastava, Brij Mohan Lal|AUTHOR Brij Mohan Lal Srivastava]]|
|[[Srivastava, Mani B.|AUTHOR Mani B. Srivastava]]|
|[[Srivastava, Varun|AUTHOR Varun Srivastava]]|
|[[Stafylakis, Themos|AUTHOR Themos Stafylakis]]|
|[[Stahl, Johannes|AUTHOR Johannes Stahl]]|
|[[Stan, Adriana|AUTHOR Adriana Stan]]|
|[[Stappen, Lukas|AUTHOR Lukas Stappen]]|
|[[Stärk, Katja|AUTHOR Katja Stärk]]|
|[[Steiner, Ingmar|AUTHOR Ingmar Steiner]]|
|[[Stent, Amanda|AUTHOR Amanda Stent]]|
|[[Stepanov, Evgeny A.|AUTHOR Evgeny A. Stepanov]]|
|[[Stephenson, Cory|AUTHOR Cory Stephenson]]|
|[[Stevens, Mary|AUTHOR Mary Stevens]]|
|[[Stipancic, Kaila L.|AUTHOR Kaila L. Stipancic]]|
|[[Stolcke, Andreas|AUTHOR Andreas Stolcke]]|
|[[Stoll, Sabine|AUTHOR Sabine Stoll]]|
|[[Stoller, Daniel|AUTHOR Daniel Stoller]]|
|[[Stone, Simon|AUTHOR Simon Stone]]|
|[[Strauss, M.|AUTHOR M. Strauss]]|
|[[Strik, Helmer|AUTHOR Helmer Strik]]|
|[[Strohman, Trevor|AUTHOR Trevor Strohman]]|
|[[Strom, Nikko|AUTHOR Nikko Strom]]|
|[[Stuefer, Jonathan|AUTHOR Jonathan Stuefer]]|
|[[Sturim, Douglas|AUTHOR Douglas Sturim]]|
|[[Sturm, Bob L.|AUTHOR Bob L. Sturm]]|
|[[Šturm, Pavel|AUTHOR Pavel Šturm]]|
|[[Sturmbauer, Sarah|AUTHOR Sarah Sturmbauer]]|
|[[Stylianou, Yannis|AUTHOR Yannis Stylianou]]|
|[[Su, Bo-Hao|AUTHOR Bo-Hao Su]]|
|[[Su, Dan|AUTHOR Dan Su]]|
|[[Su, Feng-Guang|AUTHOR Feng-Guang Su]]|
|[[Su, Hang|AUTHOR Hang Su]]|
|[[Su, Ming-Hsiang|AUTHOR Ming-Hsiang Su]]|
|[[Su, Rongfeng|AUTHOR Rongfeng Su]]|
|[[Su, Xiangdong|AUTHOR Xiangdong Su]]|
|[[Subrahmanya, Niranjan|AUTHOR Niranjan Subrahmanya]]|
|[[Sudhakara, Sweekar|AUTHOR Sweekar Sudhakara]]|
|[[Sudro, Protima Nomo|AUTHOR Protima Nomo Sudro]]|
|[[Sugiyama, Akihiko|AUTHOR Akihiko Sugiyama]]|
|[[Sun, Eric|AUTHOR Eric Sun]]|
|[[Sun, Hanwu|AUTHOR Hanwu Sun]]|
|[[Sun, Hao|AUTHOR Hao Sun]]|
|[[Sun, Kewei|AUTHOR Kewei Sun]]|
|[[Sun, Lifa|AUTHOR Lifa Sun]]|
|[[Sun, Ming|AUTHOR Ming Sun]]|
|[[Sun, Shutao|AUTHOR Shutao Sun]]|
|[[Sun, Sining|AUTHOR Sining Sun]]|
|[[Sun, Xinrong|AUTHOR Xinrong Sun]]|
|[[Sun, Yang|AUTHOR Yang Sun]]|
|[[Sun, Zhongkai|AUTHOR Zhongkai Sun]]|
|[[Sung, Jongmo|AUTHOR Jongmo Sung]]|
|[[Suni, Antti|AUTHOR Antti Suni]]|
|[[Suo, Hongbin|AUTHOR Hongbin Suo]]|
|[[Supanekar, Sujata|AUTHOR Sujata Supanekar]]|
|[[Suter, Heidy|AUTHOR Heidy Suter]]|
|[[Suzuki, Hisami|AUTHOR Hisami Suzuki]]|
|[[Suzuki, Masayuki|AUTHOR Masayuki Suzuki]]|
|[[Suzuki, Motoyuki|AUTHOR Motoyuki Suzuki]]|
|[[Suzuki, Takahito|AUTHOR Takahito Suzuki]]|
|[[Švec, Jan|AUTHOR Jan Švec]]|
|[[Svendsen, Torbjørn|AUTHOR Torbjørn Svendsen]]|
|[[Swarup, Prakhar|AUTHOR Prakhar Swarup]]|
|[[Synnaeve, Gabriel|AUTHOR Gabriel Synnaeve]]|
|[[Szaszák, György|AUTHOR György Szaszák]]|
|[[Székely, Éva|AUTHOR Éva Székely]]|
|[[Sztahó, Dávid|AUTHOR Dávid Sztahó]]|
|[[Szymański, Piotr|AUTHOR Piotr Szymański]]|
|[[Szymczak, Adrian|AUTHOR Adrian Szymczak]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Tabrizi, Justin|AUTHOR Justin Tabrizi]]|
|[[Taherian, Hassan|AUTHOR Hassan Taherian]]|
|[[Tai, Tsung-Ming|AUTHOR Tsung-Ming Tai]]|
|[[Tai, Yu-Wen|AUTHOR Yu-Wen Tai]]|
|[[Takahashi, Naoya|AUTHOR Naoya Takahashi]]|
|[[Takaki, Shinji|AUTHOR Shinji Takaki]]|
|[[Takamichi, Shinnosuke|AUTHOR Shinnosuke Takamichi]]|
|[[Takanashi, Katsuya|AUTHOR Katsuya Takanashi]]|
|[[Takashima, Ryoichi|AUTHOR Ryoichi Takashima]]|
|[[Takatsu, Hiroaki|AUTHOR Hiroaki Takatsu]]|
|[[Takeda, Kazuya|AUTHOR Kazuya Takeda]]|
|[[Takemoto, Hironori|AUTHOR Hironori Takemoto]]|
|[[Tammewar, Aniruddha|AUTHOR Aniruddha Tammewar]]|
|[[Tan, Ke|AUTHOR Ke Tan]]|
|[[Tan, Kye Min|AUTHOR Kye Min Tan]]|
|[[Tan, Xu|AUTHOR Xu Tan]]|
|[[Tan, Ying-Ying|AUTHOR Ying-Ying Tan]]|
|[[Tan, Zheng-Hua|AUTHOR Zheng-Hua Tan]]|
|[[Tanaka, Hiroki|AUTHOR Hiroki Tanaka]]|
|[[Tanaka, Kou|AUTHOR Kou Tanaka]]|
|[[Tanaka, Tomohiro|AUTHOR Tomohiro Tanaka]]|
|[[Tanaka, Yoshiki|AUTHOR Yoshiki Tanaka]]|
|[[Taneja, Karan|AUTHOR Karan Taneja]]|
|[[Tang, Hao|AUTHOR Hao Tang]]|
|[[Tang, Min|AUTHOR Min Tang]]|
|[[Tang, Yun|AUTHOR Yun Tang]]|
|[[Tang, Zhenyu|AUTHOR Zhenyu Tang]]|
|[[Tankus, Ariel|AUTHOR Ariel Tankus]]|
|[[Tånnander, Christina|AUTHOR Christina Tånnander]]|
|[[Tao, Jianhua|AUTHOR Jianhua Tao]]|
|[[Tarantino, Lorenzo|AUTHOR Lorenzo Tarantino]]|
|[[Taschenberger, Linda|AUTHOR Linda Taschenberger]]|
|[[Tavarez, David|AUTHOR David Tavarez]]|
|[[Tavi, Lauri|AUTHOR Lauri Tavi]]|
|[[Tawara, Naohiro|AUTHOR Naohiro Tawara]]|
|[[Taylor, Jason|AUTHOR Jason Taylor]]|
|[[Taylor, Kye|AUTHOR Kye Taylor]]|
|[[Teh, Kah Kuan|AUTHOR Kah Kuan Teh]]|
|[[Teixeira, António|AUTHOR António Teixeira]]|
|[[Tembine, Hamidou|AUTHOR Hamidou Tembine]]|
|[[ten Bosch, L.|AUTHOR L. ten Bosch]]|
|[[Terasawa, Hiroko|AUTHOR Hiroko Terasawa]]|
|[[Terhiija, Viyazonuo|AUTHOR Viyazonuo Terhiija]]|
|[[Tesch, Kristina|AUTHOR Kristina Tesch]]|
|[[Teves, Ermine|AUTHOR Ermine Teves]]|
|[[Thaine, Patricia|AUTHOR Patricia Thaine]]|
|[[Thanda, Abhinav|AUTHOR Abhinav Thanda]]|
|[[Theobald, Barry-John|AUTHOR Barry-John Theobald]]|
|[[Thies, Tabea|AUTHOR Tabea Thies]]|
|[[Thomas, Anil|AUTHOR Anil Thomas]]|
|[[Thomas, Samuel|AUTHOR Samuel Thomas]]|
|[[Tian, Leimin|AUTHOR Leimin Tian]]|
|[[Tian, Xiaohai|AUTHOR Xiaohai Tian]]|
|[[Tian, Zhengkun|AUTHOR Zhengkun Tian]]|
|[[Tihelka, Daniel|AUTHOR Daniel Tihelka]]|
|[[Tilsen, Sam|AUTHOR Sam Tilsen]]|
|[[Tippmann, Jenny|AUTHOR Jenny Tippmann]]|
|[[Tits, Noé|AUTHOR Noé Tits]]|
|[[Tiwari, Gautam|AUTHOR Gautam Tiwari]]|
|[[Tjaden, Kris|AUTHOR Kris Tjaden]]|
|[[Tjandra, Andros|AUTHOR Andros Tjandra]]|
|[[Tkanov, Dmytro|AUTHOR Dmytro Tkanov]]|
|[[Toda, Tomoki|AUTHOR Tomoki Toda]]|
|[[Todisco, Massimiliano|AUTHOR Massimiliano Todisco]]|
|[[Togami, Masahito|AUTHOR Masahito Togami]]|
|[[Tokuda, Keiichi|AUTHOR Keiichi Tokuda]]|
|[[Tomar, Vikrant Singh|AUTHOR Vikrant Singh Tomar]]|
|[[Tomashenko, Natalia|AUTHOR Natalia Tomashenko]]|
|[[Tomita, Sho|AUTHOR Sho Tomita]]|
|[[Tommasi, Marc|AUTHOR Marc Tommasi]]|
|[[Tong, Rong|AUTHOR Rong Tong]]|
|[[Tong, Sibo|AUTHOR Sibo Tong]]|
|[[Tong, Ying|AUTHOR Ying Tong]]|
|[[Torres-Carrasquillo, Pedro A.|AUTHOR Pedro A. Torres-Carrasquillo]]|
|[[Tortoreto, Giuliano|AUTHOR Giuliano Tortoreto]]|
|[[Toshniwal, Shubham|AUTHOR Shubham Toshniwal]]|
|[[Tóth, László|AUTHOR László Tóth]]|
|[[Tournier, Ellen|AUTHOR Ellen Tournier]]|
|[[Tran, Huy Dat|AUTHOR Huy Dat Tran]]|
|[[Tran, Michelle|AUTHOR Michelle Tran]]|
|[[Tran, Trang|AUTHOR Trang Tran]]|
|[[Trancoso, Isabel|AUTHOR Isabel Trancoso]]|
|[[Travadi, Ruchir|AUTHOR Ruchir Travadi]]|
|[[Trebeschi, S.|AUTHOR S. Trebeschi]]|
|[[Treiber, Amos|AUTHOR Amos Treiber]]|
|[[Tremblay, Annie|AUTHOR Annie Tremblay]]|
|[[Triantafyllopoulos, Andreas|AUTHOR Andreas Triantafyllopoulos]]|
|[[Trisitichoke, Tasavat|AUTHOR Tasavat Trisitichoke]]|
|[[Troncone, Alda|AUTHOR Alda Troncone]]|
|[[Trong, Trung Ngo|AUTHOR Trung Ngo Trong]]|
|[[Trouvain, Jürgen|AUTHOR Jürgen Trouvain]]|
|[[Truong, Khiet P.|AUTHOR Khiet P. Truong]]|
|[[Tsai, Che-Ping|AUTHOR Che-Ping Tsai]]|
|[[Tsai, Yin-Chun|AUTHOR Yin-Chun Tsai]]|
|[[Tsao, Yu|AUTHOR Yu Tsao]]|
|[[Tseng, Chiu-Wang|AUTHOR Chiu-Wang Tseng]]|
|[[Tseng, Shao-Yen|AUTHOR Shao-Yen Tseng]]|
|[[Tseren, Andzhukaev|AUTHOR Andzhukaev Tseren]]|
|[[Tsiaras, Vassilis|AUTHOR Vassilis Tsiaras]]|
|[[Tsukanova, Anastasiia|AUTHOR Anastasiia Tsukanova]]|
|[[Tsunakawa, Takashi|AUTHOR Takashi Tsunakawa]]|
|[[Tsunoo, Emiru|AUTHOR Emiru Tsunoo]]|
|[[Tu, Tao|AUTHOR Tao Tu]]|
|[[Tu, Youzhi|AUTHOR Youzhi Tu]]|
|[[Tuan, Chao-I|AUTHOR Chao-I Tuan]]|
|[[Tuan, Yi-Lin|AUTHOR Yi-Lin Tuan]]|
|[[Tucker, Katherine M.|AUTHOR Katherine M. Tucker]]|
|[[Tündik, Máté Ákos|AUTHOR Máté Ákos Tündik]]|
|[[Tuo, Deyi|AUTHOR Deyi Tuo]]|
|[[Tuomainen, Outi|AUTHOR Outi Tuomainen]]|
|[[Turchi, Marco|AUTHOR Marco Turchi]]|
|[[Turner, Daniel R.|AUTHOR Daniel R. Turner]]|
|[[Tüske, Zoltán|AUTHOR Zoltán Tüske]]|
|[[Tuval, Omry|AUTHOR Omry Tuval]]|
|[[Tzinis, Efthymios|AUTHOR Efthymios Tzinis]]|
|[[Tzovaras, Dimitrios|AUTHOR Dimitrios Tzovaras]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Uttam, Shashwat|AUTHOR Shashwat Uttam]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[V., V. Vidyadhara Raju|AUTHOR V. Vidyadhara Raju V.]]|
|[[Vafeiadis, Anastasios|AUTHOR Anastasios Vafeiadis]]|
|[[Vainio, Martti|AUTHOR Martti Vainio]]|
|[[Valentini-Botinhao, Cassia|AUTHOR Cassia Valentini-Botinhao]]|
|[[Valin, Jean-Marc|AUTHOR Jean-Marc Valin]]|
|[[Valluri, Saikiran|AUTHOR Saikiran Valluri]]|
|[[van Alphen, M.J.A.|AUTHOR M.J.A. van Alphen]]|
|[[van Biljon, Elan|AUTHOR Elan van Biljon]]|
|[[Van Compernolle, Dirk|AUTHOR Dirk Van Compernolle]]|
|[[van den Bosch, A.|AUTHOR A. van den Bosch]]|
|[[van den Heuvel, Henk|AUTHOR Henk van den Heuvel]]|
|[[van der Heijden, F.|AUTHOR F. van der Heijden]]|
|[[van der Westhuizen, Ewald|AUTHOR Ewald van der Westhuizen]]|
|[[van de Weijer, Jeroen|AUTHOR Jeroen van de Weijer]]|
|[[Vaněk, Jan|AUTHOR Jan Vaněk]]|
|[[van Esch, Daan|AUTHOR Daan van Esch]]|
|[[Van Gysel, Christophe|AUTHOR Christophe Van Gysel]]|
|[[Van hamme, Hugo|AUTHOR Hugo Van hamme]]|
|[[van Hout, Julien|AUTHOR Julien van Hout]]|
|[[van Leeuwen, David A.|AUTHOR David A. van Leeuwen]]|
|[[van Leeuwen, K.G.|AUTHOR K.G. van Leeuwen]]|
|[[van Niekerk, Benjamin|AUTHOR Benjamin van Niekerk]]|
|[[van Santen, Jan|AUTHOR Jan van Santen]]|
|[[van Son, R.J.J.H.|AUTHOR R.J.J.H. van Son]]|
|[[van Staden, Lisa|AUTHOR Lisa van Staden]]|
|[[van Wijngaarden, Adriaan J.|AUTHOR Adriaan J. van Wijngaarden]]|
|[[Vasilescu, Ioana|AUTHOR Ioana Vasilescu]]|
|[[Vásquez-Correa, J.C.|AUTHOR J.C. Vásquez-Correa]]|
|[[Vaughan, Andrew|AUTHOR Andrew Vaughan]]|
|[[Vaughan, Brian|AUTHOR Brian Vaughan]]|
|[[Vega Rodríguez, Jenifer|AUTHOR Jenifer Vega Rodríguez]]|
|[[Velikovich, Leonid|AUTHOR Leonid Velikovich]]|
|[[Venkataraman, Archana|AUTHOR Archana Venkataraman]]|
|[[Venkatesan, Shankar M.|AUTHOR Shankar M. Venkatesan]]|
|[[Venkatesh, Anu|AUTHOR Anu Venkatesh]]|
|[[Venkateswara, Hemanth|AUTHOR Hemanth Venkateswara]]|
|[[Venneri, Annalena|AUTHOR Annalena Venneri]]|
|[[Verwimp, Lyan|AUTHOR Lyan Verwimp]]|
|[[Vestman, Ville|AUTHOR Ville Vestman]]|
|[[Vicente, Luis|AUTHOR Luis Vicente]]|
|[[Vicsi, Klára|AUTHOR Klára Vicsi]]|
|[[Vidailhet, Marie|AUTHOR Marie Vidailhet]]|
|[[Vidal, Jazmín|AUTHOR Jazmín Vidal]]|
|[[Vieira, Fernando|AUTHOR Fernando Vieira]]|
|[[Viglino, Thibault|AUTHOR Thibault Viglino]]|
|[[Vijayan, Karthika|AUTHOR Karthika Vijayan]]|
|[[Villalba, Jesús|AUTHOR Jesús Villalba]]|
|[[Villavicencio, Aline|AUTHOR Aline Villavicencio]]|
|[[Villavicencio, Fernando|AUTHOR Fernando Villavicencio]]|
|[[Viñals, Ignacio|AUTHOR Ignacio Viñals]]|
|[[Vincent, Emmanuel|AUTHOR Emmanuel Vincent]]|
|[[Violette, Patrick|AUTHOR Patrick Violette]]|
|[[Vipperla, Ravichander|AUTHOR Ravichander Vipperla]]|
|[[Virpioja, Sami|AUTHOR Sami Virpioja]]|
|[[Visontai, Mirkó|AUTHOR Mirkó Visontai]]|
|[[Vít, Jakub|AUTHOR Jakub Vít]]|
|[[Vitaladevuni, Shiv|AUTHOR Shiv Vitaladevuni]]|
|[[Voit, Dirk|AUTHOR Dirk Voit]]|
|[[Voleti, Rohit|AUTHOR Rohit Voleti]]|
|[[Volín, Jan|AUTHOR Jan Volín]]|
|[[Volkova, Marina|AUTHOR Marina Volkova]]|
|[[Vollmann, Ralf|AUTHOR Ralf Vollmann]]|
|[[Volokhov, Vladimir|AUTHOR Vladimir Volokhov]]|
|[[von Platen, P.|AUTHOR P. von Platen]]|
|[[Voskuilen, L.|AUTHOR L. Voskuilen]]|
|[[Voße, Jana|AUTHOR Jana Voße]]|
|[[Votis, Konstantinos|AUTHOR Konstantinos Votis]]|
|[[Vougioukas, Konstantinos|AUTHOR Konstantinos Vougioukas]]|
|[[Vu, Ngoc Thang|AUTHOR Ngoc Thang Vu]]|
|[[Vuissoz, Pierre-André|AUTHOR Pierre-André Vuissoz]]|
|[[Vukotić, Vedran|AUTHOR Vedran Vukotić]]|
|[[Vuppala, Anil Kumar|AUTHOR Anil Kumar Vuppala]]|
|[[Vyas, Apoorv|AUTHOR Apoorv Vyas]]|
|[[Vydana, Hari Krishna|AUTHOR Hari Krishna Vydana]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Wada, Shigeo|AUTHOR Shigeo Wada]]|
|[[Wagner, Anita E.|AUTHOR Anita E. Wagner]]|
|[[Wagner, Johannes|AUTHOR Johannes Wagner]]|
|[[Wagner, Petra|AUTHOR Petra Wagner]]|
|[[Waibel, Alex|AUTHOR Alex Waibel]]|
|[[Wakasa, Kenta|AUTHOR Kenta Wakasa]]|
|[[Wall, Julie|AUTHOR Julie Wall]]|
|[[Wan, Li|AUTHOR Li Wan]]|
|[[Wang, Bo|AUTHOR Bo Wang]]|
|[[Wang, Chao|AUTHOR Chao Wang]]|
|[[Wang, Chun-Wei|AUTHOR Chun-Wei Wang]]|
|[[Wang, DeLiang|AUTHOR DeLiang Wang]]|
|[[Wang, Dong|AUTHOR Dong Wang]]|
|[[Wang, Dongxiao|AUTHOR Dongxiao Wang]]|
|[[Wang, Feng|AUTHOR Feng Wang]]|
|[[Wang, Fengna|AUTHOR Fengna Wang]]|
|[[Wang, Haifeng|AUTHOR Haifeng Wang]]|
|[[Wang, Haishuai|AUTHOR Haishuai Wang]]|
|[[Wang, Hao|AUTHOR Hao Wang]]|
|[[Wang, Hongji|AUTHOR Hongji Wang]]|
|[[Wang, Hsin-Min|AUTHOR Hsin-Min Wang]]|
|[[Wang, Jian|AUTHOR Jian Wang]]|
|[[Wang, Jiarui|AUTHOR Jiarui Wang]]|
|[[Wang, Junjie|AUTHOR Junjie Wang]]|
|[[Wang, Jun|AUTHOR Jun Wang]]|
|[[Wang, Jun|AUTHOR Jun Wang]]|
|[[Wang, Kuang-Ching|AUTHOR Kuang-Ching Wang]]|
|[[Wang, L.|AUTHOR L. Wang]]|
|[[Wang, Lan|AUTHOR Lan Wang]]|
|[[Wang, Liming|AUTHOR Liming Wang]]|
|[[Wang, Li|AUTHOR Li Wang]]|
|[[Wang, Li|AUTHOR Li Wang]]|
|[[Wang, Longbiao|AUTHOR Longbiao Wang]]|
|[[Wang, Lu|AUTHOR Lu Wang]]|
|[[Wang, Manna|AUTHOR Manna Wang]]|
|[[Wang, Peidong|AUTHOR Peidong Wang]]|
|[[Wang, Qing|AUTHOR Qing Wang]]|
|[[Wang, Qinyi|AUTHOR Qinyi Wang]]|
|[[Wang, Qiongqiong|AUTHOR Qiongqiong Wang]]|
|[[Wang, Quan|AUTHOR Quan Wang]]|
|[[Wang, Shaojun|AUTHOR Shaojun Wang]]|
|[[Wang, Shuai|AUTHOR Shuai Wang]]|
|[[Wang, Syu-Siang|AUTHOR Syu-Siang Wang]]|
|[[Wang, Tianqi|AUTHOR Tianqi Wang]]|
|[[Wang, Weiqing|AUTHOR Weiqing Wang]]|
|[[Wang, Xi|AUTHOR Xi Wang]]|
|[[Wang, Xianyun|AUTHOR Xianyun Wang]]|
|[[Wang, Xiaofei|AUTHOR Xiaofei Wang]]|
|[[Wang, Xin|AUTHOR Xin Wang]]|
|[[Wang, Xinhao|AUTHOR Xinhao Wang]]|
|[[Wang, Xuyang|AUTHOR Xuyang Wang]]|
|[[Wang, Y.|AUTHOR Y. Wang]]|
|[[Wang, Yi|AUTHOR Yi Wang]]|
|[[Wang, Yiming|AUTHOR Yiming Wang]]|
|[[Wang, Yongqiang|AUTHOR Yongqiang Wang]]|
|[[Wang, Yun|AUTHOR Yun Wang]]|
|[[Wang, Zhiyu|AUTHOR Zhiyu Wang]]|
|[[Wang, Zhong-Qiu|AUTHOR Zhong-Qiu Wang]]|
|[[Wang, Ziqi|AUTHOR Ziqi Wang]]|
|[[Wang, Zi-Rui|AUTHOR Zi-Rui Wang]]|
|[[Wang, Zuowei|AUTHOR Zuowei Wang]]|
|[[Ward, Lauren|AUTHOR Lauren Ward]]|
|[[Ward, Nigel G.|AUTHOR Nigel G. Ward]]|
|[[Warlaumont, Anne S.|AUTHOR Anne S. Warlaumont]]|
|[[Warren, Paul|AUTHOR Paul Warren]]|
|[[Watanabe, Shinji|AUTHOR Shinji Watanabe]]|
|[[Watts, Oliver|AUTHOR Oliver Watts]]|
|[[Webb, Russ|AUTHOR Russ Webb]]|
|[[Weber, Cornelius|AUTHOR Cornelius Weber]]|
|[[Weber, Harli|AUTHOR Harli Weber]]|
|[[Wei, Jianguo|AUTHOR Jianguo Wei]]|
|[[Wei, Xizi|AUTHOR Xizi Wei]]|
|[[Wei, Y.|AUTHOR Y. Wei]]|
|[[Wei, Zice|AUTHOR Zice Wei]]|
|[[Weinberg, Garrett|AUTHOR Garrett Weinberg]]|
|[[Weinstein, Eugene|AUTHOR Eugene Weinstein]]|
|[[Weiss, Ron J.|AUTHOR Ron J. Weiss]]|
|[[Weißkirchen, Norman|AUTHOR Norman Weißkirchen]]|
|[[Wen, Zhengqi|AUTHOR Zhengqi Wen]]|
|[[Wendemuth, Andreas|AUTHOR Andreas Wendemuth]]|
|[[Weng, Chao|AUTHOR Chao Weng]]|
|[[Weng, Yi-Ming|AUTHOR Yi-Ming Weng]]|
|[[Weninger, Felix|AUTHOR Felix Weninger]]|
|[[Wermter, Stefan|AUTHOR Stefan Wermter]]|
|[[Werner, Stefan|AUTHOR Stefan Werner]]|
|[[Westerhof, Gerben J.|AUTHOR Gerben J. Westerhof]]|
|[[Wichern, Gordon|AUTHOR Gordon Wichern]]|
|[[Wickramasinghe, Buddhi|AUTHOR Buddhi Wickramasinghe]]|
|[[Wiesner, Matthew|AUTHOR Matthew Wiesner]]|
|[[Wiles, Janet|AUTHOR Janet Wiles]]|
|[[Willett, Daniel|AUTHOR Daniel Willett]]|
|[[Willi, Megan|AUTHOR Megan Willi]]|
|[[Williams, Jason D.|AUTHOR Jason D. Williams]]|
|[[Williams, Jennifer|AUTHOR Jennifer Williams]]|
|[[Williams, Kyle|AUTHOR Kyle Williams]]|
|[[Williamson, James R.|AUTHOR James R. Williamson]]|
|[[Wilson, Kevin|AUTHOR Kevin Wilson]]|
|[[Wilson, Melissa|AUTHOR Melissa Wilson]]|
|[[Winkelmann, Raphael|AUTHOR Raphael Winkelmann]]|
|[[Wisler, Alan|AUTHOR Alan Wisler]]|
|[[Wissing, Daan|AUTHOR Daan Wissing]]|
|[[Włodarczak, Marcin|AUTHOR Marcin Włodarczak]]|
|[[Woeste, Hannah M.|AUTHOR Hannah M. Woeste]]|
|[[Wójciak, Łukasz|AUTHOR Łukasz Wójciak]]|
|[[Wolf, Lior|AUTHOR Lior Wolf]]|
|[[Wong, Ka Ho|AUTHOR Ka Ho Wong]]|
|[[Wong, Raymond Chi-Wing|AUTHOR Raymond Chi-Wing Wong]]|
|[[Wood, H.|AUTHOR H. Wood]]|
|[[Wood, Sean U.N.|AUTHOR Sean U.N. Wood]]|
|[[Woodland, P.C.|AUTHOR P.C. Woodland]]|
|[[Woolridge, Stephanie|AUTHOR Stephanie Woolridge]]|
|[[Wright, Richard|AUTHOR Richard Wright]]|
|[[Wróbel, Adam|AUTHOR Adam Wróbel]]|
|[[Wu, Chung-Hsien|AUTHOR Chung-Hsien Wu]]|
|[[Wu, Fei|AUTHOR Fei Wu]]|
|[[Wu, Haiwei|AUTHOR Haiwei Wu]]|
|[[Wu, Hua|AUTHOR Hua Wu]]|
|[[Wu, Jian|AUTHOR Jian Wu]]|
|[[Wu, Jibin|AUTHOR Jibin Wu]]|
|[[Wu, Jie|AUTHOR Jie Wu]]|
|[[Wu, Kai-Cheng|AUTHOR Kai-Cheng Wu]]|
|[[Wu, Long|AUTHOR Long Wu]]|
|[[Wu, Mengfei|AUTHOR Mengfei Wu]]|
|[[Wu, Peter|AUTHOR Peter Wu]]|
|[[Wu, Xihong|AUTHOR Xihong Wu]]|
|[[Wu, Xixin|AUTHOR Xixin Wu]]|
|[[Wu, Xueyang|AUTHOR Xueyang Wu]]|
|[[Wu, Yi-Chiao|AUTHOR Yi-Chiao Wu]]|
|[[Wu, Yi-Tong|AUTHOR Yi-Tong Wu]]|
|[[Wu, Yonghui|AUTHOR Yonghui Wu]]|
|[[Wu, Zelin|AUTHOR Zelin Wu]]|
|[[Wu, Zhanghao|AUTHOR Zhanghao Wu]]|
|[[Wu, Zhiyong|AUTHOR Zhiyong Wu]]|
|[[Wu, Zhizheng|AUTHOR Zhizheng Wu]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Xia, Wei|AUTHOR Wei Xia]]|
|[[Xiao, Jing|AUTHOR Jing Xiao]]|
|[[Xie, Di|AUTHOR Di Xie]]|
|[[Xie, Jiamin|AUTHOR Jiamin Xie]]|
|[[Xie, Lei|AUTHOR Lei Xie]]|
|[[Xie, Xurong|AUTHOR Xurong Xie]]|
|[[Xiong, Hao|AUTHOR Hao Xiong]]|
|[[Xiong, Shengwu|AUTHOR Shengwu Xiong]]|
|[[Xiong, Yan|AUTHOR Yan Xiong]]|
|[[Xu, Anqi|AUTHOR Anqi Xu]]|
|[[Xu, Bo|AUTHOR Bo Xu]]|
|[[Xu, Chenglin|AUTHOR Chenglin Xu]]|
|[[Xu, Guanghui|AUTHOR Guanghui Xu]]|
|[[Xu, Haihua|AUTHOR Haihua Xu]]|
|[[Xu, Hainan|AUTHOR Hainan Xu]]|
|[[Xu, Haiyang|AUTHOR Haiyang Xu]]|
|[[Xu, Hu|AUTHOR Hu Xu]]|
|[[Xu, Jiaming|AUTHOR Jiaming Xu]]|
|[[Xu, Li|AUTHOR Li Xu]]|
|[[Xu, Manluolan|AUTHOR Manluolan Xu]]|
|[[Xu, Qian|AUTHOR Qian Xu]]|
|[[Xu, Qiantong|AUTHOR Qiantong Xu]]|
|[[Xu, Shan|AUTHOR Shan Xu]]|
|[[Xu, Shugong|AUTHOR Shugong Xu]]|
|[[Xu, Shuzhuang|AUTHOR Shuzhuang Xu]]|
|[[Xu, Xinzhou|AUTHOR Xinzhou Xu]]|
|[[Xu, Yong|AUTHOR Yong Xu]]|
|[[Xu, Yumo|AUTHOR Yumo Xu]]|
|[[Xue, Liumeng|AUTHOR Liumeng Xue]]|
|[[Xue, Wei|AUTHOR Wei Xue]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Yadav, Ravi|AUTHOR Ravi Yadav]]|
|[[Yalta Soplin, Nelson Enrique|AUTHOR Nelson Enrique Yalta Soplin]]|
|[[Yamagishi, Junichi|AUTHOR Junichi Yamagishi]]|
|[[Yamaguchi, Yoshikazu|AUTHOR Yoshikazu Yamaguchi]]|
|[[Yamamoto, Hitoshi|AUTHOR Hitoshi Yamamoto]]|
|[[Yamamoto, Katsuhiko|AUTHOR Katsuhiko Yamamoto]]|
|[[Yamamoto, Ryuichi|AUTHOR Ryuichi Yamamoto]]|
|[[Yamamoto, Taiki|AUTHOR Taiki Yamamoto]]|
|[[Yan, Jinghao|AUTHOR Jinghao Yan]]|
|[[Yan, Nan|AUTHOR Nan Yan]]|
|[[Yan, Quanlei|AUTHOR Quanlei Yan]]|
|[[Yan, Yonghong|AUTHOR Yonghong Yan]]|
|[[Yan, Zhijie|AUTHOR Zhijie Yan]]|
|[[Yang, Bing|AUTHOR Bing Yang]]|
|[[Yang, Chun|AUTHOR Chun Yang]]|
|[[Yang, Gene-Ping|AUTHOR Gene-Ping Yang]]|
|[[Yang, Guofu|AUTHOR Guofu Yang]]|
|[[Yang, Hansi|AUTHOR Hansi Yang]]|
|[[Yang, IL-Ho|AUTHOR IL-Ho Yang]]|
|[[Yang, Jian|AUTHOR Jian Yang]]|
|[[Yang, Jichen|AUTHOR Jichen Yang]]|
|[[Yang, Jinyi|AUTHOR Jinyi Yang]]|
|[[Yang, Joon-Young|AUTHOR Joon-Young Yang]]|
|[[Yang, Lin|AUTHOR Lin Yang]]|
|[[Yang, Mu|AUTHOR Mu Yang]]|
|[[Yang, Qiang|AUTHOR Qiang Yang]]|
|[[Yang, Seung Hee|AUTHOR Seung Hee Yang]]|
|[[Yang, Shicai|AUTHOR Shicai Yang]]|
|[[Yang, Song|AUTHOR Song Yang]]|
|[[Yang, Yaogen|AUTHOR Yaogen Yang]]|
|[[Yang, Yaping|AUTHOR Yaping Yang]]|
|[[Yang, Yexin|AUTHOR Yexin Yang]]|
|[[Yang, Zixiaofan|AUTHOR Zixiaofan Yang]]|
|[[Yankowitz, Lisa|AUTHOR Lisa Yankowitz]]|
|[[Yanushevskaya, Irena|AUTHOR Irena Yanushevskaya]]|
|[[Yao, Jian|AUTHOR Jian Yao]]|
|[[Yarra, Chiranjeevi|AUTHOR Chiranjeevi Yarra]]|
|[[Ye, Jieping|AUTHOR Jieping Ye]]|
|[[Ye, Zhongfu|AUTHOR Zhongfu Ye]]|
|[[Yeh, Cheng-chieh|AUTHOR Cheng-chieh Yeh]]|
|[[Yeh, Sung-Lin|AUTHOR Sung-Lin Yeh]]|
|[[Yemez, Yücel|AUTHOR Yücel Yemez]]|
|[[Yeung, Gary|AUTHOR Gary Yeung]]|
|[[Yi, Cheng|AUTHOR Cheng Yi]]|
|[[Yi, Jiangyan|AUTHOR Jiangyan Yi]]|
|[[Yi, Yuan-Hao|AUTHOR Yuan-Hao Yi]]|
|[[Yılmaz, Emre|AUTHOR Emre Yılmaz]]|
|[[Yin, Ruiqing|AUTHOR Ruiqing Yin]]|
|[[Yin, Xu-cheng|AUTHOR Xu-cheng Yin]]|
|[[Ylinen, Sari|AUTHOR Sari Ylinen]]|
|[[Yokoe, Yuriko|AUTHOR Yuriko Yokoe]]|
|[[Yokoyama, Katsuya|AUTHOR Katsuya Yokoyama]]|
|[[Yolchuyeva, Sevinj|AUTHOR Sevinj Yolchuyeva]]|
|[[Yolwas, Nurmemet|AUTHOR Nurmemet Yolwas]]|
|[[Yoo, Hiyon|AUTHOR Hiyon Yoo]]|
|[[Yoon, Sung-Hyun|AUTHOR Sung-Hyun Yoon]]|
|[[Yoon, Su-Youn|AUTHOR Su-Youn Yoon]]|
|[[Yoshida, Takami|AUTHOR Takami Yoshida]]|
|[[Yoshinaga, Tsukasa|AUTHOR Tsukasa Yoshinaga]]|
|[[Yoshino, Koichiro|AUTHOR Koichiro Yoshino]]|
|[[Yoshioka, Takuya|AUTHOR Takuya Yoshioka]]|
|[[You, Chang Huai|AUTHOR Chang Huai You]]|
|[[You, Lanhua|AUTHOR Lanhua You]]|
|[[You, Yongbin|AUTHOR Yongbin You]]|
|[[Yousefi, Midia|AUTHOR Midia Yousefi]]|
|[[Yu, Chengzhu|AUTHOR Chengzhu Yu]]|
|[[Yu, Dong|AUTHOR Dong Yu]]|
|[[Yu, Ha-Jin|AUTHOR Ha-Jin Yu]]|
|[[Yu, Jianwei|AUTHOR Jianwei Yu]]|
|[[Yu, Kai|AUTHOR Kai Yu]]|
|[[Yu, Meng|AUTHOR Meng Yu]]|
|[[Yu, Mingzhi|AUTHOR Mingzhi Yu]]|
|[[Yu, Ya-Qi|AUTHOR Ya-Qi Yu]]|
|[[Yuan, Jiahong|AUTHOR Jiahong Yuan]]|
|[[Yue, Xianghu|AUTHOR Xianghu Yue]]|
|[[Yun, Sungrack|AUTHOR Sungrack Yun]]|
|[[Yunusova, Yana|AUTHOR Yana Yunusova]]|
|[[Yusuf, Bolaji|AUTHOR Bolaji Yusuf]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpauthorindexlisttable|k
|[[Zadrazil, Petr|AUTHOR Petr Zadrazil]]|
|[[Zajíc, Zbyněk|AUTHOR Zbyněk Zajíc]]|
|[[Zanon Boito, Marcely|AUTHOR Marcely Zanon Boito]]|
|[[Zapotoczny, Michał|AUTHOR Michał Zapotoczny]]|
|[[Zappi, Victor|AUTHOR Victor Zappi]]|
|[[Zarrieß, Sina|AUTHOR Sina Zarrieß]]|
|[[Zatvornitskiy, Alexander|AUTHOR Alexander Zatvornitskiy]]|
|[[Zayats, Vicky|AUTHOR Vicky Zayats]]|
|[[Zdansky, Jindrich|AUTHOR Jindrich Zdansky]]|
|[[Zechner, Klaus|AUTHOR Klaus Zechner]]|
|[[Zegers, Jeroen|AUTHOR Jeroen Zegers]]|
|[[Zeinali, Hossein|AUTHOR Hossein Zeinali]]|
|[[Żelasko, Piotr|AUTHOR Piotr Żelasko]]|
|[[Zellers, Margaret|AUTHOR Margaret Zellers]]|
|[[Zellou, Georgia|AUTHOR Georgia Zellou]]|
|[[Zen, Heiga|AUTHOR Heiga Zen]]|
|[[Zeng, Michael|AUTHOR Michael Zeng]]|
|[[Zeng, Zhiping|AUTHOR Zhiping Zeng]]|
|[[Zeyer, Albert|AUTHOR Albert Zeyer]]|
|[[Zezario, Ryandhimas E.|AUTHOR Ryandhimas E. Zezario]]|
|[[Zhan, Puming|AUTHOR Puming Zhan]]|
|[[Zhang, Chao|AUTHOR Chao Zhang]]|
|[[Zhang, Chunlei|AUTHOR Chunlei Zhang]]|
|[[Zhang, Chuxiong|AUTHOR Chuxiong Zhang]]|
|[[Zhang, Hao|AUTHOR Hao Zhang]]|
|[[Zhang, Hao|AUTHOR Hao Zhang]]|
|[[Zhang, Hui|AUTHOR Hui Zhang]]|
|[[Zhang, Hui|AUTHOR Hui Zhang]]|
|[[Zhang, Jiacen|AUTHOR Jiacen Zhang]]|
|[[Zhang, Jiajun|AUTHOR Jiajun Zhang]]|
|[[Zhang, Jinchuan|AUTHOR Jinchuan Zhang]]|
|[[Zhang, Jingyang|AUTHOR Jingyang Zhang]]|
|[[Zhang, Jinsong|AUTHOR Jinsong Zhang]]|
|[[Zhang, Malu|AUTHOR Malu Zhang]]|
|[[Zhang, Mingyang|AUTHOR Mingyang Zhang]]|
|[[Zhang, Pengyuan|AUTHOR Pengyuan Zhang]]|
|[[Zhang, Shilei|AUTHOR Shilei Zhang]]|
|[[Zhang, Shiliang|AUTHOR Shiliang Zhang]]|
|[[Zhang, Shi-Xiong|AUTHOR Shi-Xiong Zhang]]|
|[[Zhang, Shucong|AUTHOR Shucong Zhang]]|
|[[Zhang, Wangyou|AUTHOR Wangyou Zhang]]|
|[[Zhang, Wei|AUTHOR Wei Zhang]]|
|[[Zhang, Wei-Qiang|AUTHOR Wei-Qiang Zhang]]|
|[[Zhang, Xiaohan|AUTHOR Xiaohan Zhang]]|
|[[Zhang, Xiao-Ping|AUTHOR Xiao-Ping Zhang]]|
|[[Zhang, Xueliang|AUTHOR Xueliang Zhang]]|
|[[Zhang, Yang|AUTHOR Yang Zhang]]|
|[[Zhang, Yu|AUTHOR Yu Zhang]]|
|[[Zhang, Zhen|AUTHOR Zhen Zhang]]|
|[[Zhang, Zhenrui|AUTHOR Zhenrui Zhang]]|
|[[Zhang, Zhuohuang|AUTHOR Zhuohuang Zhang]]|
|[[Zhang, Zixing|AUTHOR Zixing Zhang]]|
|[[Zhao, Chenghao|AUTHOR Chenghao Zhao]]|
|[[Zhao, Ding|AUTHOR Ding Zhao]]|
|[[Zhao, Guanlong|AUTHOR Guanlong Zhao]]|
|[[Zhao, Jinming|AUTHOR Jinming Zhao]]|
|[[Zhao, Li|AUTHOR Li Zhao]]|
|[[Zhao, Miao|AUTHOR Miao Zhao]]|
|[[Zhao, Sheng|AUTHOR Sheng Zhao]]|
|[[Zhao, Shengkui|AUTHOR Shengkui Zhao]]|
|[[Zhao, Tianyu|AUTHOR Tianyu Zhao]]|
|[[Zhao, Yi|AUTHOR Yi Zhao]]|
|[[Zhao, Yunxin|AUTHOR Yunxin Zhao]]|
|[[Zhao, Ziping|AUTHOR Ziping Zhao]]|
|[[Zhen, Kai|AUTHOR Kai Zhen]]|
|[[Zheng, Jimeng|AUTHOR Jimeng Zheng]]|
|[[Zheng, Siqi|AUTHOR Siqi Zheng]]|
|[[Zheng, Wei-Zhong|AUTHOR Wei-Zhong Zheng]]|
|[[Zheng, Yibin|AUTHOR Yibin Zheng]]|
|[[Zhi, Pengpeng|AUTHOR Pengpeng Zhi]]|
|[[Zhong, Jiaqi|AUTHOR Jiaqi Zhong]]|
|[[Zhong, Shun-Chang|AUTHOR Shun-Chang Zhong]]|
|[[Zhou, Bowen|AUTHOR Bowen Zhou]]|
|[[Zhou, Fang|AUTHOR Fang Zhou]]|
|[[Zhou, Jianfeng|AUTHOR Jianfeng Zhou]]|
|[[Zhou, Kun|AUTHOR Kun Zhou]]|
|[[Zhou, Pan|AUTHOR Pan Zhou]]|
|[[Zhou, Weicong|AUTHOR Weicong Zhou]]|
|[[Zhou, Ying|AUTHOR Ying Zhou]]|
|[[Zhou, Zhong|AUTHOR Zhong Zhou]]|
|[[Zhu, Feiqi|AUTHOR Feiqi Zhu]]|
|[[Zhu, Han|AUTHOR Han Zhu]]|
|[[Zhu, Ji|AUTHOR Ji Zhu]]|
|[[Zhu, Licheng Richard|AUTHOR Licheng Richard Zhu]]|
|[[Zhu, Shuangshuang|AUTHOR Shuangshuang Zhu]]|
|[[Zhu, Yingke|AUTHOR Yingke Zhu]]|
|[[Zimmermann, Roger|AUTHOR Roger Zimmermann]]|
|[[Zisserman, Andrew|AUTHOR Andrew Zisserman]]|
|[[Zong, Chengqing|AUTHOR Chengqing Zong]]|
|[[Zoph, Barret|AUTHOR Barret Zoph]]|
|[[Zou, Yuexian|AUTHOR Yuexian Zou]]|
|[[Zou, Yuxiang|AUTHOR Yuxiang Zou]]|
|[[Zuluaga, Mauricio|AUTHOR Mauricio Zuluaga]]|
|[[Żyła-Hoppe, Marzena|AUTHOR Marzena Żyła-Hoppe]]|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193191.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-2|PAPER Tue-P-5-A-2 — Building a Mixed-Lingual Neural TTS System with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Mixed-Lingual Neural TTS System with Only Monolingual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192731.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-4|PAPER Wed-O-6-2-4 — A Deep Residual Network for Large-Scale Acoustic Scene Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Residual Network for Large-Scale Acoustic Scene Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-3|PAPER Mon-P-1-B-3 — Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191701.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-4|PAPER Thu-O-10-4-4 — CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192396.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-6|PAPER Mon-P-2-C-6 — Speech Model Pre-Training for End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Model Pre-Training for End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-6|PAPER Wed-O-6-2-6 — Self-Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-5|PAPER Tue-P-3-D-5 — “ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-2|PAPER Tue-O-5-3-2 — Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192995.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-2|PAPER Wed-P-7-D-2 — Articulation of Vowel Length Contrasts in Australian English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulation of Vowel Length Contrasts in Australian English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-6|PAPER Thu-O-9-4-6 — Parameter-Transfer Learning for Low-Resource Individualization of Head-Related Transfer Functions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parameter-Transfer Learning for Low-Resource Individualization of Head-Related Transfer Functions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198011.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-3|PAPER Wed-S&T-3-3 — Multimodal Dialog with the MALACH Audiovisual Archive]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Dialog with the MALACH Audiovisual Archive</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-3|PAPER Tue-P-4-E-3 — Fully-Convolutional Network for Pitch Estimation of Speech Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fully-Convolutional Network for Pitch Estimation of Speech Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-4|PAPER Wed-S&T-5-4 — Synthesized Spoken Names: Biases Impacting Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synthesized Spoken Names: Biases Impacting Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192224.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-2|PAPER Mon-O-2-3-2 — Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-4|PAPER Thu-O-10-3-4 — Age-Related Changes in European Portuguese Vowel Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Changes in European Portuguese Vowel Acoustics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191839.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-8|PAPER Mon-P-1-D-8 — EpaDB: A Database for Development of Pronunciation Assessment Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">EpaDB: A Database for Development of Pronunciation Assessment Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-6|PAPER Thu-P-10-A-6 — Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-11|PAPER Thu-P-10-A-11 — Optimizing a Speaker Embedding Extractor Through Backend-Driven Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing a Speaker Embedding Extractor Through Backend-Driven Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-3|PAPER Tue-P-4-B-3 — Using Alexa for Flashcard-Based Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Alexa for Flashcard-Based Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-5|PAPER Tue-P-5-E-5 — On the Use/Misuse of the Term ‘Phoneme’]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Use/Misuse of the Term ‘Phoneme’</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-10|PAPER Thu-P-10-D-10 — Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Progressive Speech Enhancement with Residual Connections</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192224.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-2|PAPER Mon-O-2-3-2 — Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192813.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-1|PAPER Mon-P-1-A-1 — Bayesian HMM Based x-Vector Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian HMM Based x-Vector Clustering for Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-1|PAPER Wed-O-8-5-1 — Self-Supervised Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-7|PAPER Thu-P-10-A-7 — Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192549.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-5|PAPER Tue-O-3-5-5 — Unsupervised Training of Neural Mask-Based Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Training of Neural Mask-Based Beamforming</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191407.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-10|PAPER Mon-P-1-E-10 — An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-5|PAPER Mon-O-2-1-5 — A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-13|PAPER Tue-P-5-C-13 — ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191332.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-1|PAPER Wed-P-8-C-1 — Reverse Transfer Learning: Can Word Embeddings Trained for Different NLP Tasks Improve Neural Language Models?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reverse Transfer Learning: Can Word Embeddings Trained for Different NLP Tasks Improve Neural Language Models?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192741.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-4|PAPER Tue-P-5-D-4 — Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192685.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-5|PAPER Tue-P-5-D-5 — Quantifying Expectation Modulation in Human Speech Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Expectation Modulation in Human Speech Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192929.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-4|PAPER Thu-P-9-E-4 — Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191625.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-6|PAPER Thu-P-9-E-6 — Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-2|PAPER Wed-P-8-C-2 — Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-6|PAPER Thu-P-10-A-6 — Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-1|PAPER Wed-P-7-D-1 — Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-11|PAPER Wed-P-6-C-11 — Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192726.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-2|PAPER Thu-P-9-B-2 — Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-9|PAPER Thu-P-9-B-9 — Video-Driven Speech Reconstruction Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Video-Driven Speech Reconstruction Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-4|PAPER Thu-P-10-A-4 — Variational Domain Adversarial Learning for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Domain Adversarial Learning for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191775.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-4|PAPER Mon-O-2-3-4 — Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191262.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-13|PAPER Mon-P-2-C-13 — A Comparison of Deep Learning Methods for Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Deep Learning Methods for Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-K-3|PAPER Wed-K-3 — Physiology and Physics of Voice Production]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Physiology and Physics of Voice Production</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-7|PAPER Mon-P-2-D-7 — Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-14|PAPER Tue-SS-4-4-14 — Deep Residual Neural Networks for Audio Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Residual Neural Networks for Audio Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191421.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-2|PAPER Wed-O-6-2-2 — Locality-Constrained Linear Coding Based Fused Visual Features for Robust Acoustic Event Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Locality-Constrained Linear Coding Based Fused Visual Features for Robust Acoustic Event Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192528.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-4|PAPER Tue-O-3-4-4 — The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192432.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-7|PAPER Thu-P-10-D-7 — Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-2|PAPER Mon-P-2-E-2 — ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-9|PAPER Mon-P-2-E-9 — An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-4|PAPER Wed-P-6-E-4 — A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192612.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191804.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-4|PAPER Thu-P-9-B-4 — Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191513.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-6|PAPER Wed-O-7-4-6 — Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191949.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-2|PAPER Thu-O-10-1-2 — Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192415.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-2|PAPER Thu-SS-9-6-2 — Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-6|PAPER Wed-O-7-2-6 — Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-4|PAPER Tue-SS-4-4-4 — Robust Bayesian and Light Neural Networks for Voice Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Bayesian and Light Neural Networks for Voice Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-3|PAPER Wed-O-6-1-3 — Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191592.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-6|PAPER Thu-P-9-C-6 — Voice Quality as a Turn-Taking Cue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality as a Turn-Taking Cue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-4|PAPER Tue-O-3-1-4 — Adapting Transformer to End-to-End Spoken Language Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adapting Transformer to End-to-End Spoken Language Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-1|PAPER Mon-P-1-B-1 — Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191385.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-4|PAPER Tue-SS-3-6-4 — UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-7|PAPER Tue-P-3-D-7 — Prosodic Effects on Plosive Duration in German and Austrian German]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Effects on Plosive Duration in German and Austrian German</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-12|PAPER Tue-P-3-D-12 — A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-2|PAPER Wed-P-6-D-2 — Disfluencies and Human Speech Transcription Errors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Human Speech Transcription Errors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-1|PAPER Thu-P-9-D-1 — On the Role of Style in Parsing Speech with Neural Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Style in Parsing Speech with Neural Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192838.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-8|PAPER Wed-P-8-D-8 — Liquid Deletion in French Child-Directed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Liquid Deletion in French Child-Directed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Path Signature Approach for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-7|PAPER Mon-P-2-D-7 — Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191385.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-4|PAPER Tue-SS-3-6-4 — UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191693.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-11|PAPER Wed-SS-6-4-11 — Spatial, Temporal and Spectral Multiresolution Analysis for the INTERSPEECH 2019 ComParE Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial, Temporal and Spectral Multiresolution Analysis for the INTERSPEECH 2019 ComParE Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-2|PAPER Thu-O-10-4-2 — Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192528.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-4|PAPER Tue-O-3-4-4 — The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191788.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-8|PAPER Mon-P-2-B-8 — Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-5|PAPER Tue-O-3-4-5 — The Neural Correlates Underlying Lexically-Guided Perceptual Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Neural Correlates Underlying Lexically-Guided Perceptual Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191487.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-5|PAPER Wed-O-7-2-5 — Multimodal Word Discovery and Retrieval with Phone Sequence and Image Concepts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Word Discovery and Retrieval with Phone Sequence and Image Concepts</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-4|PAPER Wed-O-7-1-4 — Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193186.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-1|PAPER Tue-P-4-B-1 — A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-7|PAPER Wed-P-8-B-7 — Automatic Detection of Prosodic Focus in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Prosodic Focus in American English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192335.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-7|PAPER Tue-P-5-A-7 — Unified Language-Independent DNN-Based G2P Converter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Language-Independent DNN-Based G2P Converter</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192162.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-5|PAPER Mon-P-2-B-5 — Cumulative Adaptation for BLSTM Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cumulative Adaptation for BLSTM Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192702.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-2|PAPER Mon-O-1-1-2 — Very Deep Self-Attention Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Very Deep Self-Attention Networks for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198013.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-1|PAPER Wed-S&T-5-1 — Web-Based Speech Synthesis Editor]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Web-Based Speech Synthesis Editor</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198009.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-2|PAPER Thu-S&T-6-2 — Framework for Conducting Tasks Requiring Human Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framework for Conducting Tasks Requiring Human Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191846.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-6|PAPER Wed-P-7-E-6 — Open-Vocabulary Keyword Spotting with Audio and Text Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Vocabulary Keyword Spotting with Audio and Text Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-8|PAPER Tue-P-3-D-8 — Cross-Lingual Consistency of Phonological Features: An Empirical Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual Consistency of Phonological Features: An Empirical Study</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192740.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-12|PAPER Tue-P-5-C-12 — Sampling from Stochastic Finite Automata with Applications to CTC Decoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sampling from Stochastic Finite Automata with Applications to CTC Decoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-5|PAPER Tue-P-3-D-5 — “ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192890.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-3|PAPER Wed-P-7-D-3 — V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-5|PAPER Wed-P-7-D-5 — Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192984.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-2|PAPER Tue-P-4-D-2 — Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-3|PAPER Wed-O-6-1-3 — Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192743.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Explaining Sentiment Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Explaining Sentiment Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192115.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-3|PAPER Mon-O-2-4-3 — Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191597.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-9|PAPER Mon-P-1-B-9 — Knowledge Distillation for Throat Microphone Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for Throat Microphone Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191597.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-9|PAPER Mon-P-1-B-9 — Knowledge Distillation for Throat Microphone Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for Throat Microphone Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191220.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-5|PAPER Mon-O-1-2-5 — Variational Bayesian Multi-Channel Speech Dereverberation Under Noisy Environments with Probabilistic Convolutive Transfer Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Bayesian Multi-Channel Speech Dereverberation Under Noisy Environments with Probabilistic Convolutive Transfer Function</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191289.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-4|PAPER Wed-O-7-4-4 — Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191855.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-1|PAPER Wed-O-6-2-1 — Audio Classification of Bit-Representation Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Classification of Bit-Representation Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-14|PAPER Mon-P-1-B-14 — One-Pass Single-Channel Noisy Speech Recognition Using a Combination of Noisy and Enhanced Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Pass Single-Channel Noisy Speech Recognition Using a Combination of Noisy and Enhanced Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192229.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-4|PAPER Wed-P-7-C-4 — The Contribution of Acoustic Features Analysis to Model Emotion Perceptual Process for Language Diversity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Contribution of Acoustic Features Analysis to Model Emotion Perceptual Process for Language Diversity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-9|PAPER Tue-P-4-E-9 — Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191930.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-11|PAPER Tue-P-3-B-11 — Direct Neuron-Wise Fusion of Cognate Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Neuron-Wise Fusion of Cognate Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-5|PAPER Tue-O-4-3-5 — Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192132.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-5|PAPER Wed-P-6-B-5 — Leveraging a Character, Word and Prosody Triplet for an ASR Error Robust and Agglutination Friendly Punctuation Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging a Character, Word and Prosody Triplet for an ASR Error Robust and Agglutination Friendly Punctuation Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-5|PAPER Thu-O-9-5-5 — Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-4|PAPER Tue-SS-4-4-4 — Robust Bayesian and Light Neural Networks for Voice Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Bayesian and Light Neural Networks for Voice Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-6|PAPER Tue-P-5-E-6 — Understanding and Visualizing Raw Waveform-Based CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding and Visualizing Raw Waveform-Based CNNs</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-2|PAPER Wed-SS-6-4-2 — Using Speech Production Knowledge for Raw Waveform Modelling Based Styrian Dialect Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech Production Knowledge for Raw Waveform Modelling Based Styrian Dialect Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-1|PAPER Tue-O-4-3-1 — Fusion Strategy for Prosodic and Lexical Representations of Word Importance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Strategy for Prosodic and Lexical Representations of Word Importance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-4|PAPER Tue-O-3-1-4 — Adapting Transformer to End-to-End Spoken Language Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adapting Transformer to End-to-End Spoken Language Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191735.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-6|PAPER Mon-P-1-E-6 — Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191281.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-11|PAPER Wed-P-6-D-11 — R²SPIN: Re-Recording the Revised Speech Perception in Noise Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R²SPIN: Re-Recording the Revised Speech Perception in Noise Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192283.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-5|PAPER Tue-O-5-3-5 — Detecting Depression with Word-Level Multimodal Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Depression with Word-Level Multimodal Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192219.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-7|PAPER Tue-P-5-E-7 — Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-4|PAPER Wed-P-8-B-4 — Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-4|PAPER Tue-O-3-1-4 — Adapting Transformer to End-to-End Spoken Language Translation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adapting Transformer to End-to-End Spoken Language Translation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191592.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-6|PAPER Thu-P-9-C-6 — Voice Quality as a Turn-Taking Cue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality as a Turn-Taking Cue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191994.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-5|PAPER Thu-P-10-A-5 — A Unified Framework for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192219.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-7|PAPER Tue-P-5-E-7 — Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-2|PAPER Wed-O-8-3-2 — Continuous Emotion Recognition in Speech — Do We Need Recurrence?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continuous Emotion Recognition in Speech — Do We Need Recurrence?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191402.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-10|PAPER Wed-P-6-D-10 — Talker Intelligibility and Listening Effort with Temporally Modified Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Talker Intelligibility and Listening Effort with Temporally Modified Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193062.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-3|PAPER Tue-P-4-C-3 — Analyzing Verbal and Nonverbal Features for Predicting Group Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Verbal and Nonverbal Features for Predicting Group Performance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-12|PAPER Tue-P-3-C-12 — Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-4|PAPER Tue-P-5-E-4 — DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-2|PAPER Mon-P-1-B-2 — Label Driven Time-Frequency Masking for Robust Continuous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Label Driven Time-Frequency Masking for Robust Continuous Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192090.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-5|PAPER Mon-P-1-B-5 — Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191786.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-10|PAPER Tue-P-4-C-10 — Do Conversational Partners Entrain on Articulatory Precision?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Conversational Partners Entrain on Articulatory Precision?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-3|PAPER Wed-P-7-C-3 — Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-3|PAPER Wed-O-8-2-3 — Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-8|PAPER Thu-P-9-E-8 — A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-11|PAPER Wed-P-8-C-11 — Better Morphology Prediction for Better Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Better Morphology Prediction for Better Speech Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-4|PAPER Tue-O-5-3-4 — Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-9|PAPER Tue-P-4-C-9 — Identifying Mood Episodes Using Dialogue Features from Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Mood Episodes Using Dialogue Features from Clinical Interviews</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-8|PAPER Wed-P-7-C-8 — Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-10|PAPER Tue-P-3-D-10 — Neural Network-Based Modeling of Phonetic Durations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network-Based Modeling of Phonetic Durations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193276.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-1|PAPER Thu-O-9-5-1 — Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191701.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-4|PAPER Thu-O-10-4-4 — CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193096.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-7|PAPER Wed-SS-8-6-7 — Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192190.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-5|PAPER Tue-P-3-A-5 — Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-2|PAPER Wed-P-8-A-2 — Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193140.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-3|PAPER Thu-O-10-2-3 — Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-8|PAPER Tue-SS-4-4-8 — Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191873.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-6|PAPER Wed-P-8-B-6 — wav2vec: Unsupervised Pre-Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">wav2vec: Unsupervised Pre-Training for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191861.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-1|PAPER Mon-P-2-E-1 — Salient Speech Representations Based on Cloned Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Salient Speech Representations Based on Cloned Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-2|PAPER Wed-P-8-C-2 — Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192293.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-6|PAPER Wed-O-8-3-6 — CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192641.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-6|PAPER Mon-O-2-2-6 — Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-3|PAPER Tue-P-3-B-3 — Acoustic Model Optimization Based on Evolutionary Stochastic Gradient Descent with Anchors for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Optimization Based on Evolutionary Stochastic Gradient Descent with Anchors for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-9|PAPER Tue-P-4-C-9 — Identifying Mood Episodes Using Dialogue Features from Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Mood Episodes Using Dialogue Features from Clinical Interviews</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-2|PAPER Wed-O-6-5-2 — Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192793.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-2|PAPER Wed-P-6-B-2 — Detection and Recovery of OOVs for Improved English Broadcast News Captioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection and Recovery of OOVs for Improved English Broadcast News Captioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192995.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-2|PAPER Wed-P-7-D-2 — Articulation of Vowel Length Contrasts in Australian English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulation of Vowel Length Contrasts in Australian English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193096.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-7|PAPER Wed-SS-8-6-7 — Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192389.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-8|PAPER Thu-P-10-D-8 — Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191326.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-10|PAPER Tue-P-4-D-10 — F0 Variability Measures Based on Glottal Closure Instants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Variability Measures Based on Glottal Closure Instants</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Althingi ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-4|PAPER Tue-SS-4-4-4 — Robust Bayesian and Light Neural Networks for Voice Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Bayesian and Light Neural Networks for Voice Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-11|PAPER Tue-P-5-C-11 — Lattice Generation in Attention-Based Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Generation in Attention-Based Speech Recognition Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192720.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-3|PAPER Thu-P-10-B-3 — Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191557.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-8|PAPER Mon-P-1-C-8 — Towards an Annotation Scheme for Complex Laughter in Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards an Annotation Scheme for Complex Laughter in Speech Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192115.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-3|PAPER Mon-O-2-4-3 — Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-7|PAPER Mon-P-2-D-7 — Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191368.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — Expressiveness Influences Human Vocal Alignment Toward voice-AI]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expressiveness Influences Human Vocal Alignment Toward voice-AI</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-1|PAPER Mon-O-1-3-1 — Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-1|PAPER Tue-P-5-D-1 — The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191433.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-13|PAPER Tue-P-5-D-13 — Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-14|PAPER Wed-P-6-A-14 — On Robustness of Unsupervised Domain Adaptation for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Robustness of Unsupervised Domain Adaptation for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191827.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-8|PAPER Thu-P-10-E-8 — Probabilistic Permutation Invariant Training for Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Probabilistic Permutation Invariant Training for Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191344.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-6|PAPER Wed-O-7-5-6 — Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-7|PAPER Tue-P-4-B-7 — Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-10|PAPER Thu-P-9-D-10 — Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193099.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-3|PAPER Mon-O-1-5-3 — Expediting TTS Synthesis with Adversarial Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expediting TTS Synthesis with Adversarial Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-5|PAPER Tue-P-5-B-5 — End-to-End Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Accented Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191846.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-6|PAPER Wed-P-7-E-6 — Open-Vocabulary Keyword Spotting with Audio and Text Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Vocabulary Keyword Spotting with Audio and Text Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-9|PAPER Tue-P-4-C-9 — Identifying Mood Episodes Using Dialogue Features from Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Mood Episodes Using Dialogue Features from Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191117.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-15|PAPER Tue-P-3-B-15 — Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192586.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-4|PAPER Wed-P-6-B-4 — Hybrid Arbitration Using Raw ASR String and NLU Information — Taking the Best of Both Embedded World and Cloud World]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hybrid Arbitration Using Raw ASR String and NLU Information — Taking the Best of Both Embedded World and Cloud World</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-1|PAPER Wed-P-8-A-1 — Parameter Enhancement for MELP Speech Codec in Noisy Communication Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parameter Enhancement for MELP Speech Codec in Noisy Communication Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-11|PAPER Tue-P-5-B-11 — Towards Language-Universal Mandarin-English Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Language-Universal Mandarin-English Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191290.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-2|PAPER Tue-P-5-C-2 — Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191302.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-9|PAPER Wed-P-7-E-9 — Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191388.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-5|PAPER Mon-P-1-A-5 — LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-3|PAPER Tue-SS-4-4-3 — The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-1|PAPER Tue-O-4-1-1 — Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191235.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-12|PAPER Tue-P-5-A-12 — Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-12|PAPER Wed-SS-6-4-12 — The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191435.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-12|PAPER Wed-SS-7-A-12 — The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191542.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-8|PAPER Thu-P-9-A-8 — Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-14|PAPER Thu-P-10-A-14 — Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191436.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-15|PAPER Thu-P-10-A-15 — The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191766.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-5|PAPER Tue-P-5-C-5 — Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191300.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-10|PAPER Thu-P-9-C-10 — Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-4|PAPER Tue-O-4-2-4 — Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-4|PAPER Tue-P-4-E-4 — Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191617.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-12|PAPER Thu-P-10-D-12 — Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191886.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-8|PAPER Tue-P-4-C-8 — Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-10|PAPER Tue-P-4-B-10 — Self-Imitating Feedback Generation Using GAN for Computer-Assisted Pronunciation Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Imitating Feedback Generation Using GAN for Computer-Assisted Pronunciation Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-2|PAPER Wed-P-8-A-2 — Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-2|PAPER Mon-P-2-B-2 — Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191444.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-11|PAPER Thu-P-9-A-11 — Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192616.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-2|PAPER Thu-P-10-A-2 — Self Multi-Head Attention for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self Multi-Head Attention for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-4|PAPER Mon-O-1-4-4 — Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192396.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-6|PAPER Mon-P-2-C-6 — Speech Model Pre-Training for End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Model Pre-Training for End-to-End Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192380.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-3|PAPER Tue-O-3-2-3 — Learning Speaker Representations with Mutual Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Representations with Mutual Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192813.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-1|PAPER Mon-P-1-A-1 — Bayesian HMM Based x-Vector Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian HMM Based x-Vector Clustering for Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191621.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-6|PAPER Thu-P-9-D-6 — Prosodic Phrase Alignment for Machine Dubbing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Phrase Alignment for Machine Dubbing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-11|PAPER Wed-P-7-E-11 — A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Thu-K-4|PAPER Thu-K-4 — Learning Natural Language Interfaces with Neural Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Natural Language Interfaces with Neural Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192446.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-5|PAPER Wed-O-7-1-5 — Foreign-Language Knowledge Enhances Artificial-Language Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Foreign-Language Knowledge Enhances Artificial-Language Segmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-9|PAPER Wed-P-6-D-9 — Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192729.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-2|PAPER Tue-O-3-4-2 — ERP Signal Analysis with Temporal Resolution Using a Time Window Bank]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ERP Signal Analysis with Temporal Resolution Using a Time Window Bank</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-2|PAPER Tue-P-4-B-2 — Language Learning Using Speech to Image Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Learning Using Speech to Image Retrieval</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192741.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-4|PAPER Tue-P-5-D-4 — Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192685.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-5|PAPER Tue-P-5-D-5 — Quantifying Expectation Modulation in Human Speech Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Expectation Modulation in Human Speech Processing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-9|PAPER Wed-P-6-D-9 — Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-5|PAPER Wed-P-6-A-5 — Adversarial Optimization for Dictionary Attacks on Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Optimization for Dictionary Attacks on Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191541.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-8|PAPER Tue-P-5-E-8 — ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-6|PAPER Thu-P-10-A-6 — Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-11|PAPER Thu-P-10-A-11 — Optimizing a Speaker Embedding Extractor Through Backend-Driven Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing a Speaker Embedding Extractor Through Backend-Driven Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191736.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-10|PAPER Mon-P-1-D-10 — Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-10|PAPER Thu-P-9-D-10 — Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198030.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-5|PAPER Tue-S&T-2-5 — FarSpeech: Arabic Natural Language Processing for Live Arabic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FarSpeech: Arabic Natural Language Processing for Live Arabic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-2|PAPER Mon-P-1-E-2 — Automatic Detection of Breath Using Voice Activity Detection and SVM Classifier with Application on News Reports]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Breath Using Voice Activity Detection and SVM Classifier with Application on News Reports</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-4|PAPER Mon-P-2-C-4 — M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191539.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-9|PAPER Thu-P-10-B-9 — Real to H-Space Encoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real to H-Space Encoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-13|PAPER Tue-P-5-C-13 — ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-11|PAPER Wed-P-7-C-11 — Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191117.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-15|PAPER Tue-P-3-B-15 — Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-5|PAPER Tue-P-3-B-5 — Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-4|PAPER Tue-P-5-E-4 — DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198021.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-3|PAPER Wed-S&T-4-3 — PyToBI: A Toolkit for ToBI Labeling Under Python]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PyToBI: A Toolkit for ToBI Labeling Under Python</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192838.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-8|PAPER Wed-P-8-D-8 — Liquid Deletion in French Child-Directed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Liquid Deletion in French Child-Directed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193072.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-2|PAPER Mon-P-1-D-2 — Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191797.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-7|PAPER Wed-P-7-B-7 — Latent Dirichlet Allocation Based Acoustic Data Selection for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Dirichlet Allocation Based Acoustic Data Selection for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192283.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-5|PAPER Tue-O-5-3-5 — Detecting Depression with Word-Level Multimodal Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Depression with Word-Level Multimodal Fusion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192908.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-2|PAPER Thu-P-9-E-2 — Deep Multitask Acoustic Echo Cancellation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Multitask Acoustic Echo Cancellation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191318.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-9|PAPER Wed-P-7-B-9 — Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192507.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-7|PAPER Tue-P-4-D-7 — A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-14|PAPER Tue-SS-4-4-14 — Deep Residual Neural Networks for Audio Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Residual Neural Networks for Audio Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-7|PAPER Mon-P-2-C-7 — Spoken Language Intent Detection Using Confusion2Vec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Language Intent Detection Using Confusion2Vec</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-7|PAPER Thu-P-10-B-7 — Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-6|PAPER Mon-O-1-3-6 — SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192629.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-3|PAPER Wed-P-6-B-3 — Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192622.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-10|PAPER Tue-P-3-E-10 — A Non-Causal FFTNet Architecture for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Non-Causal FFTNet Architecture for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-5|PAPER Mon-P-2-C-5 — Ultra-Compact NLU: Neuronal Network Binarization as Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultra-Compact NLU: Neuronal Network Binarization as Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-3|PAPER Tue-SS-5-6-3 — Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-3|PAPER Wed-O-7-2-3 — An Empirical Evaluation of DTW Subsampling Methods for Keyword Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Empirical Evaluation of DTW Subsampling Methods for Keyword Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191972.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-3|PAPER Tue-O-4-2-3 — Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198030.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-5|PAPER Tue-S&T-2-5 — FarSpeech: Arabic Natural Language Processing for Live Arabic Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">FarSpeech: Arabic Natural Language Processing for Live Arabic Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-2|PAPER Tue-O-4-5-2 — Recursive Speech Separation for Unknown Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recursive Speech Separation for Unknown Number of Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-1|PAPER Mon-P-2-D-1 — Multi-Corpus Acoustic-to-Articulatory Speech Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Corpus Acoustic-to-Articulatory Speech Inversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-6|PAPER Tue-O-5-3-6 — Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-12|PAPER Wed-P-7-C-12 — Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-7|PAPER Mon-P-2-E-7 — On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-9|PAPER Tue-P-3-E-9 — Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192622.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-10|PAPER Tue-P-3-E-10 — A Non-Causal FFTNet Architecture for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Non-Causal FFTNet Architecture for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-3|PAPER Mon-P-1-A-3 — MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191794.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-1|PAPER Tue-SS-4-4-1 — ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-2|PAPER Wed-O-7-2-2 — Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-12|PAPER Wed-P-6-A-12 — Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192397.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-9|PAPER Thu-P-9-E-9 — End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191794.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-1|PAPER Tue-SS-4-4-1 — ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-12|PAPER Wed-P-6-A-12 — Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191902.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-7|PAPER Wed-P-6-D-7 — Effects of Urgent Speech and Congruent/Incongruent Text on Speech Intelligibility in Noise and Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Urgent Speech and Congruent/Incongruent Text on Speech Intelligibility in Noise and Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-1|PAPER Mon-O-1-2-1 — Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-3|PAPER Mon-O-2-3-3 — Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191855.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-1|PAPER Wed-O-6-2-1 — Audio Classification of Bit-Representation Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Classification of Bit-Representation Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-2|PAPER Tue-O-4-5-2 — Recursive Speech Separation for Unknown Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recursive Speech Separation for Unknown Number of Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191313.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-9|PAPER Thu-P-9-C-9 — Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192899.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-1|PAPER Thu-P-10-A-1 — End-to-End Neural Speaker Diarization with Permutation-Free Objectives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Speaker Diarization with Permutation-Free Objectives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-5|PAPER Wed-P-6-A-5 — Adversarial Optimization for Dictionary Attacks on Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Optimization for Dictionary Attacks on Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-8|PAPER Mon-P-2-C-8 — Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-2|PAPER Tue-P-5-D-2 — Individual Differences in Implicit Attention to Phonetic Detail in Speech Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Differences in Implicit Attention to Phonetic Detail in Speech Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-2|PAPER Thu-O-10-4-2 — Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-1|PAPER Tue-P-5-E-1 — Multiview Shared Subspace Learning Across Speakers and Speech Commands]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiview Shared Subspace Learning Across Speakers and Speech Commands</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192526.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-5|PAPER Thu-O-9-1-5 — A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-11|PAPER Wed-P-7-E-11 — A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192190.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-5|PAPER Tue-P-3-A-5 — Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191750.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-9|PAPER Mon-P-1-D-9 — Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192293.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-6|PAPER Wed-O-8-3-6 — CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191677.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-3|PAPER Wed-P-8-D-3 — Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-11|PAPER Thu-P-10-B-11 — End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192702.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-2|PAPER Mon-O-1-1-2 — Very Deep Self-Attention Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Very Deep Self-Attention Networks for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191336.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-9|PAPER Thu-P-9-D-9 — Noisy BiLSTM-Based Models for Disfluency Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noisy BiLSTM-Based Models for Disfluency Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192529.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-4|PAPER Tue-P-3-D-4 — The Voicing Contrast in Stops and Affricates in the Western Armenian of Lebanon]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Voicing Contrast in Stops and Affricates in the Western Armenian of Lebanon</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191846.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-6|PAPER Wed-P-7-E-6 — Open-Vocabulary Keyword Spotting with Audio and Text Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Open-Vocabulary Keyword Spotting with Audio and Text Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191786.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-10|PAPER Tue-P-4-C-10 — Do Conversational Partners Entrain on Articulatory Precision?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Conversational Partners Entrain on Articulatory Precision?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-1|PAPER Thu-S&T-6-1 — Elpis, an Accessible Speech-to-Text Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Elpis, an Accessible Speech-to-Text Tool</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-5|PAPER Mon-O-2-1-5 — A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-2|PAPER Wed-O-8-3-2 — Continuous Emotion Recognition in Speech — Do We Need Recurrence?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Continuous Emotion Recognition in Speech — Do We Need Recurrence?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-13|PAPER Tue-P-5-C-13 — ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-1|PAPER Thu-S&T-6-1 — Elpis, an Accessible Speech-to-Text Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Elpis, an Accessible Speech-to-Text Tool</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-5|PAPER Tue-P-3-D-5 — “ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-15|PAPER Thu-P-10-C-15 — Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192743.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Explaining Sentiment Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Explaining Sentiment Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-1|PAPER Wed-O-6-1-1 — Survey Talk: Prosody Research and Applications: The State of the Art]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Prosody Research and Applications: The State of the Art</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-4|PAPER Mon-O-1-1-4 — Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-4|PAPER Thu-O-9-3-4 — Vectorized Beam Search for CTC-Attention-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vectorized Beam Search for CTC-Attention-Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-6|PAPER Mon-P-2-E-6 — Unsupervised Low-Rank Representations for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Low-Rank Representations for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191504.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-8|PAPER Mon-P-1-E-8 — Phone Aware Nearest Neighbor Technique Using Spectral Transition Measure for Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone Aware Nearest Neighbor Technique Using Spectral Transition Measure for Non-Parallel Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192608.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-4|PAPER Tue-P-3-B-4 — Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-2|PAPER Tue-P-3-D-2 — The Monophthongs of Formal Nigerian English: An Acoustic Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Monophthongs of Formal Nigerian English: An Acoustic Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-5|PAPER Mon-O-2-5-5 — Towards Variability Resistant Dialectal Speech Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Variability Resistant Dialectal Speech Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192188.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-6|PAPER Tue-P-3-A-6 — Evaluating Intention Communication by TTS Using Explicit Definitions of Illocutionary Act Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Intention Communication by TTS Using Explicit Definitions of Illocutionary Act Performance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-6|PAPER Tue-P-4-B-6 — Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192236.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-5|PAPER Mon-P-2-A-5 — StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192188.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-6|PAPER Tue-P-3-A-6 — Evaluating Intention Communication by TTS Using Explicit Definitions of Illocutionary Act Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Intention Communication by TTS Using Explicit Definitions of Illocutionary Act Performance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191311.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-5|PAPER Tue-O-4-2-5 — Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-10|PAPER Thu-P-10-C-10 — Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-9|PAPER Tue-P-4-E-9 — Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192502.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-5|PAPER Tue-P-3-C-5 — Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191852.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-8|PAPER Wed-P-6-D-8 — Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191850.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-5|PAPER Thu-P-9-E-5 — Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192521.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-4|PAPER Thu-P-10-C-4 — Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-5|PAPER Tue-O-3-4-5 — The Neural Correlates Underlying Lexically-Guided Perceptual Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Neural Correlates Underlying Lexically-Guided Perceptual Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-1|PAPER Thu-O-10-1-1 — Survey Talk: Reaching Over the Gap: Cross- and Interdisciplinary Research on Human and Automatic Speech Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Reaching Over the Gap: Cross- and Interdisciplinary Research on Human and Automatic Speech Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-15|PAPER Mon-P-2-A-15 — Semi-Supervised Voice Conversion with Amortized Variational Inference]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Voice Conversion with Amortized Variational Inference</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-7|PAPER Thu-P-10-B-7 — Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-2|PAPER Wed-O-8-4-2 — Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191523.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-12|PAPER Wed-P-8-D-12 — A Computational Model of Early Language Acquisition from Audiovisual Experiences of Young Infants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Computational Model of Early Language Acquisition from Audiovisual Experiences of Young Infants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192876.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-6|PAPER Mon-P-2-D-6 — Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-6|PAPER Wed-O-8-4-6 — Nonparallel Emotional Speech Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nonparallel Emotional Speech Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-1|PAPER Wed-O-8-5-1 — Self-Supervised Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-7|PAPER Thu-P-10-A-7 — Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192502.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-5|PAPER Tue-P-3-C-5 — Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — God as Interlocutor — Real or Imaginary? Prosodic Markers of Dialogue Speech and Expected Efficacy in Spoken Prayer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">God as Interlocutor — Real or Imaginary? Prosodic Markers of Dialogue Speech and Expected Efficacy in Spoken Prayer</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-5|PAPER Mon-O-2-4-5 — PASCAL and DPA: A Pilot Study on Using Prosodic Competence Scores to Predict Communicative Skills for Team Working and Public Speaking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PASCAL and DPA: A Pilot Study on Using Prosodic Competence Scores to Predict Communicative Skills for Team Working and Public Speaking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-11|PAPER Mon-P-1-C-11 — Do not Hesitate! — Unless You Do it Shortly or Nasally: How the Phonetics of Filled Pauses Determine Their Subjective Frequency and Perceived Speaker Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do not Hesitate! — Unless You Do it Shortly or Nasally: How the Phonetics of Filled Pauses Determine Their Subjective Frequency and Perceived Speaker Performance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-12|PAPER Tue-P-3-D-12 — A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191945.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-9|PAPER Thu-P-10-C-9 — Improving Speech Synthesis with Discourse Relations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Synthesis with Discourse Relations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-2|PAPER Wed-S&T-5-2 —  GFM-Voc: A Real-Time Voice Quality Modification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> GFM-Voc: A Real-Time Voice Quality Modification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192756.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-9|PAPER Mon-P-1-A-9 — Speaker-Corrupted Embeddings for Online Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Corrupted Embeddings for Online Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-1|PAPER Thu-O-10-4-1 — Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-7|PAPER Thu-P-10-A-7 — Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-6|PAPER Tue-P-3-B-6 — Lattice-Based Lightly-Supervised Acoustic Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Based Lightly-Supervised Acoustic Model Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-7|PAPER Thu-P-10-A-7 — Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-1|PAPER Thu-O-9-1-1 — Survey Talk: Realistic Physics-Based Computational Voice Production]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Realistic Physics-Based Computational Voice Production</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192589.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-2|PAPER Wed-P-7-B-2 — Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-4|PAPER Wed-P-6-D-4 — Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-3|PAPER Tue-SS-5-6-3 — Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192589.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-2|PAPER Wed-P-7-B-2 — Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192454.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-1|PAPER Tue-O-5-2-1 — Multi-Span Acoustic Modelling Using Raw Waveform Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Span Acoustic Modelling Using Raw Waveform Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191968.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-9|PAPER Tue-P-3-C-9 — Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192454.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-1|PAPER Tue-O-5-2-1 — Multi-Span Acoustic Modelling Using Raw Waveform Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Span Acoustic Modelling Using Raw Waveform Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193099.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-3|PAPER Mon-O-1-5-3 — Expediting TTS Synthesis with Adversarial Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expediting TTS Synthesis with Adversarial Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-8|PAPER Mon-P-2-A-8 — GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-3|PAPER Wed-SS-8-6-3 — Mel-Frequency Cepstral Coefficients of Voice Source Waveforms for Classification of Phonation Types in Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mel-Frequency Cepstral Coefficients of Voice Source Waveforms for Classification of Phonation Types in Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191333.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-1|PAPER Wed-O-8-4-1 — Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-2|PAPER Wed-O-8-4-2 — Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-3|PAPER Tue-P-3-D-3 — Quantifying Fundamental Frequency Modulation as a Function of Language, Speaking Style and Speaker]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Fundamental Frequency Modulation as a Function of Language, Speaking Style and Speaker</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192462.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-3|PAPER Tue-SS-3-6-3 — ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-6|PAPER Thu-SS-9-6-6 — Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-4|PAPER Thu-P-10-B-4 — An Online Attention-Based Model for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Online Attention-Based Model for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-7|PAPER Mon-P-2-C-7 — Spoken Language Intent Detection Using Confusion2Vec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Language Intent Detection Using Confusion2Vec</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-1|PAPER Tue-P-5-E-1 — Multiview Shared Subspace Learning Across Speakers and Speech Commands]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiview Shared Subspace Learning Across Speakers and Speech Commands</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-11|PAPER Wed-P-6-C-11 — Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193179.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-5|PAPER Tue-O-5-5-5 — Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-3|PAPER Mon-O-1-4-3 — Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192791.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-4|PAPER Wed-P-6-C-4 — Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-7|PAPER Mon-S&T-1-7 — Speech-Based Web Navigation for Limited Mobility Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-Based Web Navigation for Limited Mobility Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-5|PAPER Thu-SS-9-6-5 — Extracting Mel-Frequency and Bark-Frequency Cepstral Coefficients from Encrypted Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extracting Mel-Frequency and Bark-Frequency Cepstral Coefficients from Encrypted Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192396.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-6|PAPER Mon-P-2-C-6 — Speech Model Pre-Training for End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Model Pre-Training for End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193146.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-1|PAPER Tue-O-3-2-1 — Deep Speaker Recognition: Modular or Monolithic?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Recognition: Modular or Monolithic?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198021.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-3|PAPER Wed-S&T-4-3 — PyToBI: A Toolkit for ToBI Labeling Under Python]]</div>|^<div class="cpauthorindexpersoncardpapertitle">PyToBI: A Toolkit for ToBI Labeling Under Python</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-5|PAPER Mon-O-1-5-5 — Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192307.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-4|PAPER Mon-P-2-A-4 — Non-Parallel Voice Conversion with Cyclic Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Cyclic Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191402.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-10|PAPER Wed-P-6-D-10 — Talker Intelligibility and Listening Effort with Temporally Modified Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Talker Intelligibility and Listening Effort with Temporally Modified Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193109.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-2|PAPER Mon-P-2-D-2 — Towards a Speaker Independent Speech-BCI Using Speaker Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Speaker Independent Speech-BCI Using Speaker Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193105.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-1|PAPER Tue-O-3-4-1 — Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192115.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-3|PAPER Mon-O-2-4-3 — Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-11|PAPER Thu-P-10-B-11 — End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192162.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-5|PAPER Mon-P-2-B-5 — Cumulative Adaptation for BLSTM Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cumulative Adaptation for BLSTM Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191691.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-3|PAPER Tue-O-5-4-3 — Multi-Lingual Dialogue Act Recognition with Deep Learning Methods]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Lingual Dialogue Act Recognition with Deep Learning Methods</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192918.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-4|PAPER Tue-P-4-D-4 — Prosodic Factors Influencing Vowel Reduction in Russian]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Factors Influencing Vowel Reduction in Russian</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192082.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-8|PAPER Tue-P-4-D-8 — Perceptual Evaluation of Early versus Late F0 Peaks in the Intonation Structure of Czech Question-Word Questions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Evaluation of Early versus Late F0 Peaks in the Intonation Structure of Czech Question-Word Questions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-5|PAPER Wed-P-6-A-5 — Adversarial Optimization for Dictionary Attacks on Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Optimization for Dictionary Attacks on Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191789.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-7|PAPER Thu-P-9-B-7 — Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-1|PAPER Mon-P-2-C-1 — Mitigating Noisy Inputs for Question Answering]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Noisy Inputs for Question Answering</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-4|PAPER Thu-O-10-3-4 — Age-Related Changes in European Portuguese Vowel Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Age-Related Changes in European Portuguese Vowel Acoustics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-5|PAPER Mon-O-2-2-5 — Large Margin Training for Attention Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Training for Attention Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191495.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-11|PAPER Mon-P-1-B-11 — Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191493.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-12|PAPER Mon-P-1-B-12 — Enhanced Spectral Features for Distortion-Independent Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enhanced Spectral Features for Distortion-Independent Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192228.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-6|PAPER Tue-O-4-3-6 — Latent Topic Attention for Domain Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Topic Attention for Domain Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192228.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-6|PAPER Tue-O-4-3-6 — Latent Topic Attention for Domain Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Topic Attention for Domain Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-4|PAPER Wed-O-7-5-4 — Maximum a posteriori Speech Enhancement Based on Double Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Maximum a posteriori Speech Enhancement Based on Double Spectrum</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192271.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-3|PAPER Wed-P-8-E-3 — Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192544.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-4|PAPER Mon-P-2-B-4 — Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192983.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-1|PAPER Thu-P-9-A-1 — Adversarial Regularization for End-to-End Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Regularization for End-to-End Robust Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-3|PAPER Mon-P-1-B-3 — Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193155.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-3|PAPER Mon-P-2-B-3 — Multi-Accent Adaptation Based on Gate Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Accent Adaptation Based on Gate Mechanism</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-3|PAPER Wed-O-6-5-3 — Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-8|PAPER Wed-P-7-B-8 — Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191484.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-3|PAPER Wed-P-8-C-3 — Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-6|PAPER Mon-O-2-5-6 — How to Annotate 100 Hours in 45 Minutes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How to Annotate 100 Hours in 45 Minutes</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191553.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-7|PAPER Thu-P-9-D-7 — Spot the Pleasant People! Navigating the Cocktail Party Buzz]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spot the Pleasant People! Navigating the Cocktail Party Buzz</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-5|PAPER Thu-O-9-3-5 — Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191327.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-2|PAPER Mon-O-1-4-2 — Harmonic-Aligned Frame Mask Based on Non-Stationary Gabor Transform with Application to Content-Dependent Speaker Comparison]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Harmonic-Aligned Frame Mask Based on Non-Stationary Gabor Transform with Application to Content-Dependent Speaker Comparison</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-1|PAPER Mon-O-2-2-1 — Untranscribed Web Audio for Low Resource Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Untranscribed Web Audio for Low Resource Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-5|PAPER Tue-O-5-2-5 — Trainable Dynamic Subsampling for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Trainable Dynamic Subsampling for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-6|PAPER Tue-P-3-B-6 — Lattice-Based Lightly-Supervised Acoustic Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Based Lightly-Supervised Acoustic Model Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191257.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-9|PAPER Wed-P-8-B-9 — On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-3|PAPER Thu-O-9-1-3 — Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191334.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-4|PAPER Thu-O-9-1-4 — Articulatory Copy Synthesis Based on a Genetic Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Copy Synthesis Based on a Genetic Algorithm</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-10|PAPER Thu-P-9-D-10 — Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192278.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-6|PAPER Wed-SS-6-4-6 — Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191407.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-10|PAPER Mon-P-1-E-10 — An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-1|PAPER Wed-O-8-5-1 — Self-Supervised Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193246.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-1|PAPER Mon-P-2-B-1 — Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-5|PAPER Tue-P-5-B-5 — End-to-End Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191752.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-9|PAPER Mon-P-2-B-9 — An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191733.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-7|PAPER Mon-P-1-C-7 — Laughter Dynamics in Dyadic Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Laughter Dynamics in Dyadic Conversations</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198015.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-2|PAPER Wed-S&T-4-2 — A User-Friendly and Adaptable Re-Implementation of an Acoustic Prominence Detection and Annotation Tool]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A User-Friendly and Adaptable Re-Implementation of an Acoustic Prominence Detection and Annotation Tool</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-2|PAPER Thu-O-10-4-2 — Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-3|PAPER Thu-O-10-4-3 — The Greennn Tree — Lengthening Position Influences Uncertainty Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Greennn Tree — Lengthening Position Influences Uncertainty Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191812.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-9|PAPER Mon-P-2-D-9 — Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192678.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-2|PAPER Tue-P-3-B-2 — Unbiased Semi-Supervised LF-MMI Training Using Dropout]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbiased Semi-Supervised LF-MMI Training Using Dropout</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-6|PAPER Wed-O-6-2-6 — Self-Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192338.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-2|PAPER Wed-SS-8-6-2 — Aerodynamics and Lumped-Masses Combined with Delay Lines for Modeling Vertical and Anterior-Posterior Phase Differences in Pathological Vocal Fold Vibration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Aerodynamics and Lumped-Masses Combined with Delay Lines for Modeling Vertical and Anterior-Posterior Phase Differences in Pathological Vocal Fold Vibration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-5|PAPER Wed-SS-8-6-5 — Analysis and Synthesis of Vocal Flutter and Vocal Jitter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis and Synthesis of Vocal Flutter and Vocal Jitter</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191405.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-12|PAPER Mon-P-1-C-12 — Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192080.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-10|PAPER Wed-P-6-C-10 — Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-2|PAPER Mon-O-2-4-2 — Sibilant Variation in New Englishes: A Comparative Sociophonetic Study of Trinidadian and American English /s(tr)/-Retraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sibilant Variation in New Englishes: A Comparative Sociophonetic Study of Trinidadian and American English /s(tr)/-Retraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-3|PAPER Thu-P-9-B-3 — “Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">“Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193201.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-5|PAPER Mon-SS-2-6-5 — Fusion Techniques for Utterance-Level Emotion Recognition Combining Speech and Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Techniques for Utterance-Level Emotion Recognition Combining Speech and Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-1|PAPER Tue-P-3-A-1 — Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-2|PAPER Tue-P-3-A-2 — Selection and Training Schemes for Improving TTS Voice Built on Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Selection and Training Schemes for Improving TTS Voice Built on Found Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-5|PAPER Wed-O-6-3-5 — A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-11|PAPER Wed-P-6-B-11 — CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-5|PAPER Mon-P-2-D-5 — Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-14|PAPER Wed-P-6-A-14 — On Robustness of Unsupervised Domain Adaptation for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Robustness of Unsupervised Domain Adaptation for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-3|PAPER Tue-O-4-5-3 — Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191824.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-8|PAPER Tue-P-3-A-8 — Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192726.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-2|PAPER Thu-P-9-B-2 — Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-9|PAPER Thu-P-9-B-9 — Video-Driven Speech Reconstruction Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Video-Driven Speech Reconstruction Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-11|PAPER Tue-P-5-C-11 — Lattice Generation in Attention-Based Speech Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice Generation in Attention-Based Speech Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192048.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-5|PAPER Tue-SS-5-6-5 — Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192616.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-2|PAPER Thu-P-10-A-2 — Self Multi-Head Attention for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self Multi-Head Attention for Speaker Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192716.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-2|PAPER Tue-SS-3-6-2 — LEAP Diarization System for the Second DIHARD Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LEAP Diarization System for the Second DIHARD Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NITK Kids’ Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191241.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-1|PAPER Tue-P-5-C-1 — Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-6|PAPER Mon-O-1-3-6 — SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-2|PAPER Mon-O-1-3-2 — An Investigation on Speaker Specific Articulatory Synthesis with Speaker Independent Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Investigation on Speaker Specific Articulatory Synthesis with Speaker Independent Articulatory Inversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-2|PAPER Mon-P-2-E-2 — ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192295.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-3|PAPER Mon-P-2-E-3 — Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-9|PAPER Mon-P-2-E-9 — An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-10|PAPER Mon-P-2-E-10 — Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-3|PAPER Mon-S&T-1-3 — SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192280.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-9|PAPER Thu-P-10-A-9 — Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-7|PAPER Mon-P-2-C-7 — Spoken Language Intent Detection Using Confusion2Vec]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spoken Language Intent Detection Using Confusion2Vec</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-5|PAPER Tue-P-3-B-5 — Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192482.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-3|PAPER Tue-O-4-3-3 — Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NITK Kids’ Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191959.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-7|PAPER Tue-P-5-B-7 — Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-8|PAPER Mon-P-1-B-8 — NIESR: Nuisance Invariant End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NIESR: Nuisance Invariant End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-2|PAPER Mon-P-1-C-2 — Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-5|PAPER Thu-O-10-3-5 — Vowel-Tone Interaction in Two Tibeto-Burman Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vowel-Tone Interaction in Two Tibeto-Burman Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192604.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-4|PAPER Thu-P-10-D-4 — Modification of Devoicing Error in Cleft Lip and Palate Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modification of Devoicing Error in Cleft Lip and Palate Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-9|PAPER Thu-P-10-D-9 — Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192742.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-2|PAPER Wed-P-6-A-2 — Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-4|PAPER Mon-P-1-C-4 — Deep Learning Based Mandarin Accent Identification for Accent Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Mandarin Accent Identification for Accent Robust ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192719.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-6|PAPER Thu-O-9-2-6 — Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192652.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-3|PAPER Wed-P-8-B-3 — Unsupervised Raw Waveform Representation Learning for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Raw Waveform Representation Learning for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192432.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-7|PAPER Thu-P-10-D-7 — Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191483.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-7|PAPER Wed-P-7-E-7 — ToneNet: A CNN Model of Tone Classification of Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ToneNet: A CNN Model of Tone Classification of Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-7|PAPER Mon-P-1-D-7 — Detecting Mismatch Between Speech and Transcription Using Cross-Modal Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Mismatch Between Speech and Transcription Using Cross-Modal Attention</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192182.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-5|PAPER Wed-P-7-B-5 — Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192460.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-2|PAPER Thu-O-9-2-2 — Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191924.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-1|PAPER Wed-O-7-5-1 — Speech Denoising with Deep Feature Losses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Denoising with Deep Feature Losses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-6|PAPER Tue-P-3-C-6 — Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192983.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-1|PAPER Thu-P-9-A-1 — Adversarial Regularization for End-to-End Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Regularization for End-to-End Robust Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192457.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-4|PAPER Wed-P-6-A-4 — Deep Hashing for Speaker Identification and Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hashing for Speaker Identification and Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191388.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-5|PAPER Mon-P-1-A-5 — LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-8|PAPER Tue-SS-4-4-8 — Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191704.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-5|PAPER Wed-O-8-5-5 — Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-4|PAPER Thu-O-10-5-4 — Code-Switching Detection Using ASR-Generated Language Posteriors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Detection Using ASR-Generated Language Posteriors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-3|PAPER Thu-O-9-4-3 — Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191715.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-7|PAPER Mon-P-1-E-7 — Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-5|PAPER Wed-O-6-1-5 — Phonological Awareness of French Rising Contours in Japanese Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonological Awareness of French Rising Contours in Japanese Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192251.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-10|PAPER Tue-P-5-D-10 — A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-4|PAPER Tue-SS-4-4-4 — Robust Bayesian and Light Neural Networks for Voice Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Bayesian and Light Neural Networks for Voice Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-9|PAPER Wed-P-6-B-9 — Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-8|PAPER Wed-P-8-B-8 — Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192420.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-3|PAPER Wed-P-7-B-3 — Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-1|PAPER Tue-O-5-4-1 — Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-2|PAPER Tue-O-5-4-2 — HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-2|PAPER Mon-P-2-C-2 — One-vs-All Models for Asynchronous Training: An Empirical Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-vs-All Models for Asynchronous Training: An Empirical Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192394.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-2|PAPER Wed-P-8-E-2 — Neural Network Distillation on IoT Platforms for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network Distillation on IoT Platforms for Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191148.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-3|PAPER Thu-SS-9-6-3 — Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191703.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-4|PAPER Thu-SS-9-6-4 — Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193252.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-1|PAPER Thu-O-10-2-1 — Direct Modelling of Speech Emotion from Raw Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Modelling of Speech Emotion from Raw Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-5|PAPER Wed-P-7-C-5 — Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193252.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-1|PAPER Thu-O-10-2-1 — Direct Modelling of Speech Emotion from Raw Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Modelling of Speech Emotion from Raw Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192742.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-2|PAPER Wed-P-6-A-2 — Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-1|PAPER Mon-O-1-1-1 — Survey Talk: Modeling in Automatic Speech Recognition: Beyond Hidden Markov Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: Modeling in Automatic Speech Recognition: Beyond Hidden Markov Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192162.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-5|PAPER Mon-P-2-B-5 — Cumulative Adaptation for BLSTM Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cumulative Adaptation for BLSTM Acoustic Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192879.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-2|PAPER Tue-O-5-2-2 — An Analysis of Local Monotonic Attention Variants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Analysis of Local Monotonic Attention Variants</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-7|PAPER Tue-P-3-B-7 — Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191728.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-6|PAPER Wed-O-6-5-6 — Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192225.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-3|PAPER Thu-O-10-1-3 — Language Modeling with Deep Transformers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Modeling with Deep Transformers</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191817.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-4|PAPER Thu-P-9-D-4 — Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-7|PAPER Wed-P-8-E-7 — A Robust Framework for Acoustic Scene Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Robust Framework for Acoustic Scene Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-1|PAPER Mon-O-2-1-1 — Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192540.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-4|PAPER Wed-SS-6-4-4 — Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193075.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-1|PAPER Tue-O-3-3-1 — Neural Transition Systems for Modeling Hierarchical Semantic Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transition Systems for Modeling Hierarchical Semantic Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-1|PAPER Mon-O-2-5-1 — VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191450.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-9|PAPER Mon-P-1-E-9 — Weakly Supervised Syllable Segmentation by Vowel-Consonant Peak Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Weakly Supervised Syllable Segmentation by Vowel-Consonant Peak Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192512.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-4|PAPER Wed-O-8-4-4 — A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-15|PAPER Thu-P-10-C-15 — Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-13|PAPER Tue-P-5-C-13 — ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198016.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-4|PAPER Mon-S&T-1-4 — Using Real-Time Visual Biofeedback for Second Language Instruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Real-Time Visual Biofeedback for Second Language Instruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191975.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-9|PAPER Tue-P-3-B-9 — Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-10|PAPER Wed-P-8-D-10 — Nasal Consonant Discrimination in Infant- and Adult-Directed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Consonant Discrimination in Infant- and Adult-Directed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-7|PAPER Tue-P-4-B-7 — Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192244.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-3|PAPER Mon-O-1-2-3 — Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192549.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-5|PAPER Tue-O-3-5-5 — Unsupervised Training of Neural Mask-Based Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Training of Neural Mask-Based Beamforming</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191703.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-4|PAPER Thu-SS-9-6-4 — Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-5|PAPER Wed-P-7-C-5 — Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191539.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-9|PAPER Thu-P-10-B-9 — Real to H-Space Encoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real to H-Space Encoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192295.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-3|PAPER Mon-P-2-E-3 — Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-4|PAPER Wed-S&T-5-4 — Synthesized Spoken Names: Biases Impacting Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synthesized Spoken Names: Biases Impacting Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191708.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-5|PAPER Wed-O-8-3-5 — Gender De-Biasing in Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gender De-Biasing in Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191852.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-8|PAPER Wed-P-6-D-8 — Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191198.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-14|PAPER Mon-P-2-A-14 — Group Latent Embedding for Vector Quantized Variational Autoencoder in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Group Latent Embedding for Vector Quantized Variational Autoencoder in Non-Parallel Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-3|PAPER Wed-O-8-4-3 — Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191708.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-5|PAPER Wed-O-8-3-5 — Gender De-Biasing in Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Gender De-Biasing in Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-13|PAPER Thu-P-10-C-13 — Dual Encoder Classifier Models as Constraints in Neural Text Normalization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dual Encoder Classifier Models as Constraints in Neural Text Normalization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-2|PAPER Wed-P-6-D-2 — Disfluencies and Human Speech Transcription Errors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Human Speech Transcription Errors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-3|PAPER Mon-P-1-C-3 — Mitigating Gender and L1 Differences to Improve State and Trait Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mitigating Gender and L1 Differences to Improve State and Trait Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193075.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-1|PAPER Tue-O-3-3-1 — Neural Transition Systems for Modeling Hierarchical Semantic Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transition Systems for Modeling Hierarchical Semantic Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-2|PAPER Tue-P-3-D-2 — The Monophthongs of Formal Nigerian English: An Acoustic Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Monophthongs of Formal Nigerian English: An Acoustic Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Althingi ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-3|PAPER Wed-SS-6-4-3 — Deep Neural Baselines for Computational Paralinguistics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Baselines for Computational Paralinguistics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192751.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-2|PAPER Mon-O-1-2-2 — On Nonlinear Spatial Filtering in Multichannel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Nonlinear Spatial Filtering in Multichannel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192763.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-2|PAPER Wed-O-7-1-2 — The Effects of Time Expansion on English as a Second Language Individuals]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Effects of Time Expansion on English as a Second Language Individuals</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-5|PAPER Thu-O-9-5-5 — Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191795.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-12|PAPER Tue-P-5-D-12 — The Different Roles of Expectations in Phonetic and Lexical Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Different Roles of Expectations in Phonetic and Lexical Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-12|PAPER Tue-P-3-C-12 — Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-3|PAPER Tue-P-4-B-3 — Using Alexa for Flashcard-Based Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Alexa for Flashcard-Based Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-5|PAPER Tue-P-5-E-5 — On the Use/Misuse of the Term ‘Phoneme’]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Use/Misuse of the Term ‘Phoneme’</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198028.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-5|PAPER Wed-S&T-4-5 — SLP-AA: Tools for Sign Language Phonetic and Phonological Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SLP-AA: Tools for Sign Language Phonetic and Phonological Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191887.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-10|PAPER Tue-SS-4-4-10 — Long Range Acoustic Features for Spoofed Speech Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Long Range Acoustic Features for Spoofed Speech Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191928.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-5|PAPER Tue-P-4-E-5 — Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191925.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-6|PAPER Tue-P-4-E-6 — On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191894.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-9|PAPER Wed-SS-6-4-9 — Instantaneous Phase and Long-Term Acoustic Cues for Orca Activity Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Instantaneous Phase and Long-Term Acoustic Cues for Orca Activity Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-4|PAPER Wed-S&T-3-4 — SpeechMarker: A Voice Based Multi-Level Attendance Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMarker: A Voice Based Multi-Level Attendance Application</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191994.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-5|PAPER Thu-P-10-A-5 — A Unified Framework for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-8|PAPER Tue-P-5-B-8 — Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-4|PAPER Wed-S&T-3-4 — SpeechMarker: A Voice Based Multi-Level Attendance Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMarker: A Voice Based Multi-Level Attendance Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191241.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-1|PAPER Tue-P-5-C-1 — Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-2|PAPER Wed-P-7-E-2 — A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191705.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-1|PAPER Mon-O-1-5-1 — High Quality, Lightweight and Adaptable TTS Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality, Lightweight and Adaptable TTS Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191789.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-7|PAPER Thu-P-9-B-7 — Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191873.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-6|PAPER Wed-P-8-B-6 — wav2vec: Unsupervised Pre-Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">wav2vec: Unsupervised Pre-Training for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192460.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-2|PAPER Thu-O-9-2-2 — Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193107.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-5|PAPER Thu-O-10-1-5 — Who Needs Words? Lexicon-Free Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Who Needs Words? Lexicon-Free Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192078.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-2|PAPER Tue-O-3-5-2 — Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-8|PAPER Tue-SS-4-4-8 — Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193074.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-5|PAPER Wed-O-6-2-5 — Supervised Classifiers for Audio Impairments with Noisy Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Supervised Classifiers for Audio Impairments with Noisy Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192347.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-8|PAPER Wed-P-8-C-8 — Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-4|PAPER Thu-P-10-B-4 — An Online Attention-Based Model for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Online Attention-Based Model for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191388.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-5|PAPER Mon-P-1-A-5 — LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191776.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-8|PAPER Tue-P-4-E-8 — Optimizing Voice Activity Detection for Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Voice Activity Detection for Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-15|PAPER Tue-P-5-C-15 — Performance Monitoring for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Performance Monitoring for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191343.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-8|PAPER Wed-P-6-B-8 — Exploring Methods for the Automatic Detection of Errors in Manual Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Methods for the Automatic Detection of Errors in Manual Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-3|PAPER Wed-P-7-C-3 — Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mirroring to Build Trust in Digital Assistants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192425.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-6|PAPER Wed-P-6-E-6 — Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192263.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-8|PAPER Tue-P-5-C-8 — A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-4|PAPER Wed-O-8-3-4 — Speech Emotion Recognition Based on Multi-Label Emotion Existence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition Based on Multi-Label Emotion Existence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191510.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-10|PAPER Thu-P-9-A-10 — Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-9|PAPER Tue-P-4-E-9 — Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-9|PAPER Mon-P-2-A-9 — Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-2|PAPER Tue-P-3-A-2 — Selection and Training Schemes for Improving TTS Voice Built on Found Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Selection and Training Schemes for Improving TTS Voice Built on Found Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-9|PAPER Thu-P-10-D-9 — Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192151.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-11|PAPER Thu-P-10-D-11 — Hypernasality Severity Detection Using Constant Q Cepstral Coefficients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hypernasality Severity Detection Using Constant Q Cepstral Coefficients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-2|PAPER Wed-SS-6-4-2 — Using Speech Production Knowledge for Raw Waveform Modelling Based Styrian Dialect Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech Production Knowledge for Raw Waveform Modelling Based Styrian Dialect Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191891.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-11|PAPER Wed-P-6-A-11 — A Study of x-Vector Based Speaker Recognition on Short Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of x-Vector Based Speaker Recognition on Short Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191891.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-11|PAPER Wed-P-6-A-11 — A Study of x-Vector Based Speaker Recognition on Short Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of x-Vector Based Speaker Recognition on Short Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193152.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-1|PAPER Thu-P-9-C-1 — Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-4|PAPER Wed-S&T-3-4 — SpeechMarker: A Voice Based Multi-Level Attendance Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMarker: A Voice Based Multi-Level Attendance Application</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192604.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-4|PAPER Thu-P-10-D-4 — Modification of Devoicing Error in Cleft Lip and Palate Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modification of Devoicing Error in Cleft Lip and Palate Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-9|PAPER Thu-P-10-D-9 — Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192151.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-11|PAPER Thu-P-10-D-11 — Hypernasality Severity Detection Using Constant Q Cepstral Coefficients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hypernasality Severity Detection Using Constant Q Cepstral Coefficients</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191715.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-7|PAPER Mon-P-1-E-7 — Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192526.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-5|PAPER Thu-O-9-1-5 — A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-1|PAPER Thu-O-10-4-1 — Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191677.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-3|PAPER Wed-P-8-D-3 — Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-4|PAPER Mon-O-1-2-4 — Exploiting Multi-Channel Speech Presence Probability in Parametric Multi-Channel Wiener Filter]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Multi-Channel Speech Presence Probability in Parametric Multi-Channel Wiener Filter</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192629.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-3|PAPER Wed-P-6-B-3 — Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192278.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-6|PAPER Wed-SS-6-4-6 — Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-3|PAPER Thu-O-10-5-3 — Variational Attention Using Articulatory Priors for Generating Code Mixed Speech Using Monolingual Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Attention Using Articulatory Priors for Generating Code Mixed Speech Using Monolingual Corpora</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-6|PAPER Tue-O-3-4-6 — Speech Quality Evaluation of Synthesized Japanese Speech Using EEG]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Quality Evaluation of Synthesized Japanese Speech Using EEG</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-6|PAPER Thu-O-9-3-6 — Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-5|PAPER Mon-O-2-5-5 — Towards Variability Resistant Dialectal Speech Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Variability Resistant Dialectal Speech Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192650.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-13|PAPER Mon-P-2-D-13 — Strength and Structure: Coupling Tones with Oral Constriction Gestures]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Strength and Structure: Coupling Tones with Oral Constriction Gestures</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-10|PAPER Thu-P-9-D-10 — Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191343.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-8|PAPER Wed-P-6-B-8 — Exploring Methods for the Automatic Detection of Errors in Manual Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Methods for the Automatic Detection of Errors in Manual Transcription</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192723.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-1|PAPER Wed-P-8-B-1 — Modulation Vectors as Robust Feature Representation for ASR in Domain Mismatched Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modulation Vectors as Robust Feature Representation for ASR in Domain Mismatched Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-6|PAPER Thu-O-10-5-6 — Multi-Graph Decoding for Code-Switching ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Graph Decoding for Code-Switching ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191788.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-8|PAPER Mon-P-2-B-8 — Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192793.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-2|PAPER Wed-P-6-B-2 — Detection and Recovery of OOVs for Improved English Broadcast News Captioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection and Recovery of OOVs for Improved English Broadcast News Captioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-11|PAPER Wed-P-6-C-11 — Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-12|PAPER Mon-P-1-A-12 — Normal Variance-Mean Mixtures for Unsupervised Score Calibration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Normal Variance-Mean Mixtures for Unsupervised Score Calibration</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191775.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-4|PAPER Mon-O-2-3-4 — Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191781.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-10|PAPER Tue-P-5-A-10 — Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-7|PAPER Thu-P-10-B-7 — Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192507.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-7|PAPER Tue-P-4-D-7 — A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192980.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192961.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-7|PAPER Mon-P-1-A-7 — Multi-PLDA Diarization on Children’s Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-PLDA Diarization on Children’s Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-1|PAPER Tue-P-5-D-1 — The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-4|PAPER Mon-O-1-4-4 — Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-3|PAPER Tue-P-3-E-3 — Towards Generalized Speech Enhancement with Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Generalized Speech Enhancement with Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-4|PAPER Wed-O-6-3-4 — Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192097.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-6|PAPER Wed-O-6-3-6 — Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193252.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-1|PAPER Thu-O-10-2-1 — Direct Modelling of Speech Emotion from Raw Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Modelling of Speech Emotion from Raw Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-4|PAPER Wed-P-8-D-4 — Using Prosody to Discover Word Order Alternations in a Novel Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Prosody to Discover Word Order Alternations in a Novel Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192967.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-1|PAPER Mon-P-1-E-1 — Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-5|PAPER Tue-P-4-C-5 — Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-4|PAPER Wed-S&T-3-4 — SpeechMarker: A Voice Based Multi-Level Attendance Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechMarker: A Voice Based Multi-Level Attendance Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-7|PAPER Tue-P-4-B-7 — Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192629.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-3|PAPER Wed-P-6-B-3 — Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-6|PAPER Thu-O-9-3-6 — Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191959.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-7|PAPER Tue-P-5-B-7 — Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-7|PAPER Mon-P-2-B-7 — End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-4|PAPER Wed-O-8-3-4 — Speech Emotion Recognition Based on Multi-Label Emotion Existence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition Based on Multi-Label Emotion Existence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-10|PAPER Wed-P-7-C-10 — Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-6|PAPER Tue-O-3-4-6 — Speech Quality Evaluation of Synthesized Japanese Speech Using EEG]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Quality Evaluation of Synthesized Japanese Speech Using EEG</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-6|PAPER Thu-O-9-3-6 — Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191826.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-4|PAPER Thu-P-9-C-4 — An Incremental Turn-Taking Model for Task-Oriented Dialog Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Incremental Turn-Taking Model for Task-Oriented Dialog Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-6|PAPER Tue-S&T-2-6 — A System for Real-Time Privacy Preserving Data Collection for Ambient Assisted Living]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A System for Real-Time Privacy Preserving Data Collection for Ambient Assisted Living</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-12|PAPER Wed-P-7-C-12 — Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-2|PAPER Wed-O-7-2-2 — Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191510.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-10|PAPER Thu-P-9-A-10 — Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192637.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-2|PAPER Wed-P-8-D-2 — An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-4|PAPER Wed-O-7-5-4 — Maximum a posteriori Speech Enhancement Based on Double Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Maximum a posteriori Speech Enhancement Based on Double Spectrum</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192636.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-8|PAPER Wed-P-8-A-8 — Quality Degradation Diagnosis for Voice Networks — Estimating the Perceived Noisiness, Coloration, and Discontinuity of Transmitted Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quality Degradation Diagnosis for Voice Networks — Estimating the Perceived Noisiness, Coloration, and Discontinuity of Transmitted Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-10|PAPER Wed-P-8-A-10 — Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192845.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-5|PAPER Mon-P-2-E-5 — Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-6|PAPER Tue-P-5-E-6 — Understanding and Visualizing Raw Waveform-Based CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding and Visualizing Raw Waveform-Based CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192420.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-3|PAPER Wed-P-7-B-3 — Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198005.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-1|PAPER Tue-S&T-2-1 — Directional Audio Rendering Using a Neural Network Based Personalized HRTF]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Directional Audio Rendering Using a Neural Network Based Personalized HRTF</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-7|PAPER Mon-S&T-1-7 — Speech-Based Web Navigation for Limited Mobility Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-Based Web Navigation for Limited Mobility Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193096.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-7|PAPER Wed-SS-8-6-7 — Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-10|PAPER Tue-P-4-B-10 — Self-Imitating Feedback Generation Using GAN for Computer-Assisted Pronunciation Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Imitating Feedback Generation Using GAN for Computer-Assisted Pronunciation Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-2|PAPER Wed-P-8-A-2 — Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-9|PAPER Tue-P-5-A-9 — Transformer Based Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transformer Based Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-10|PAPER Mon-P-2-A-10 — One-Shot Voice Conversion with Disentangled Representations by Leveraging Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Disentangled Representations by Leveraging Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-1|PAPER Tue-O-5-4-1 — Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-2|PAPER Tue-O-5-4-2 — HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-1|PAPER Wed-P-6-E-1 — Monaural Speech Enhancement with Dilated Convolutions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Monaural Speech Enhancement with Dilated Convolutions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-6|PAPER Mon-P-1-B-6 — Far-Field Speech Enhancement Using Heteroscedastic Autoencoder for Improved Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Far-Field Speech Enhancement Using Heteroscedastic Autoencoder for Improved Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-10|PAPER Mon-P-2-B-10 — A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191418.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-11|PAPER Thu-P-10-C-11 — Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191412.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-13|PAPER Tue-P-3-D-13 — Phonetic Detail Encoding in Explaining the Size of Speech Planning Window]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetic Detail Encoding in Explaining the Size of Speech Planning Window</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-2|PAPER Mon-P-2-C-2 — One-vs-All Models for Asynchronous Training: An Empirical Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-vs-All Models for Asynchronous Training: An Empirical Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-10|PAPER Thu-P-9-B-10 — On the Use of Pitch Features for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Use of Pitch Features for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-11|PAPER Wed-P-6-C-11 — Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191198.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-14|PAPER Mon-P-2-A-14 — Group Latent Embedding for Vector Quantized Variational Autoencoder in Non-Parallel Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Group Latent Embedding for Vector Quantized Variational Autoencoder in Non-Parallel Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-3|PAPER Wed-O-8-4-3 — Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191865.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-1|PAPER Mon-O-2-4-1 — Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192950.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-3|PAPER Tue-P-4-D-3 — Compensation for French Liquid Deletion During Auditory Sentence Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compensation for French Liquid Deletion During Auditory Sentence Processing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191795.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-12|PAPER Tue-P-5-D-12 — The Different Roles of Expectations in Phonetic and Lexical Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Different Roles of Expectations in Phonetic and Lexical Processing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192838.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-8|PAPER Wed-P-8-D-8 — Liquid Deletion in French Child-Directed Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Liquid Deletion in French Child-Directed Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-6|PAPER Mon-P-1-B-6 — Far-Field Speech Enhancement Using Heteroscedastic Autoencoder for Improved Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Far-Field Speech Enhancement Using Heteroscedastic Autoencoder for Improved Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NITK Kids’ Speech Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191421.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-2|PAPER Wed-O-6-2-2 — Locality-Constrained Linear Coding Based Fused Visual Features for Robust Acoustic Event Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Locality-Constrained Linear Coding Based Fused Visual Features for Robust Acoustic Event Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191865.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-1|PAPER Mon-O-2-4-1 — Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198016.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-4|PAPER Mon-S&T-1-4 — Using Real-Time Visual Biofeedback for Second Language Instruction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Real-Time Visual Biofeedback for Second Language Instruction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-2|PAPER Wed-O-7-2-2 — Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192271.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-3|PAPER Wed-P-8-E-3 — Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-7|PAPER Mon-P-2-A-7 — Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192078.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-2|PAPER Tue-O-3-5-2 — Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191949.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-2|PAPER Thu-O-10-1-2 — Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191376.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-4|PAPER Mon-O-1-3-4 — Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192108.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-7|PAPER Wed-P-6-E-7 — Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191532.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-10|PAPER Wed-P-8-E-10 — Few-Shot Audio Classification with Attentional Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Few-Shot Audio Classification with Attentional Graph Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-11|PAPER Tue-P-5-B-11 — Towards Language-Universal Mandarin-English Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Language-Universal Mandarin-English Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191290.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-2|PAPER Tue-P-5-C-2 — Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191302.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-9|PAPER Wed-P-7-E-9 — Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-10|PAPER Wed-P-7-C-10 — Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193038.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-7|PAPER Thu-O-10-5-7 — End-to-End Multilingual Multi-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multilingual Multi-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-4|PAPER Thu-O-9-3-4 — Vectorized Beam Search for CTC-Attention-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vectorized Beam Search for CTC-Attention-Based Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192899.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-1|PAPER Thu-P-10-A-1 — End-to-End Neural Speaker Diarization with Permutation-Free Objectives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Speaker Diarization with Permutation-Free Objectives</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-4-6|PAPER Tue-O-3-4-6 — Speech Quality Evaluation of Synthesized Japanese Speech Using EEG]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Quality Evaluation of Synthesized Japanese Speech Using EEG</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-6|PAPER Tue-P-4-B-6 — Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191795.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-12|PAPER Tue-P-5-D-12 — The Different Roles of Expectations in Phonetic and Lexical Processing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Different Roles of Expectations in Phonetic and Lexical Processing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191766.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-5|PAPER Tue-P-5-C-5 — Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-5|PAPER Thu-O-9-4-5 — Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-6|PAPER Tue-P-3-C-6 — Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191527.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-8|PAPER Thu-P-9-C-8 — Analysis of Effect and Timing of Fillers in Natural Turn-Taking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Effect and Timing of Fillers in Natural Turn-Taking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193099.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-3|PAPER Mon-O-1-5-3 — Expediting TTS Synthesis with Adversarial Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Expediting TTS Synthesis with Adversarial Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191318.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-9|PAPER Wed-P-7-B-9 — Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191313.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-9|PAPER Thu-P-9-C-9 — Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192899.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-1|PAPER Thu-P-10-A-1 — End-to-End Neural Speaker Diarization with Permutation-Free Objectives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Speaker Diarization with Permutation-Free Objectives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-10|PAPER Thu-P-9-B-10 — On the Use of Pitch Features for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Use of Pitch Features for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192420.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-3|PAPER Wed-P-7-B-3 — Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-4-2|PAPER Wed-O-8-4-2 — Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-1|PAPER Tue-P-5-E-1 — Multiview Shared Subspace Learning Across Speakers and Speech Commands]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multiview Shared Subspace Learning Across Speakers and Speech Commands</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192813.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-1|PAPER Mon-P-1-A-1 — Bayesian HMM Based x-Vector Clustering for Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bayesian HMM Based x-Vector Clustering for Speaker Diarization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-5|PAPER Tue-O-3-2-5 — Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192120.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-10|PAPER Wed-P-6-A-10 — Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192432.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-7|PAPER Thu-P-10-D-7 — Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-9|PAPER Tue-P-5-C-9 — Active Learning Methods for Low Resource End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning Methods for Low Resource End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-5|PAPER Tue-O-5-2-5 — Trainable Dynamic Subsampling for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Trainable Dynamic Subsampling for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191522.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-9|PAPER Thu-P-9-A-9 — Two-Stage Training for Chinese Dialect Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Stage Training for Chinese Dialect Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191968.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-9|PAPER Tue-P-3-C-9 — Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-1-3|PAPER Wed-O-7-1-3 — Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-7|PAPER Tue-P-3-C-7 — Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193214.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-1|PAPER Mon-P-1-D-1 — Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191483.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-7|PAPER Wed-P-7-E-7 — ToneNet: A CNN Model of Tone Classification of Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ToneNet: A CNN Model of Tone Classification of Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193267.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-1|PAPER Tue-P-4-E-1 — Direct F0 Estimation with Neural-Network-Based Regression]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct F0 Estimation with Neural-Network-Based Regression</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192678.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-2|PAPER Tue-P-3-B-2 — Unbiased Semi-Supervised LF-MMI Training Using Dropout]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unbiased Semi-Supervised LF-MMI Training Using Dropout</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-11|PAPER Wed-P-7-C-11 — Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-1|PAPER Tue-P-5-B-1 — Multilingual Speech Recognition with Corpus Relatedness Sampling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Corpus Relatedness Sampling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-6|PAPER Wed-S&T-4-6 — SANTLR: Speech Annotation Toolkit for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SANTLR: Speech Annotation Toolkit for Low Resource Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-2|PAPER Thu-P-10-B-2 — Cross-Attention End-to-End ASR for Two-Party Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Attention End-to-End ASR for Two-Party Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193252.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-1|PAPER Thu-O-10-2-1 — Direct Modelling of Speech Emotion from Raw Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Modelling of Speech Emotion from Raw Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-6|PAPER Mon-O-1-3-6 — SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191764.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-2|PAPER Thu-O-9-1-2 — An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191148.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-3|PAPER Thu-SS-9-6-3 — Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-1|PAPER Wed-P-8-D-1 — Vietnamese Learners Tackling the German /ʃt/ in Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vietnamese Learners Tackling the German /ʃt/ in Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192251.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-10|PAPER Tue-P-5-D-10 — A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-5|PAPER Tue-P-4-C-5 — Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-3|PAPER Thu-O-10-4-3 — The Greennn Tree — Lengthening Position Influences Uncertainty Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Greennn Tree — Lengthening Position Influences Uncertainty Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191800.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-2|PAPER Tue-O-5-1-2 — Evaluating Near End Listening Enhancement Algorithms in Realistic Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Evaluating Near End Listening Enhancement Algorithms in Realistic Environments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191824.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-8|PAPER Tue-P-3-A-8 — Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-9|PAPER Tue-P-3-A-9 — Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-6|PAPER Thu-O-10-2-6 — Disentangling Style Factors from Speaker Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disentangling Style Factors from Speaker Representations</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191945.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-9|PAPER Thu-P-10-C-9 — Improving Speech Synthesis with Discourse Relations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Speech Synthesis with Discourse Relations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-1-2|PAPER Wed-O-6-1-2 — Dimensions of Prosodic Prominence in an Attractor Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dimensions of Prosodic Prominence in an Attractor Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-3|PAPER Thu-O-9-1-3 — Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191334.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-4|PAPER Thu-O-9-1-4 — Articulatory Copy Synthesis Based on a Genetic Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Copy Synthesis Based on a Genetic Algorithm</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|^<div class="cpauthorindexpersoncardpapertitle">All Together Now: The Living Audio Dataset</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-5|PAPER Tue-P-4-C-5 — Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-3|PAPER Thu-O-10-4-3 — The Greennn Tree — Lengthening Position Influences Uncertainty Perception]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Greennn Tree — Lengthening Position Influences Uncertainty Perception</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192544.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-4|PAPER Mon-P-2-B-4 — Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192983.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-1|PAPER Thu-P-9-A-1 — Adversarial Regularization for End-to-End Robust Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adversarial Regularization for End-to-End Robust Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191442.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-12|PAPER Thu-P-9-A-12 — Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191440.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-13|PAPER Thu-P-10-A-13 — Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-9|PAPER Thu-P-10-D-9 — Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191338.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-6|PAPER Mon-O-2-3-6 — Improving Unsupervised Subword Modeling via Disentangled Speech Representation Learning and Transformation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Unsupervised Subword Modeling via Disentangled Speech Representation Learning and Transformation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191337.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-2|PAPER Tue-SS-5-6-2 — Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191344.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-6|PAPER Wed-O-7-5-6 — Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191705.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-1|PAPER Mon-O-1-5-1 — High Quality, Lightweight and Adaptable TTS Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality, Lightweight and Adaptable TTS Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-4|PAPER Wed-O-6-3-4 — Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192097.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-6|PAPER Wed-O-6-3-6 — Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-6|PAPER Thu-SS-9-6-6 — Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-10|PAPER Thu-P-10-D-10 — Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-7|PAPER Mon-SS-1-6-7 — Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192984.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-2|PAPER Tue-P-4-D-2 — Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191850.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-5|PAPER Thu-P-9-E-5 — Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191827.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-8|PAPER Thu-P-10-E-8 — Probabilistic Permutation Invariant Training for Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Probabilistic Permutation Invariant Training for Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192090.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-5|PAPER Mon-P-1-B-5 — Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-3|PAPER Wed-P-7-C-3 — Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192934.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-1|PAPER Tue-P-3-D-1 — L2 Pronunciation Accuracy and Context: A Pilot Study on the Realization of Geminates in Italian as L2 by French Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">L2 Pronunciation Accuracy and Context: A Pilot Study on the Realization of Geminates in Italian as L2 by French Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192988.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-7|PAPER Wed-SS-6-4-7 — Voice Quality and Between-Frame Entropy for Sleepiness Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality and Between-Frame Entropy for Sleepiness Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-6|PAPER Wed-S&T-5-6 — Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-11|PAPER Wed-P-7-C-11 — Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-3|PAPER Mon-S&T-1-3 — SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192881.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-2|PAPER Tue-P-5-B-2 — Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191241.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-1|PAPER Tue-P-5-C-1 — Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191241.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-1|PAPER Tue-P-5-C-1 — Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-2|PAPER Wed-P-7-E-2 — A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-5|PAPER Mon-O-1-4-5 — Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192366.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-4|PAPER Tue-O-3-3-4 — End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192571.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-3|PAPER Thu-P-10-C-3 — Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193075.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-1|PAPER Tue-O-3-3-1 — Neural Transition Systems for Modeling Hierarchical Semantic Representations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transition Systems for Modeling Hierarchical Semantic Representations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192716.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-2|PAPER Tue-SS-3-6-2 — LEAP Diarization System for the Second DIHARD Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LEAP Diarization System for the Second DIHARD Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192371.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-2|PAPER Tue-O-4-1-2 — Attention Based Hybrid i-Vector BLSTM Model for Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention Based Hybrid i-Vector BLSTM Model for Language Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-9|PAPER Tue-P-5-C-9 — Active Learning Methods for Low Resource End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning Methods for Low Resource End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192652.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-3|PAPER Wed-P-8-B-3 — Unsupervised Raw Waveform Representation Learning for ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Raw Waveform Representation Learning for ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192726.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-2|PAPER Thu-P-9-B-2 — Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-9|PAPER Thu-P-9-B-9 — Video-Driven Speech Reconstruction Using Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Video-Driven Speech Reconstruction Using Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191592.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-6|PAPER Thu-P-9-C-6 — Voice Quality as a Turn-Taking Cue]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality as a Turn-Taking Cue</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-2|PAPER Tue-P-4-B-2 — Language Learning Using Speech to Image Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Learning Using Speech to Image Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-3|PAPER Wed-SS-6-4-3 — Deep Neural Baselines for Computational Paralinguistics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Baselines for Computational Paralinguistics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192845.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-5|PAPER Mon-P-2-E-5 — Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191393.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-5|PAPER Wed-O-8-1-5 — LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-11|PAPER Tue-P-4-D-11 — Recognition of Creaky Voice from Emergency Calls]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Creaky Voice from Emergency Calls</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-13|PAPER Tue-P-5-C-13 — ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-3|PAPER Wed-SS-6-4-3 — Deep Neural Baselines for Computational Paralinguistics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Baselines for Computational Paralinguistics</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191873.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-6|PAPER Wed-P-8-B-6 — wav2vec: Unsupervised Pre-Training for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">wav2vec: Unsupervised Pre-Training for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191786.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-10|PAPER Tue-P-4-C-10 — Do Conversational Partners Entrain on Articulatory Precision?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Conversational Partners Entrain on Articulatory Precision?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-12|PAPER Tue-P-3-D-12 — A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192910.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-6|PAPER Wed-SS-8-6-6 — Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192612.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-1|PAPER Mon-O-2-2-1 — Untranscribed Web Audio for Low Resource Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Untranscribed Web Audio for Low Resource Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-5|PAPER Tue-O-5-2-5 — Trainable Dynamic Subsampling for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Trainable Dynamic Subsampling for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-6|PAPER Tue-P-3-B-6 — Lattice-Based Lightly-Supervised Acoustic Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lattice-Based Lightly-Supervised Acoustic Model Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191257.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-9|PAPER Wed-P-8-B-9 — On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191804.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-4|PAPER Thu-P-9-B-4 — Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-5|PAPER Tue-O-3-1-5 — Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-8|PAPER Tue-P-4-B-8 — Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-3|PAPER Thu-P-9-D-3 — Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-11|PAPER Tue-SS-4-4-11 — Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193246.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-1|PAPER Mon-P-2-B-1 — Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-3|PAPER Wed-SS-8-6-3 — Mel-Frequency Cepstral Coefficients of Voice Source Waveforms for Classification of Phonation Types in Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mel-Frequency Cepstral Coefficients of Voice Source Waveforms for Classification of Phonation Types in Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-2|PAPER Tue-O-4-5-2 — Recursive Speech Separation for Unknown Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recursive Speech Separation for Unknown Number of Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NITK Kids’ Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-5|PAPER Wed-P-7-C-5 — Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-8|PAPER Thu-P-9-E-8 — A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-7|PAPER Wed-P-8-B-7 — Automatic Detection of Prosodic Focus in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Prosodic Focus in American English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-7|PAPER Wed-P-6-A-7 — An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192366.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-4|PAPER Tue-O-3-3-4 — End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-3|PAPER Wed-P-7-C-3 — Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-5|PAPER Mon-O-1-4-5 — Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-3|PAPER Thu-O-9-1-3 — Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192876.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-6|PAPER Mon-P-2-D-6 — Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-1|PAPER Tue-O-4-3-1 — Fusion Strategy for Prosodic and Lexical Representations of Word Importance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fusion Strategy for Prosodic and Lexical Representations of Word Importance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-3|PAPER Mon-P-1-A-3 — MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|^<div class="cpauthorindexpersoncardpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-6|PAPER Wed-O-8-5-6 — VoiceID Loss: Speech Enhancement for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceID Loss: Speech Enhancement for Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191498.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-15|PAPER Wed-P-6-A-15 — Large-Scale Speaker Retrieval on Random Speaker Variability Subspace]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Retrieval on Random Speaker Variability Subspace</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-2|PAPER Thu-P-10-B-2 — Cross-Attention End-to-End ASR for Two-Party Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Attention End-to-End ASR for Two-Party Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192366.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-4|PAPER Tue-O-3-3-4 — End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-9|PAPER Mon-P-2-E-9 — An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192108.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-7|PAPER Wed-P-6-E-7 — Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-5|PAPER Tue-P-3-E-5 — IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192425.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-6|PAPER Wed-P-6-E-6 — Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192521.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-4|PAPER Thu-P-10-C-4 — Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-3|PAPER Wed-O-6-5-3 — Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192389.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-8|PAPER Thu-P-10-D-8 — Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-5-6|PAPER Wed-S&T-5-6 — Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-10|PAPER Mon-P-2-A-10 — One-Shot Voice Conversion with Disentangled Representations by Leveraging Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Disentangled Representations by Leveraging Phonetic Posteriorgrams</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191498.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-15|PAPER Wed-P-6-A-15 — Large-Scale Speaker Retrieval on Random Speaker Variability Subspace]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Retrieval on Random Speaker Variability Subspace</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-9|PAPER Tue-P-4-E-9 — Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-4|PAPER Mon-O-1-1-4 — Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-1|PAPER Tue-P-4-C-1 — Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193038.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-7|PAPER Thu-O-10-5-7 — End-to-End Multilingual Multi-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multilingual Multi-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-4|PAPER Thu-O-9-3-4 — Vectorized Beam Search for CTC-Attention-Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vectorized Beam Search for CTC-Attention-Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-2|PAPER Mon-P-1-A-2 — Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191508.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-13|PAPER Mon-P-1-A-13 — Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The NEC-TT 2018 Speaker Verification System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192263.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-8|PAPER Tue-P-5-C-8 — A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191597.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-9|PAPER Mon-P-1-B-9 — Knowledge Distillation for Throat Microphone Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for Throat Microphone Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-14|PAPER Mon-P-2-C-14 — Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192263.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-8|PAPER Tue-P-5-C-8 — A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192497.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-5|PAPER Thu-P-10-C-5 — Semi-Supervised Prosody Modeling Using Deep Gaussian Process Latent Variable Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Prosody Modeling Using Deep Gaussian Process Latent Variable Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191930.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-11|PAPER Tue-P-3-B-11 — Direct Neuron-Wise Fusion of Cognate Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Neuron-Wise Fusion of Cognate Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191180.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-11|PAPER Thu-P-9-D-11 — Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191597.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-9|PAPER Mon-P-1-B-9 — Knowledge Distillation for Throat Microphone Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for Throat Microphone Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192236.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-5|PAPER Mon-P-2-A-5 — StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191288.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-6|PAPER Tue-O-4-2-6 — Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191855.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-1|PAPER Wed-O-6-2-1 — Audio Classification of Bit-Representation Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Classification of Bit-Representation Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192890.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-3|PAPER Wed-P-7-D-3 — V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-5|PAPER Wed-P-7-D-5 — Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-4|PAPER Wed-P-8-D-4 — Using Prosody to Discover Word Order Alternations in a Novel Language]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Prosody to Discover Word Order Alternations in a Novel Language</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191338.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-6|PAPER Mon-O-2-3-6 — Improving Unsupervised Subword Modeling via Disentangled Speech Representation Learning and Transformation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Unsupervised Subword Modeling via Disentangled Speech Representation Learning and Transformation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192050.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-6|PAPER Mon-P-2-B-6 — Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191337.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-2|PAPER Tue-SS-5-6-2 — Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191968.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-9|PAPER Tue-P-3-C-9 — Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-12|PAPER Wed-P-6-C-12 — Automatic Assessment of Language Impairment Based on Raw ASR Output]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Assessment of Language Impairment Based on Raw ASR Output</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192320.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-4|PAPER Thu-O-9-5-4 — Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-11|PAPER Tue-P-4-D-11 — Recognition of Creaky Voice from Emergency Calls]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Creaky Voice from Emergency Calls</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193072.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-2|PAPER Mon-P-1-D-2 — Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Tue-K-2|PAPER Tue-K-2 — Biosignal Processing for Human-Machine Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Biosignal Processing for Human-Machine Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191704.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-5|PAPER Wed-O-8-5-5 — Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-1|PAPER Mon-O-2-1-1 — Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192730.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-5|PAPER Tue-P-5-A-5 — End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-8|PAPER Tue-P-5-B-8 — Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-3|PAPER Tue-P-5-C-3 — Improving Performance of End-to-End ASR on Numeric Sequences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Performance of End-to-End ASR on Numeric Sequences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192502.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-5|PAPER Tue-P-3-C-5 — Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-6|PAPER Tue-P-4-B-6 — Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192918.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-4|PAPER Tue-P-4-D-4 — Prosodic Factors Influencing Vowel Reduction in Russian]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Factors Influencing Vowel Reduction in Russian</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193107.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-5|PAPER Thu-O-10-1-5 — Who Needs Words? Lexicon-Free Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Who Needs Words? Lexicon-Free Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192594.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-1|PAPER Wed-O-8-3-1 — Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-7|PAPER Thu-P-9-C-7 — Turn-Taking Prediction Based on Detection of Transition Relevance Place]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Turn-Taking Prediction Based on Detection of Transition Relevance Place</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191527.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-8|PAPER Thu-P-9-C-8 — Analysis of Effect and Timing of Fillers in Natural Turn-Taking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Effect and Timing of Fillers in Natural Turn-Taking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191220.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-5|PAPER Mon-O-1-2-5 — Variational Bayesian Multi-Channel Speech Dereverberation Under Noisy Environments with Probabilistic Convolutive Transfer Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Bayesian Multi-Channel Speech Dereverberation Under Noisy Environments with Probabilistic Convolutive Transfer Function</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191289.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-4|PAPER Wed-O-7-4-4 — Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-5|PAPER Tue-P-3-E-5 — IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192890.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-3|PAPER Wed-P-7-D-3 — V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent]]</div>|^<div class="cpauthorindexpersoncardpapertitle">V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-D-5|PAPER Wed-P-7-D-5 — Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Path Signature Approach for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-1|PAPER Mon-O-1-2-1 — Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-3|PAPER Mon-O-2-3-3 — Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-1|PAPER Mon-O-1-2-1 — Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-3|PAPER Mon-O-2-3-3 — Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192702.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-2|PAPER Mon-O-1-1-2 — Very Deep Self-Attention Networks for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Very Deep Self-Attention Networks for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192361.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-6|PAPER Wed-P-6-A-6 — An Adaptive-Q Cochlear Model for Replay Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Adaptive-Q Cochlear Model for Replay Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-1|PAPER Wed-O-8-5-1 — Self-Supervised Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Supervised Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192097.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-6|PAPER Wed-O-6-3-6 — Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-6|PAPER Mon-P-2-E-6 — Unsupervised Low-Rank Representations for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Low-Rank Representations for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-5|PAPER Tue-P-5-B-5 — End-to-End Accented Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Accented Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-10|PAPER Thu-P-10-C-10 — Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-10|PAPER Wed-P-8-A-10 — Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-5|PAPER Thu-O-9-5-5 — Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192571.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-3|PAPER Thu-P-10-C-3 — Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-6|PAPER Tue-O-5-3-6 — Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-7|PAPER Mon-P-1-D-7 — Detecting Mismatch Between Speech and Transcription Using Cross-Modal Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detecting Mismatch Between Speech and Transcription Using Cross-Modal Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-12|PAPER Tue-P-3-C-12 — Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191797.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-7|PAPER Wed-P-7-B-7 — Latent Dirichlet Allocation Based Acoustic Data Selection for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Dirichlet Allocation Based Acoustic Data Selection for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192540.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-4|PAPER Wed-SS-6-4-4 — Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198001.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-1|PAPER Wed-S&T-4-1 — BAS Web Services for Automatic Subtitle Creation and Anonymization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">BAS Web Services for Automatic Subtitle Creation and Anonymization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191500.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-12|PAPER Tue-P-3-A-12 — CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-9|PAPER Wed-P-6-B-9 — Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-8|PAPER Wed-P-8-B-8 — Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192164.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-7|PAPER Wed-P-8-C-7 — Improving Automatically Induced Lexicons for Highly Agglutinating Languages Using Data-Driven Morphological Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Automatically Induced Lexicons for Highly Agglutinating Languages Using Data-Driven Morphological Segmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-5|PAPER Thu-O-10-5-5 — Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191975.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-9|PAPER Tue-P-3-B-9 — Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-6|PAPER Wed-P-6-B-6 — The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-6|PAPER Mon-O-2-4-6 — Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192956.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-1|PAPER Wed-P-6-A-1 — Blind Channel Response Estimation for Replay Attack Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Blind Channel Response Estimation for Replay Attack Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192974.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-2|PAPER Thu-P-9-A-2 — Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191994.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-5|PAPER Thu-P-10-A-5 — A Unified Framework for Speaker and Utterance Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Unified Framework for Speaker and Utterance Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192594.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-1|PAPER Wed-O-8-3-1 — Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-1|PAPER Tue-P-4-C-1 — Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191148.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-3|PAPER Thu-SS-9-6-3 — Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192751.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-2|PAPER Mon-O-1-2-2 — On Nonlinear Spatial Filtering in Multichannel Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On Nonlinear Spatial Filtering in Multichannel Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192459.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-4|PAPER Thu-P-10-E-4 — Influence of Speaker-Specific Parameters on Speech Separation Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Influence of Speaker-Specific Parameters on Speech Separation Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-3|PAPER Wed-O-8-3-3 — Speech Based Emotion Prediction: Can a Linear Model Work?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Based Emotion Prediction: Can a Linear Model Work?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192080.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-10|PAPER Wed-P-6-C-10 — Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-4|PAPER Mon-P-2-C-4 — M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191539.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-9|PAPER Thu-P-10-B-9 — Real to H-Space Encoder for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real to H-Space Encoder for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-5|PAPER Mon-P-2-C-5 — Ultra-Compact NLU: Neuronal Network Binarization as Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ultra-Compact NLU: Neuronal Network Binarization as Regularization</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192894.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-8|PAPER Wed-SS-7-A-8 — Intel Far-Field Speaker Recognition System for VOiCES Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Intel Far-Field Speaker Recognition System for VOiCES Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191728.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-6|PAPER Wed-O-6-5-6 — Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-12|PAPER Tue-P-4-E-12 — A Study of Soprano Singing in Light of the Source-Filter Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Study of Soprano Singing in Light of the Source-Filter Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191284.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-3|PAPER Wed-P-8-A-3 — End-to-End Optimization of Source Models for Speech and Audio Coding Using a Machine Learning Framework]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Optimization of Source Models for Speech and Audio Coding Using a Machine Learning Framework</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-5|PAPER Wed-P-8-A-5 — Super-Wideband Spectral Envelope Modeling for Speech Coding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Super-Wideband Spectral Envelope Modeling for Speech Coding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-6|PAPER Thu-SS-9-6-6 — Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192250.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-10|PAPER Thu-P-10-A-10 — Mixup Learning Strategies for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixup Learning Strategies for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193095.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-1|PAPER Wed-P-6-C-1 — Optimizing Speech-Input Length for Speaker-Independent Depression Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Speech-Input Length for Speaker-Independent Depression Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-2|PAPER Mon-P-1-A-2 — Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-6|PAPER Mon-P-1-C-6 — Conversational and Social Laughter Synthesis with WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational and Social Laughter Synthesis with WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-2-6|PAPER Mon-O-1-2-6 — Simultaneous Denoising and Dereverberation for Low-Latency Applications Using Frame-by-Frame Online Unified Convolutional Beamformer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Denoising and Dereverberation for Low-Latency Applications Using Frame-by-Frame Online Unified Convolutional Beamformer</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191513.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-6|PAPER Wed-O-7-4-6 — Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191949.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-1-2|PAPER Thu-O-10-1-2 — Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192263.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-8|PAPER Tue-P-5-C-8 — A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-5|PAPER Mon-O-1-5-5 — Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192307.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-4|PAPER Mon-P-2-A-4 — Non-Parallel Voice Conversion with Cyclic Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Cyclic Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192497.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-5|PAPER Thu-P-10-C-5 — Semi-Supervised Prosody Modeling Using Deep Gaussian Process Latent Variable Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Prosody Modeling Using Deep Gaussian Process Latent Variable Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191318.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-9|PAPER Wed-P-7-B-9 — Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-5|PAPER Mon-O-1-5-5 — Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192307.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-4|PAPER Mon-P-2-A-4 — Non-Parallel Voice Conversion with Cyclic Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Cyclic Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-6|PAPER Mon-P-2-A-6 — Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191288.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-6|PAPER Tue-O-4-2-6 — Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192526.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-5|PAPER Thu-O-9-1-5 — A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-7|PAPER Mon-P-2-B-7 — End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-3|PAPER Tue-P-5-A-3 — Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-2|PAPER Wed-P-6-D-2 — Disfluencies and Human Speech Transcription Errors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Human Speech Transcription Errors</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-1|PAPER Thu-P-9-D-1 — On the Role of Style in Parsing Speech with Neural Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Style in Parsing Speech with Neural Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193114.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-11|PAPER Thu-P-9-E-11 — My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-3|PAPER Tue-P-5-E-3 — Acoustic Scene Classification with Mismatched Devices Using CliqueNets and Mixup Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification with Mismatched Devices Using CliqueNets and Mixup Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193246.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-1|PAPER Mon-P-2-B-1 — Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-7|PAPER Mon-P-2-A-7 — Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192339.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-4|PAPER Wed-P-7-B-4 — Towards Debugging Deep Neural Networks by Generating Speech Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Debugging Deep Neural Networks by Generating Speech Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191513.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-6|PAPER Wed-O-7-4-6 — Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191376.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-4|PAPER Mon-O-1-3-4 — Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-5|PAPER Tue-O-5-1-5 — Using a Manifold Vocoder for Spectral Voice and Style Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using a Manifold Vocoder for Spectral Voice and Style Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-1|PAPER Tue-O-3-5-1 — Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191786.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-10|PAPER Tue-P-4-C-10 — Do Conversational Partners Entrain on Articulatory Precision?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Conversational Partners Entrain on Articulatory Precision?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191749.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-5|PAPER Thu-P-9-D-5 — SpeechYOLO: Detection and Localization of Speech Objects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechYOLO: Detection and Localization of Speech Objects</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — God as Interlocutor — Real or Imaginary? Prosodic Markers of Dialogue Speech and Expected Efficacy in Spoken Prayer]]</div>|^<div class="cpauthorindexpersoncardpapertitle">God as Interlocutor — Real or Imaginary? Prosodic Markers of Dialogue Speech and Expected Efficacy in Spoken Prayer</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193062.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-3|PAPER Tue-P-4-C-3 — Analyzing Verbal and Nonverbal Features for Predicting Group Performance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Verbal and Nonverbal Features for Predicting Group Performance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191444.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-11|PAPER Thu-P-9-A-11 — Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-SS-9-6-6|PAPER Thu-SS-9-6-6 — Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-5|PAPER Tue-O-4-3-5 — Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-4|PAPER Wed-O-6-3-4 — Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-4|PAPER Wed-P-6-D-4 — Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191402.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-10|PAPER Wed-P-6-D-10 — Talker Intelligibility and Listening Effort with Temporally Modified Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Talker Intelligibility and Listening Effort with Temporally Modified Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-5|PAPER Wed-P-8-C-5 — Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-7|PAPER Mon-SS-1-6-7 — Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-5|PAPER Tue-P-3-B-5 — Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-S&T-1-7|PAPER Mon-S&T-1-7 — Speech-Based Web Navigation for Limited Mobility Users]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech-Based Web Navigation for Limited Mobility Users</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-9|PAPER Tue-P-3-E-9 — Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192622.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-10|PAPER Tue-P-3-E-10 — A Non-Causal FFTNet Architecture for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Non-Causal FFTNet Architecture for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Achieving Robust Universal Neural Vocoding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192977.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-2|PAPER Tue-O-3-3-2 — Mining Polysemous Triplets with Recurrent Neural Networks for Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mining Polysemous Triplets with Recurrent Neural Networks for Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193043.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-6|PAPER Wed-P-8-A-6 — Speech Audio Super-Resolution for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Audio Super-Resolution for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193019.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-1|PAPER Wed-P-8-E-1 — Multi-Stream Network with Temporal Attention for Environmental Sound Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stream Network with Temporal Attention for Environmental Sound Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191796.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-6|PAPER Thu-P-9-B-6 — Deep Sensing of Breathing Signal During Conversational Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Sensing of Breathing Signal During Conversational Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">NITK Kids’ Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192571.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-3|PAPER Thu-P-10-C-3 — Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-2|PAPER Wed-P-6-D-2 — Disfluencies and Human Speech Transcription Errors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disfluencies and Human Speech Transcription Errors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-1|PAPER Thu-O-10-5-1 — Improving Code-Switched Language Modeling Performance Using Cognate Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Code-Switched Language Modeling Performance Using Cognate Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191764.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-2|PAPER Thu-O-9-1-2 — An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-3|PAPER Wed-P-6-A-3 — Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Language Recognition Using Triplet Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-3|PAPER Wed-O-8-3-3 — Speech Based Emotion Prediction: Can a Linear Model Work?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Based Emotion Prediction: Can a Linear Model Work?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-11|PAPER Wed-P-8-E-11 — Semi-Supervised Audio Classification with Consistency-Based Regularization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Semi-Supervised Audio Classification with Consistency-Based Regularization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192988.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-7|PAPER Wed-SS-6-4-7 — Voice Quality and Between-Frame Entropy for Sleepiness Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Voice Quality and Between-Frame Entropy for Sleepiness Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-12|PAPER Wed-P-7-C-12 — Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192396.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-6|PAPER Mon-P-2-C-6 — Speech Model Pre-Training for End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Model Pre-Training for End-to-End Spoken Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192339.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-4|PAPER Wed-P-7-B-4 — Towards Debugging Deep Neural Networks by Generating Speech Utterances]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Debugging Deep Neural Networks by Generating Speech Utterances</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-2|PAPER Mon-P-1-A-2 — Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-6|PAPER Tue-P-5-E-6 — Understanding and Visualizing Raw Waveform-Based CNNs]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Understanding and Visualizing Raw Waveform-Based CNNs</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-10|PAPER Wed-P-8-A-10 — Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-4|PAPER Wed-O-6-3-4 — Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-10|PAPER Thu-P-10-C-10 — Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191786.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-10|PAPER Tue-P-4-C-10 — Do Conversational Partners Entrain on Articulatory Precision?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Do Conversational Partners Entrain on Articulatory Precision?</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193096.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-7|PAPER Wed-SS-8-6-7 — Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192913.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-1|PAPER Wed-P-7-E-1 — Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-11|PAPER Wed-P-6-B-11 — CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-10|PAPER Mon-P-2-B-10 — A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191776.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-E-8|PAPER Tue-P-4-E-8 — Optimizing Voice Activity Detection for Noisy Conditions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Voice Activity Detection for Noisy Conditions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-5|PAPER Thu-O-10-3-5 — Vowel-Tone Interaction in Two Tibeto-Burman Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vowel-Tone Interaction in Two Tibeto-Burman Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191924.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-1|PAPER Wed-O-7-5-1 — Speech Denoising with Deep Feature Losses]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Denoising with Deep Feature Losses</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-3|PAPER Tue-S&T-2-3 — Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-1|PAPER Thu-O-10-4-1 — Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192756.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-9|PAPER Mon-P-1-A-9 — Speaker-Corrupted Embeddings for Online Speaker Diarization]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Corrupted Embeddings for Online Speaker Diarization</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-1|PAPER Wed-P-7-B-1 — Acoustic Model Bootstrapping Using Semi-Supervised Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Bootstrapping Using Semi-Supervised Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191861.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-1|PAPER Mon-P-2-E-1 — Salient Speech Representations Based on Cloned Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Salient Speech Representations Based on Cloned Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191809.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-11|PAPER Tue-P-3-E-11 — Speech Enhancement with Variance Constrained Autoencoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement with Variance Constrained Autoencoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-4|PAPER Wed-O-7-5-4 — Maximum a posteriori Speech Enhancement Based on Double Spectrum]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Maximum a posteriori Speech Enhancement Based on Double Spectrum</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-3|PAPER Tue-O-5-1-3 — Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193192.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-5|PAPER Wed-O-6-5-5 — Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-3|PAPER Wed-O-7-4-3 — Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-4|PAPER Thu-P-10-B-4 — An Online Attention-Based Model for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Online Attention-Based Model for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-4|PAPER Tue-O-4-1-4 — Target Speaker Extraction for Multi-Talker Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Extraction for Multi-Talker Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193191.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-2|PAPER Tue-P-5-A-2 — Building a Mixed-Lingual Neural TTS System with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Mixed-Lingual Neural TTS System with Only Monolingual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-6|PAPER Wed-P-8-E-6 — Sound Event Detection in Multichannel Audio Using Convolutional Time-Frequency-Channel Squeeze and Excitation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sound Event Detection in Multichannel Audio Using Convolutional Time-Frequency-Channel Squeeze and Excitation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191383.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-11|PAPER Mon-P-2-C-11 — Meta Learning for Hyperparameter Optimization in Dialogue System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meta Learning for Hyperparameter Optimization in Dialogue System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192641.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-6|PAPER Mon-O-2-2-6 — Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191473.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-1|PAPER Mon-O-1-4-1 — An Unsupervised Autoregressive Model for Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Autoregressive Model for Speech Representation Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-10|PAPER Wed-P-7-B-10 — Transfer Learning from Audio-Visual Grounding to Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Transfer Learning from Audio-Visual Grounding to Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191683.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-11|PAPER Tue-P-3-C-11 — Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-3|PAPER Wed-O-6-2-3 — Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191298.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-10|PAPER Wed-P-7-E-10 — Music Genre Classification Using Duplicated Convolutional Layers in Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Music Genre Classification Using Duplicated Convolutional Layers in Neural Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-5|PAPER Wed-P-8-E-5 — Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-11|PAPER Tue-P-5-D-11 — Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-3|PAPER Tue-SS-4-4-3 — The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-1|PAPER Tue-O-4-1-1 — Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191435.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-12|PAPER Wed-SS-7-A-12 — The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191436.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-15|PAPER Thu-P-10-A-15 — The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191117.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-15|PAPER Tue-P-3-B-15 — Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-12|PAPER Wed-SS-6-4-12 — The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-3-2|PAPER Tue-O-5-3-2 — Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192228.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-6|PAPER Tue-O-4-3-6 — Latent Topic Attention for Domain Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Latent Topic Attention for Domain Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-5|PAPER Thu-O-10-3-5 — Vowel-Tone Interaction in Two Tibeto-Burman Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vowel-Tone Interaction in Two Tibeto-Burman Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-4|PAPER Thu-O-9-4-4 — Multi-Scale Time-Frequency Attention for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Scale Time-Frequency Attention for Acoustic Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-4|PAPER Tue-O-4-3-4 — Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-3|PAPER Mon-O-1-3-3 — Individual Difference of Relative Tongue Size and its Acoustic Effects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Difference of Relative Tongue Size and its Acoustic Effects</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-8|PAPER Wed-P-7-B-8 — Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192432.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-7|PAPER Thu-P-10-D-7 — Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193276.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-1|PAPER Thu-O-9-5-1 — Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-4-2|PAPER Mon-O-2-4-2 — Sibilant Variation in New Englishes: A Comparative Sociophonetic Study of Trinidadian and American English /s(tr)/-Retraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sibilant Variation in New Englishes: A Comparative Sociophonetic Study of Trinidadian and American English /s(tr)/-Retraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192164.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-7|PAPER Wed-P-8-C-7 — Improving Automatically Induced Lexicons for Highly Agglutinating Languages Using Data-Driven Morphological Segmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Automatically Induced Lexicons for Highly Agglutinating Languages Using Data-Driven Morphological Segmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198007.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-2|PAPER Tue-S&T-2-2 — Online Speech Processing and Analysis Suite]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Speech Processing and Analysis Suite</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-7|PAPER Tue-P-3-B-7 — Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192714.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-3|PAPER Mon-P-1-D-3 — RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts]]</div>|^<div class="cpauthorindexpersoncardpapertitle">RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-7|PAPER Wed-P-6-C-7 — Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192482.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-3|PAPER Tue-O-4-3-3 — Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-7|PAPER Wed-P-6-A-7 — An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192397.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-9|PAPER Thu-P-9-E-9 — End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192264.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-4|PAPER Tue-O-3-2-4 — Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191746.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-6|PAPER Tue-O-3-2-6 — Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-8|PAPER Thu-P-9-D-8 — Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192457.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-4|PAPER Wed-P-6-A-4 — Deep Hashing for Speaker Identification and Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hashing for Speaker Identification and Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-4|PAPER Mon-P-2-C-4 — M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-11|PAPER Thu-P-10-E-11 — End-to-End Music Source Separation: Is it Possible in the Waveform Domain?]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Music Source Separation: Is it Possible in the Waveform Domain?</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191567.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-2|PAPER Tue-P-3-E-2 — UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191567.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-2|PAPER Tue-P-3-E-2 — UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191382.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-2|PAPER Thu-O-10-5-2 — Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-5-6|PAPER Thu-O-10-5-6 — Multi-Graph Decoding for Code-Switching ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Graph Decoding for Code-Switching ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191141.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-10|PAPER Wed-P-6-E-10 — Masking Estimation with Phase Restoration of Clean Speech for Monaural Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Masking Estimation with Phase Restoration of Clean Speech for Monaural Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-8|PAPER Tue-P-5-D-8 — Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192641.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-6|PAPER Mon-O-2-2-6 — Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-3|PAPER Tue-P-3-B-3 — Acoustic Model Optimization Based on Evolutionary Stochastic Gradient Descent with Anchors for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Optimization Based on Evolutionary Stochastic Gradient Descent with Anchors for Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Diarization with Lexical Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191973.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-4|PAPER Wed-O-8-2-4 — Multi-Stride Self-Attention for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stride Self-Attention for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191343.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-8|PAPER Wed-P-6-B-8 — Exploring Methods for the Automatic Detection of Errors in Manual Transcription]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring Methods for the Automatic Detection of Errors in Manual Transcription</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191514.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-6|PAPER Mon-O-1-5-6 — A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-3|PAPER Mon-O-1-3-3 — Individual Difference of Relative Tongue Size and its Acoustic Effects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Individual Difference of Relative Tongue Size and its Acoustic Effects</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-6|PAPER Thu-O-9-4-6 — Parameter-Transfer Learning for Low-Resource Individualization of Head-Related Transfer Functions]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parameter-Transfer Learning for Low-Resource Individualization of Head-Related Transfer Functions</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191256.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-15|PAPER Thu-P-9-A-15 — A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-S&T-2-7|PAPER Tue-S&T-2-7 —  NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle"> NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191235.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-12|PAPER Tue-P-5-A-12 — Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191435.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-12|PAPER Wed-SS-7-A-12 — The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191542.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-8|PAPER Thu-P-9-A-8 — Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-14|PAPER Thu-P-10-A-14 — Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-1|PAPER Wed-P-6-D-1 — Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-4|PAPER Tue-O-4-2-4 — Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191311.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-5|PAPER Tue-O-4-2-5 — Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192229.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-4|PAPER Wed-P-7-C-4 — The Contribution of Acoustic Features Analysis to Model Emotion Perceptual Process for Language Diversity]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Contribution of Acoustic Features Analysis to Model Emotion Perceptual Process for Language Diversity</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-3|PAPER Thu-P-9-D-3 — Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-1|PAPER Tue-P-5-B-1 — Multilingual Speech Recognition with Corpus Relatedness Sampling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multilingual Speech Recognition with Corpus Relatedness Sampling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-6|PAPER Wed-S&T-4-6 — SANTLR: Speech Annotation Toolkit for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SANTLR: Speech Annotation Toolkit for Low Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192719.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-6|PAPER Thu-O-9-2-6 — Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193043.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-A-6|PAPER Wed-P-8-A-6 — Speech Audio Super-Resolution for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Audio Super-Resolution for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193019.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-1|PAPER Wed-P-8-E-1 — Multi-Stream Network with Temporal Attention for Environmental Sound Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stream Network with Temporal Attention for Environmental Sound Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191400.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-12|PAPER Thu-P-10-C-12 — A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192384.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-3|PAPER Wed-P-7-E-3 — Unsupervised Methods for Audio Classification from Lecture Discussion Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Audio Classification from Lecture Discussion Recordings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-10|PAPER Tue-P-3-D-10 — Neural Network-Based Modeling of Phonetic Durations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Network-Based Modeling of Phonetic Durations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193140.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-2-3|PAPER Thu-O-10-2-3 — Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193192.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-5|PAPER Wed-O-6-5-5 — Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192171.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-4|PAPER Wed-P-8-E-4 — A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-8|PAPER Wed-P-6-E-8 — Investigation of Cost Function for Supervised Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Cost Function for Supervised Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191777.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-3|PAPER Wed-O-7-5-3 — Incorporating Symbolic Sequential Modeling for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Symbolic Sequential Modeling for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192425.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-6|PAPER Wed-P-6-E-6 — Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192271.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-3|PAPER Wed-P-8-E-3 — Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192050.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-6|PAPER Mon-P-2-B-6 — Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192384.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-3|PAPER Wed-P-7-E-3 — Unsupervised Methods for Audio Classification from Lecture Discussion Recordings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Methods for Audio Classification from Lecture Discussion Recordings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-10|PAPER Thu-P-9-B-10 — On the Use of Pitch Features for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Use of Pitch Features for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192050.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-6|PAPER Mon-P-2-B-6 — Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191715.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-7|PAPER Mon-P-1-E-7 — Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192457.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-4|PAPER Wed-P-6-A-4 — Deep Hashing for Speaker Identification and Retrieval]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Hashing for Speaker Identification and Retrieval</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192712.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-1|PAPER Tue-P-3-E-1 — Speech Augmentation via Speaker-Specific Noise in Unseen Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Augmentation via Speaker-Specific Noise in Unseen Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191749.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-5|PAPER Thu-P-9-D-5 — SpeechYOLO: Detection and Localization of Speech Objects]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpeechYOLO: Detection and Localization of Speech Objects</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191972.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-3|PAPER Tue-O-4-2-3 — Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191606.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-7|PAPER Thu-P-9-A-7 — An Effective Deep Embedding Learning Architecture for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Deep Embedding Learning Architecture for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192913.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-1|PAPER Wed-P-7-E-1 — Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192967.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-1|PAPER Mon-P-1-E-1 — Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191563.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-3|PAPER Wed-O-6-3-3 — Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-1|PAPER Thu-P-9-D-1 — On the Role of Style in Parsing Speech with Neural Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Role of Style in Parsing Speech with Neural Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193095.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-1|PAPER Wed-P-6-C-1 — Optimizing Speech-Input Length for Speaker-Independent Depression Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Optimizing Speech-Input Length for Speaker-Independent Depression Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-4|PAPER Mon-P-1-C-4 — Deep Learning Based Mandarin Accent Identification for Accent Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Mandarin Accent Identification for Accent Robust ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192486.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-3|PAPER Thu-P-9-A-3 — VAE-Based Regularization for Deep Speaker Embedding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VAE-Based Regularization for Deep Speaker Embedding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-5|PAPER Tue-O-3-2-5 — Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-7|PAPER Tue-P-5-C-7 — Joint Decoding of CTC Based Systems for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Decoding of CTC Based Systems for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193192.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-5|PAPER Wed-O-6-5-5 — Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-3|PAPER Wed-O-7-4-3 — Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192120.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-10|PAPER Wed-P-6-A-10 — Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192659.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-2|PAPER Wed-P-8-B-2 — Prosody Usage Optimization for Children Speech Recognition with Zero Resource Children Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Prosody Usage Optimization for Children Speech Recognition with Zero Resource Children Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-8|PAPER Mon-P-2-C-8 — Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Visual Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192869.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-1|PAPER Mon-P-2-A-1 — Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-9|PAPER Tue-P-3-E-9 — Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192869.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-1|PAPER Mon-P-2-A-1 — Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-9|PAPER Tue-P-3-E-9 — Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192622.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-10|PAPER Tue-P-3-E-10 — A Non-Causal FFTNet Architecture for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Non-Causal FFTNet Architecture for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-4|PAPER Mon-P-1-E-4 — Rare Sound Event Detection Using Deep Learning and Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Rare Sound Event Detection Using Deep Learning and Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-3|PAPER Thu-P-9-D-3 — Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191235.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-12|PAPER Tue-P-5-A-12 — Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191483.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-7|PAPER Wed-P-7-E-7 — ToneNet: A CNN Model of Tone Classification of Mandarin Chinese]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ToneNet: A CNN Model of Tone Classification of Mandarin Chinese</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-4|PAPER Mon-O-2-2-4 — Speaker Adaptation for Attention-Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Attention-Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193056.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-14|PAPER Tue-P-5-C-14 — Acoustic-to-Phrase Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-to-Phrase Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191554.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-4|PAPER Thu-O-9-2-4 — Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192203.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-5|PAPER Thu-P-10-B-5 — Self-Attention Transducers for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Transducers for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191789.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-7|PAPER Thu-P-9-B-7 — Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-5|PAPER Wed-P-8-C-5 — Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-5|PAPER Thu-P-9-A-5 — Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191300.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-10|PAPER Thu-P-9-C-10 — Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-3|PAPER Wed-O-8-5-3 — Large Margin Softmax Loss for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large Margin Softmax Loss for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191369.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-4|PAPER Tue-O-5-1-4 — Listener Preference on the Local Criterion for Ideal Binary-Masked Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listener Preference on the Local Criterion for Ideal Binary-Masked Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-10|PAPER Wed-P-7-C-10 — Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-5|PAPER Mon-O-1-5-5 — Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192307.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-4|PAPER Mon-P-2-A-4 — Non-Parallel Voice Conversion with Cyclic Variational Autoencoder]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Non-Parallel Voice Conversion with Cyclic Variational Autoencoder</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-7|PAPER Tue-P-3-C-7 — Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191696.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-5|PAPER Thu-P-9-C-5 — Personalized Dialogue Response Generation Learned from Monologues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalized Dialogue Response Generation Learned from Monologues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-8|PAPER Wed-P-6-C-8 — Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-5|PAPER Tue-P-3-E-5 — IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-6|PAPER Tue-P-3-D-6 — Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-6|PAPER Tue-P-3-D-6 — Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192712.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-1|PAPER Tue-P-3-E-1 — Speech Augmentation via Speaker-Specific Noise in Unseen Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Augmentation via Speaker-Specific Noise in Unseen Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-4|PAPER Mon-O-2-2-4 — Speaker Adaptation for Attention-Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Attention-Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192971.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-3|PAPER Tue-O-5-2-3 — Layer Trajectory BLSTM]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Layer Trajectory BLSTM</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193056.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-14|PAPER Tue-P-5-C-14 — Acoustic-to-Phrase Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-to-Phrase Models for Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191467.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-6|PAPER Wed-O-8-2-6 — Self-Teaching Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Teaching Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191606.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-7|PAPER Thu-P-9-A-7 — An Effective Deep Embedding Learning Architecture for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Deep Embedding Learning Architecture for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-3|PAPER Tue-O-3-3-3 — Iterative Delexicalization for Improved Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Iterative Delexicalization for Improved Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-4|PAPER Tue-O-4-3-4 — Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-12|PAPER Wed-P-6-C-12 — Automatic Assessment of Language Impairment Based on Raw ASR Output]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Assessment of Language Impairment Based on Raw ASR Output</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192320.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-4|PAPER Thu-O-9-5-4 — Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-3|PAPER Wed-O-7-4-3 — Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-11|PAPER Tue-P-5-D-11 — Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building the Singapore English National Speech Corpus</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192793.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-2|PAPER Wed-P-6-B-2 — Detection and Recovery of OOVs for Improved English Broadcast News Captioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection and Recovery of OOVs for Improved English Broadcast News Captioning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192250.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-10|PAPER Thu-P-10-A-10 — Mixup Learning Strategies for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Mixup Learning Strategies for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191334.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-1-4|PAPER Thu-O-9-1-4 — Articulatory Copy Synthesis Based on a Genetic Algorithm]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Articulatory Copy Synthesis Based on a Genetic Algorithm</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-1|PAPER Tue-O-3-5-1 — Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191766.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-5|PAPER Tue-P-5-C-5 — Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-2|PAPER Mon-P-1-C-2 — Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192599.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-1-5|PAPER Mon-O-1-1-5 — Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191532.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-10|PAPER Wed-P-8-E-10 — Few-Shot Audio Classification with Attentional Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Few-Shot Audio Classification with Attentional Graph Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-B-7|PAPER Wed-P-8-B-7 — Automatic Detection of Prosodic Focus in American English]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automatic Detection of Prosodic Focus in American English</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-7|PAPER Tue-P-5-C-7 — Joint Decoding of CTC Based Systems for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Decoding of CTC Based Systems for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-B-3|PAPER Mon-P-1-B-3 — Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193155.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-3|PAPER Mon-P-2-B-3 — Multi-Accent Adaptation Based on Gate Mechanism]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Accent Adaptation Based on Gate Mechanism</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-3|PAPER Wed-O-6-5-3 — Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-8|PAPER Wed-P-7-B-8 — Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191484.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-3|PAPER Wed-P-8-C-3 — Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191256.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-15|PAPER Thu-P-9-A-15 — A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191532.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-10|PAPER Wed-P-8-E-10 — Few-Shot Audio Classification with Attentional Graph Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Few-Shot Audio Classification with Attentional Graph Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Pass End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-2|PAPER Wed-P-8-C-2 — Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191289.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-4-4|PAPER Wed-O-7-4-4 — Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-2|PAPER Thu-O-9-3-2 — GPU-Based WFST Decoding with Extra Large Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GPU-Based WFST Decoding with Extra Large Language Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-6|PAPER Mon-P-1-C-6 — Conversational and Social Laughter Synthesis with WaveNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational and Social Laughter Synthesis with WaveNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191288.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-6|PAPER Tue-O-4-2-6 — Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-8|PAPER Thu-P-10-C-8 — Duration Modeling with Global Phoneme-Duration Vectors]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Duration Modeling with Global Phoneme-Duration Vectors</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-4|PAPER Mon-O-1-4-4 — Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192396.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-6|PAPER Mon-P-2-C-6 — Speech Model Pre-Training for End-to-End Spoken Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Model Pre-Training for End-to-End Spoken Language Understanding</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192380.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-3|PAPER Tue-O-3-2-3 — Learning Speaker Representations with Mutual Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Speaker Representations with Mutual Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191735.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-6|PAPER Mon-P-1-E-6 — Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-3-3|PAPER Mon-O-2-3-3 — Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-B-7|PAPER Mon-P-2-B-7 — End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-2|PAPER Thu-O-10-3-2 — Frication as a Vowel Feature? — Evidence from the Rui’an Wu Chinese Dialect]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Frication as a Vowel Feature? — Evidence from the Rui’an Wu Chinese Dialect</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191498.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-A-15|PAPER Wed-P-6-A-15 — Large-Scale Speaker Retrieval on Random Speaker Variability Subspace]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Speaker Retrieval on Random Speaker Variability Subspace</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-5|PAPER Thu-P-9-A-5 — Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-5|PAPER Thu-P-9-A-5 — Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-4|PAPER Thu-P-10-A-4 — Variational Domain Adversarial Learning for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Variational Domain Adversarial Learning for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191265.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-13|PAPER Mon-P-2-A-13 — Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|^<div class="cpauthorindexpersoncardpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-5|PAPER Tue-P-3-E-5 — IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191777.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-3|PAPER Wed-O-7-5-3 — Incorporating Symbolic Sequential Modeling for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Incorporating Symbolic Sequential Modeling for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191519.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-2|PAPER Wed-P-6-E-2 — Noise Adaptive Speech Enhancement Using Domain Adversarial Training]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Noise Adaptive Speech Enhancement Using Domain Adversarial Training</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192425.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-6|PAPER Wed-P-6-E-6 — Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192108.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-7|PAPER Wed-P-6-E-7 — Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192271.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-3|PAPER Wed-P-8-E-3 — Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191473.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-4-1|PAPER Mon-O-1-4-1 — An Unsupervised Autoregressive Model for Speech Representation Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Unsupervised Autoregressive Model for Speech Representation Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-5|PAPER Tue-P-3-E-5 — IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-2-3|PAPER Wed-O-6-2-3 — Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-5|PAPER Wed-P-8-E-5 — Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191326.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-D-10|PAPER Tue-P-4-D-10 — F0 Variability Measures Based on Glottal Closure Instants]]</div>|^<div class="cpauthorindexpersoncardpapertitle">F0 Variability Measures Based on Glottal Closure Instants</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192545.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-1|PAPER Thu-O-10-3-1 — Sentence Prosody and  Wh-Indeterminates in Taiwan Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Sentence Prosody and  Wh-Indeterminates in Taiwan Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191541.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-E-8|PAPER Tue-P-5-E-8 — ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192291.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-6|PAPER Tue-P-4-C-6 — Influence of Contextuality on Prosodic Realization of Information Structure in Chinese Dialogues]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Influence of Contextuality on Prosodic Realization of Information Structure in Chinese Dialogues</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-11|PAPER Tue-P-5-B-11 — Towards Language-Universal Mandarin-English Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Language-Universal Mandarin-English Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191563.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-3|PAPER Wed-O-6-3-3 — Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192730.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-5|PAPER Tue-P-5-A-5 — End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192594.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-1|PAPER Wed-O-8-3-1 — Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191117.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-15|PAPER Tue-P-3-B-15 — Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192521.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-4|PAPER Thu-P-10-C-4 — Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-8|PAPER Wed-P-6-E-8 — Investigation of Cost Function for Supervised Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Cost Function for Supervised Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-3-2|PAPER Thu-O-9-3-2 — GPU-Based WFST Decoding with Extra Large Language Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">GPU-Based WFST Decoding with Extra Large Language Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-14|PAPER Mon-P-2-C-14 — Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191701.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-4-4|PAPER Thu-O-10-4-4 — CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-2|PAPER Tue-O-4-5-2 — Recursive Speech Separation for Unknown Number of Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Recursive Speech Separation for Unknown Number of Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191826.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-C-4|PAPER Thu-P-9-C-4 — An Incremental Turn-Taking Model for Task-Oriented Dialog Systems]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Incremental Turn-Taking Model for Task-Oriented Dialog Systems</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-2-5|PAPER Tue-O-5-2-5 — Trainable Dynamic Subsampling for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Trainable Dynamic Subsampling for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191442.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-12|PAPER Thu-P-9-A-12 — Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191440.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-13|PAPER Thu-P-10-A-13 — Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-8|PAPER Wed-P-6-E-8 — Investigation of Cost Function for Supervised Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Cost Function for Supervised Monaural Speech Separation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191973.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-2-4|PAPER Wed-O-8-2-4 — Multi-Stride Self-Attention for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Stride Self-Attention for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-7|PAPER Tue-P-3-C-7 — Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-8|PAPER Tue-P-3-C-8 — Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-E-8|PAPER Thu-P-9-E-8 — A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|^<div class="cpauthorindexpersoncardpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
|^{{$:/causal/NO-PDF Marker}}|^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192302.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-D-9|PAPER Tue-P-5-D-9 — Place Shift as an Autonomous Process: Evidence from Japanese Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Place Shift as an Autonomous Process: Evidence from Japanese Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192263.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-8|PAPER Tue-P-5-C-8 — A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-3-4|PAPER Wed-O-8-3-4 — Speech Emotion Recognition Based on Multi-Label Emotion Existence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Emotion Recognition Based on Multi-Label Emotion Existence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192899.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-A-1|PAPER Thu-P-10-A-1 — End-to-End Neural Speaker Diarization with Permutation-Free Objectives]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Neural Speaker Diarization with Permutation-Free Objectives</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191180.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-11|PAPER Thu-P-9-D-11 — Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-6|PAPER Mon-P-2-A-6 — Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193233.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-1|PAPER Tue-P-5-A-1 — Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-D-5|PAPER Mon-P-2-D-5 — Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191824.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-8|PAPER Tue-P-3-A-8 — Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-9|PAPER Tue-P-4-C-9 — Identifying Mood Episodes Using Dialogue Features from Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Identifying Mood Episodes Using Dialogue Features from Clinical Interviews</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191385.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-3-6-4|PAPER Tue-SS-3-6-4 — UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-3|PAPER Tue-P-5-C-3 — Improving Performance of End-to-End ASR on Numeric Sequences]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Performance of End-to-End ASR on Numeric Sequences</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|^<div class="cpauthorindexpersoncardpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191235.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-12|PAPER Tue-P-5-A-12 — Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-2-5|PAPER Tue-O-3-2-5 — Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192076.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-5-4|PAPER Tue-O-4-5-4 — Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-2|PAPER Wed-P-8-C-2 — Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-D-1|PAPER Wed-P-6-D-1 — Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191484.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-3|PAPER Wed-P-8-C-3 — Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191614.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-11|PAPER Tue-P-3-A-11 — A Chinese Dataset for Identifying Speakers in Novels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Chinese Dataset for Identifying Speakers in Novels</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191563.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-3-3|PAPER Wed-O-6-3-3 — Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-8|PAPER Thu-P-9-D-8 — Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-8|PAPER Tue-SS-4-4-8 — Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191704.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-8-5-5|PAPER Wed-O-8-5-5 — Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191577.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-4-C-11|PAPER Tue-P-4-C-11 — Conversational Emotion Analysis via Attention Mechanisms]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Conversational Emotion Analysis via Attention Mechanisms</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-1|PAPER Thu-O-9-4-1 — Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-B-6|PAPER Wed-P-7-B-6 — Keyword Spotting for Hearing Assistive Devices Robust to External Speakers]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Keyword Spotting for Hearing Assistive Devices Robust to External Speakers</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191831.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-C-7|PAPER Wed-P-7-C-7 — Development of Emotion Rankers Based on Intended and Perceived Emotion Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Development of Emotion Rankers Based on Intended and Perceived Emotion Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191554.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-4|PAPER Thu-O-9-2-4 — Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192203.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-5|PAPER Thu-P-10-B-5 — Self-Attention Transducers for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Transducers for End-to-End Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191554.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-4|PAPER Thu-O-9-2-4 — Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192203.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-B-5|PAPER Thu-P-10-B-5 — Self-Attention Transducers for End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Self-Attention Transducers for End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191940.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-7|PAPER Thu-P-10-E-7 — Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-10-3-3|PAPER Thu-O-10-3-3 — Vowels and Diphthongs in the Xupu Xiang Chinese Dialect]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Vowels and Diphthongs in the Xupu Xiang Chinese Dialect</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-E-11|PAPER Mon-P-1-E-11 — Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-D-8|PAPER Thu-P-9-D-8 — Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191606.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-7|PAPER Thu-P-9-A-7 — An Effective Deep Embedding Learning Architecture for Speaker Verification]]</div>|^<div class="cpauthorindexpersoncardpapertitle">An Effective Deep Embedding Learning Architecture for Speaker Verification</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-4-5|PAPER Thu-O-9-4-5 — Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191290.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-2|PAPER Tue-P-5-C-2 — Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-C-5|PAPER Wed-P-8-C-5 — Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191400.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-12|PAPER Thu-P-10-C-12 — A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191683.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-C-11|PAPER Tue-P-3-C-11 — Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191302.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-7-E-9|PAPER Wed-P-7-E-9 — Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|^<div class="cpauthorindexpersoncardpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191567.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-2|PAPER Tue-P-3-E-2 — UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191337.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-5-6-2|PAPER Tue-SS-5-6-2 — Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192320.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-5-4|PAPER Thu-O-9-5-4 — Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193191.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-A-2|PAPER Tue-P-5-A-2 — Building a Mixed-Lingual Neural TTS System with Only Monolingual Data]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Building a Mixed-Lingual Neural TTS System with Only Monolingual Data</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-2-4|PAPER Mon-O-2-2-4 — Speaker Adaptation for Attention-Based End-to-End Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speaker Adaptation for Attention-Based End-to-End Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193056.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-5-C-14|PAPER Tue-P-5-C-14 — Acoustic-to-Phrase Models for Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic-to-Phrase Models for Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-4-6|PAPER Wed-S&T-4-6 — SANTLR: Speech Annotation Toolkit for Low Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">SANTLR: Speech Annotation Toolkit for Low Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191428.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-13|PAPER Thu-P-9-A-13 — Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Speech Translation with Knowledge Distillation</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192482.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-4-3-3|PAPER Tue-O-4-3-3 — Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191369.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-1-4|PAPER Tue-O-5-1-4 — Listener Preference on the Local Criterion for Ideal Binary-Masked Speech]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Listener Preference on the Local Criterion for Ideal Binary-Masked Speech</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192171.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-8-E-4|PAPER Wed-P-8-E-4 — A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|^<div class="cpauthorindexpersoncardpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-5|PAPER Mon-O-2-1-5 — A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192712.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-P-3-E-1|PAPER Tue-P-3-E-1 — Speech Augmentation via Speaker-Specific Noise in Unseen Environment]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Speech Augmentation via Speaker-Specific Noise in Unseen Environment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-SS-4-4-14|PAPER Tue-SS-4-4-14 — Deep Residual Neural Networks for Audio Spoofing Detection]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Residual Neural Networks for Audio Spoofing Detection</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|^<div class="cpauthorindexpersoncardpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193113.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-1-C-1|PAPER Mon-P-1-C-1 — Predicting Humor by Learning from Time-Aligned Comments]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Humor by Learning from Time-Aligned Comments</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193119.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-7-2-4|PAPER Wed-O-7-2-4 — Linguistically-Informed Training of Acoustic Word Embeddings for Low-Resource Languages]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Linguistically-Informed Training of Acoustic Word Embeddings for Low-Resource Languages</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191262.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-P-2-C-13|PAPER Mon-P-2-C-13 — A Comparison of Deep Learning Methods for Language Understanding]]</div>|^<div class="cpauthorindexpersoncardpapertitle">A Comparison of Deep Learning Methods for Language Understanding</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-6|PAPER Mon-O-2-5-6 — How to Annotate 100 Hours in 45 Minutes]]</div>|^<div class="cpauthorindexpersoncardpapertitle">How to Annotate 100 Hours in 45 Minutes</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-O-6-5-2|PAPER Wed-O-6-5-2 — Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192793.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Wed-P-6-B-2|PAPER Wed-P-6-B-2 — Detection and Recovery of OOVs for Improved English Broadcast News Captioning]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Detection and Recovery of OOVs for Improved English Broadcast News Captioning</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-O-9-2-1|PAPER Thu-O-9-2-1 — Advancing Sequence-to-Sequence Based Speech Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Advancing Sequence-to-Sequence Based Speech Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191522.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Thu-P-9-A-9|PAPER Thu-P-9-A-9 — Two-Stage Training for Chinese Dialect Recognition]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Two-Stage Training for Chinese Dialect Recognition</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models</div> |
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Tue-O-5-4-5|PAPER Tue-O-5-4-5 — Discovering Dialog Rules by Means of an Evolutionary Approach]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Discovering Dialog Rules by Means of an Evolutionary Approach</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|^<div class="cpauthorindexpersoncardpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cptablecelltopbottomspace2|k
|cpaidxauthortable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191705.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in author view}}</a> |^<div class="cpauthorindexpersoncardpapercode">[[Mon-O-1-5-1|PAPER Mon-O-1-5-1 — High Quality, Lightweight and Adaptable TTS Using LPCNet]]</div>|^<div class="cpauthorindexpersoncardpapertitle">High Quality, Lightweight and Adaptable TTS Using LPCNet</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cpconfinfotable|k
|^<a href="./IS2019/HTML/ABSBOOK.PDF#page1" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in confinfo view}}</a>|^Program and Abstract Book |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}}
</p></div>
<div class="cpcopyrightpage">{{$:/causal/publication/Copyright Statement}}</div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Keiichi Tokuda|AUTHOR Keiichi Tokuda]]
</p><p class="cpabstractcardaffiliationlist">Nagoya Institute of Technology, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>The basic problem of statistical speech synthesis is quite simple: we have a speech database for training, i.e., a set of speech waveforms and corresponding texts; given a text not included in the training data, what is the speech waveform corresponding to the text?  The whole text-to-speech generation process is decomposed into feasible subproblems: usually, text analysis, acoustic modeling, and waveform generation, combined as a statistical generative model.  Each submodule can be modeled by a statistical machine learning technique: first, hidden Markov models were applied to acoustic modeling module and then various types of deep neural networks (DNN) have been applied to not only acoustic modeling module but also other modules.  I will give an overview of such statistical approaches to speech synthesis, looking back on the evolution in the last couple of decades.  Recent DNN-based approaches drastically improved the speech quality, causing a paradigm shift from concatenative speech synthesis approach to generative model-based statistical approach.  However, for realizing human-like talking machines, the goal is not only to generate natural-sounding speech but also to flexibly control variations in speech, such as speaker identities, speaking styles, emotional expressions, etc.  This talk will also discuss such future challenges and the direction in speech synthesis Research.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ralf Schlüter|AUTHOR Ralf Schlüter]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>The general architecture and modeling of the state-of-the-art statistical approach to automatic speech recognition (ASR) have not been challenged significantly for decades. The classical statistical approach to ASR is based on Bayes decision rule, a separation of acoustic and language modeling, hidden Markov modeling (HMM), and a search organization based on dynamic programming and hypothesis pruning methods. Even when artificial neural networks for acoustic modeling and language modeling started to considerably boost ASR performance, the general architecture of state-of-the-art ASR systems was not altered considerably. The hybrid deep neural network (DNN)/HMM approach, together with recurrent long short-term memory (LSTM) neural network language modeling currently marks the state-of-the-art on many tasks, covering a wide range of training set sizes. However, currently more and more alternative approaches occur, moving gradually towards so-called end-to-end approaches. Gradually, these novel end-to-end approaches replace explicit time alignment modeling and dedicated search space organization by more implicit, integrated neural-network based representations, while also dropping the separation between acoustic and language modeling. Corresponding approaches show promising results, especially using large training sets. In this presentation, an overview of current modeling approaches to ASR will be given, including variations of both HMM-based and end-to-end modeling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ngoc-Quan Pham|AUTHOR Ngoc-Quan Pham]], [[Thai-Son Nguyen|AUTHOR Thai-Son Nguyen]], [[Jan Niehues|AUTHOR Jan Niehues]], [[Markus Müller|AUTHOR Markus Müller]], [[Alex Waibel|AUTHOR Alex Waibel]]
</p><p class="cpabstractcardaffiliationlist">KIT, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 66–70&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, end-to-end sequence-to-sequence models for speech recognition have gained significant interest in the research community. While previous architecture choices revolve around time-delay neural networks (TDNN) and long short-term memory (LSTM) recurrent neural networks, we propose to use self-attention via the Transformer architecture as an alternative. Our analysis shows that deep Transformer networks with high learning capacity are able to exceed performance from previous end-to-end approaches and even match the conventional hybrid systems. Moreover, we trained very deep models with up to 48 Transformer layers for both encoder and decoders combined with stochastic residual connections, which greatly improve generalizability and training efficiency. The resulting models outperform all previous end-to-end ASR approaches on the Switchboard benchmark. An ensemble of these models achieve 9.9% and 17.7% WER on Switchboard and CallHome test sets respectively. This finding brings our end-to-end models to competitive levels with previous hybrid systems. Further, with model ensembling the Transformers can outperform certain hybrid systems, which are more complicated in terms of both structure and training procedure.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jason Li|AUTHOR Jason Li]]^^1^^, [[Vitaly Lavrukhin|AUTHOR Vitaly Lavrukhin]]^^1^^, [[Boris Ginsburg|AUTHOR Boris Ginsburg]]^^1^^, [[Ryan Leary|AUTHOR Ryan Leary]]^^1^^, [[Oleksii Kuchaiev|AUTHOR Oleksii Kuchaiev]]^^1^^, [[Jonathan M. Cohen|AUTHOR Jonathan M. Cohen]]^^1^^, [[Huyen Nguyen|AUTHOR Huyen Nguyen]]^^1^^, [[Ravi Teja Gadde|AUTHOR Ravi Teja Gadde]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NVIDIA, USA; ^^2^^New York University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 71–75&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we report state-of-the-art results on LibriSpeech among end-to-end speech recognition models without any external training data. Our model, Jasper, uses only 1D convolutions, batch normalization, ReLU, dropout, and residual connections. To improve training, we further introduce a new layer-wise optimizer called NovoGrad. Through experiments, we demonstrate that the proposed deep architecture performs as well or better than more complex choices. Our deepest Jasper variant uses 54 convolutional layers. With this architecture, we achieve 2.95% WER using a beam-search decoder with an external neural language model and 3.86% WER with a greedy decoder on LibriSpeech test-clean. We also report competitive results on Wall Street Journal and the Hub5’00 conversational evaluation datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Niko Moritz|AUTHOR Niko Moritz]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]
</p><p class="cpabstractcardaffiliationlist">MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 76–80&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In hybrid automatic speech recognition (ASR) systems, neural networks are used as acoustic models (AMs) to recognize phonemes that are composed to words and sentences using pronunciation dictionaries, hidden Markov models, and language models, which can be jointly represented by a weighted finite state transducer (WFST). The importance of capturing temporal context by an AM has been studied and discussed in prior work. In an end-to-end ASR system, however, all components are merged into a single neural network, i.e., the breakdown into an AM and the different parts of the WFST model is no longer possible. This implies that end-to-end neural network architectures have even stronger requirements for processing long contextual information. Bidirectional long short-term memory (BLSTM) neural networks have demonstrated state-of-the-art results in end-to-end ASR but are unsuitable for streaming applications. Latency-controlled BLSTMs account for this by limiting the future context seen by the backward directed recurrence using chunk-wise processing. In this paper, we propose two new unidirectional neural network architectures, the time-delay LSTM (TDLSTM) and the parallel time-delayed LSTM (PTDLSTM) streams, which both limit the processing latency to a fixed size and demonstrate significant improvements compared to prior art on a variety of ASR tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yonatan Belinkov|AUTHOR Yonatan Belinkov]]^^1^^, [[Ahmed Ali|AUTHOR Ahmed Ali]]^^2^^, [[James Glass|AUTHOR James Glass]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^HBKU, Qatar</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 81–85&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end neural network systems for automatic speech recognition (ASR) are trained from acoustic features to text transcriptions. In contrast to modular ASR systems, which contain separately-trained components for acoustic modeling, pronunciation lexicon, and language modeling, the end-to-end paradigm is both conceptually simpler and has the potential benefit of training the entire system on the end task. However, such neural network models are more opaque: it is not clear how to interpret the role of different parts of the network and what information it learns during training. In this paper, we analyze the learned internal representations in an end-to-end ASR model. We evaluate the representation quality in terms of several classification tasks, comparing phonemes and graphemes, as well as different articulatory features. We study two languages (English and Arabic) and three datasets, finding remarkable consistency in how different properties are represented in different layers of the deep neural network.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naohiro Tawara|AUTHOR Naohiro Tawara]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]], [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]]
</p><p class="cpabstractcardaffiliationlist">Waseda University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 86–90&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the use of time-domain convolutional denoising autoencoders (TCDAEs) with multiple channels as a method of speech enhancement. In general, denoising autoencoders (DAEs), deep learning systems that map noise-corrupted into clean waveforms, have been shown to generate high-quality signals while working in the time domain without the intermediate stage of phase modeling. Convolutional DAEs are one of the popular structures which learns a mapping between noise-corrupted and clean waveforms with convolutional denoising autoencoder. Multi-channel signals for TCDAEs are promising because the different times of arrival of a signal can be directly processed with their convolutional structure, Up to this time, TCDAEs have only been applied to single-channel signals. This paper explorers the effectiveness of TCDAEs in a multi-channel configuration. A multi-channel TCDAEs are evaluated on multi-channel speech enhancement experiments, yielding significant improvement over single-channel DAEs in terms of signal-to-distortion ratio, perceptual evaluation of speech quality (PESQ), and word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kristina Tesch|AUTHOR Kristina Tesch]], [[Robert Rehr|AUTHOR Robert Rehr]], [[Timo Gerkmann|AUTHOR Timo Gerkmann]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 91–95&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Using multiple microphones for speech enhancement allows for exploiting spatial information for improved performance. In most cases, the spatial filter is selected to be a linear function of the input as, for example, the minimum variance distortionless response (MVDR) beamformer. For non-Gaussian distributed noise, however, the minimum mean square error (MMSE) optimal spatial filter may be nonlinear.

Potentially, such nonlinear functional relationships could be learned by deep neural networks. However, the performance would depend on many parameters and the architecture of the neural network. Therefore, in this paper, we more generally analyze the potential benefit of nonlinear spatial filters as a function of the multivariate kurtosis of the noise distribution.

The results imply that using a nonlinear spatial filter is only worth the effort if the noise data follows a distribution with a multivariate kurtosis that is considerably higher than for a Gaussian. In this case, we report a performance difference of up to 2.6 dB segmental signal-to-noise ratio (SNR) improvement for artificial stationary noise. We observe an advantage of 1.2 dB for the nonlinear spatial filter over the linear one even for real-world noise data from the CHiME-3 dataset, given oracle data for parameter estimation.</p></div>
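The linear baseline referred to above is the MVDR beamformer, whose per-frequency weights follow the closed form w = R_n^{-1} d / (d^H R_n^{-1} d), with R_n the noise spatial covariance and d the steering (relative transfer function) vector. The NumPy sketch below evaluates this formula on random placeholder statistics purely for illustration.

```python
import numpy as np

def mvdr_weights(R_n, d):
    """MVDR weights w = R_n^{-1} d / (d^H R_n^{-1} d) for one frequency bin."""
    Rn_inv_d = np.linalg.solve(R_n, d)
    return Rn_inv_d / (d.conj() @ Rn_inv_d)

rng = np.random.default_rng(0)
M = 6                                                       # number of microphones (placeholder)
A = rng.standard_normal((M, M)) + 1j * rng.standard_normal((M, M))
R_n = A @ A.conj().T + np.eye(M)                            # Hermitian positive-definite noise covariance
d = rng.standard_normal(M) + 1j * rng.standard_normal(M)    # steering vector (placeholder)

w = mvdr_weights(R_n, d)
print(np.isclose(w.conj() @ d, 1.0))                        # distortionless constraint w^H d = 1 holds
```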
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Juan M. Martín-Doñas|AUTHOR Juan M. Martín-Doñas]]^^1^^, [[Jens Heitkaemper|AUTHOR Jens Heitkaemper]]^^2^^, [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]^^2^^, [[Angel M. Gomez|AUTHOR Angel M. Gomez]]^^1^^, [[Antonio M. Peinado|AUTHOR Antonio M. Peinado]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad de Granada, Spain; ^^2^^Universität Paderborn, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 96–100&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper deals with multi-channel speech recognition in scenarios with multiple speakers. Recently, the spectral characteristics of a target speaker, extracted from an adaptation utterance, have been used to guide a neural network mask estimator to focus on that speaker. In this work we present two variants of speaker-aware neural networks, which exploit both spectral and spatial information to allow better discrimination between target and interfering speakers. Thus, we introduce either a spatial pre-processing prior to the mask estimation or a spatial plus spectral speaker characterization block whose output is directly fed into the neural mask estimator. The target speaker’s spectral and spatial signature is extracted from an adaptation utterance recorded at the beginning of a session. We further adapt the architecture for low-latency processing by means of block-online beamforming that recursively updates the signal statistics. Experimental results show that the additional spatial information clearly improves source extraction, in particular in the same-gender case, and that our proposal achieves state-of-the-art performance in terms of distortion reduction and recognition accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Saeed Bagheri|AUTHOR Saeed Bagheri]], [[Daniele Giacobello|AUTHOR Daniele Giacobello]]
</p><p class="cpabstractcardaffiliationlist">Sonos, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 101–105&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a practical implementation of the parametric multi-channel Wiener filter (PMWF) noise reduction algorithm. In particular, we extend on methods that incorporate the multi-channel speech presence probability (MC-SPP) in the PMWF derivation and its output. The use of the MC-SPP brings several advantages. Firstly, the MC-SPP allows for better estimates of noise and speech statistics, for which we derive a direct update of the inverse of the noise power spectral density (PSD). Secondly, the MC-SPP is used to control the trade-off parameter in PMWF which, with proper tuning, outperforms the traditional approach with a fixed trade-off parameter. Thirdly, the MC-SPP for each frequency-band is used to obtain the MMSE estimate of the desired speech signal at the output, where we control the maximum amount of noise reduction based on our application. Experimental results on a large number of simulated scenarios show significant benefits of employing MC-SPP in terms of SNR improvements and speech distortion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masahito Togami|AUTHOR Masahito Togami]], [[Tatsuya Komatsu|AUTHOR Tatsuya Komatsu]]
</p><p class="cpabstractcardaffiliationlist">LINE, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 106–110&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a multi-channel speech dereverberation method which can reduce reverberation even when acoustic transfer functions (ATFs) are time varying under noisy environments. The microphone input signal is modeled as a convolutive mixture in a time-frequency domain so as to incorporate late reverberation whose tap length is longer than frame size of short term Fourier transform. To reduce reverberation effectively under the time-varying ATF conditions, the proposed method extends the deterministic convolutive transfer function (D-CTF) into a probabilistic convolutive transfer function (P-CTF). A variational Bayesian framework was applied to approximation of a joint posterior probability density functions of a speech source signal and the ATFs. Variational posterior probability density functions and the other parameters are iteratively updated so as to maximize an evidence lower bound (ELBO). Experimental results when the ATFs are time-varying and there is background noise showed that the proposed method can reduce reverberation more accurately than the Weighted Prediction error (WPE) and the Kalman-EM for dereverberation (KEMD).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 111–115&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This article presents frame-by-frame online processing algorithms for a Weighted Power minimization Distortionless response convolutional beamformer (WPD). The WPD unifies widely-used multichannel dereverberation and denoising methods, namely a weighted prediction error based dereverberation method (WPE) and a minimum power distortionless response beamformer (MPDR) into a single convolutional beamformer, and achieves simultaneous dereverberation and denoising based on maximum likelihood estimation. We derive two different online algorithms, one based on frame-by-frame recursive updating of the spatio-temporal covariance matrix of the captured signal, and the other on recursive least square estimation of the convolutional beamformer. In addition, for both algorithms, the desired signal’s relative transfer function (RTF) is estimated by online processing using a neural network based online mask estimation. Experiments using the REVERB challenge dataset show the effectiveness of both algorithms in terms of objective speech enhancement measures and automatic speech recognition (ASR) performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cathryn Snyder|AUTHOR Cathryn Snyder]], [[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 116–120&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phonetic imitation, or implicitly matching the acoustic-phonetic patterns of another speaker, has been empirically associated with natural tendencies to promote successful social communication, as well as individual differences in personality and cognitive processing style. The present study explores whether individual differences in cognitive processing style, as indexed by self-reported scored from the Autism-Spectrum Quotient (AQ) questionnaire, are linked to the way people imitate the vocal productions by two digital device voices (i.e., Apple’s Siri) and two human voices. Subjects first performed a word shadowing task of human and device voices and then completed the self-administered AQ. We assessed imitation of two acoustic properties: f0 and vowel duration. We find that the attention to detail and the imagination subscale scores on the AQ mediated degree of imitation of f0 and vowel duration, respectively. The findings yield new insight to speech production and perception mechanisms and how it interacts with individual cognitive processing style differences.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 121–125&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Estimating speech representations from articulatory movements is known as articulatory-to-acoustic forward (AAF) mapping. Typically this mapping is learned using directly measured articulatory movement in a subject-specific manner. Such AAF mapping has been shown to benefit the speech synthesis applications. In this work, we investigate the speaker similarity and naturalness of utterances generated by AAF which is driven by the articulatory movements from a subject (referred to as cross speaker) different from the speaker (target speaker) used for training AAF mapping. Experiments are performed with directly measured articulatory data from 9 speakers (8 target speakers and 1 cross speaker), which are recorded using Electromagnetic articulograph AG501. Experiments are also performed with articulatory features estimated using speaker independent acoustic-to-articulatory inversion (SI-AAI) model trained on 26 reference speakers. Objective evaluation on target speakers reveal that the articulatory features estimated from SI-AAI result in a lower Mel-cepstrum distortion compared to that using directly measured articulatory features. Further, listening tests reveal that the directly measured articulatory movements preserve the speaker similarity better than estimated ones. Although, for naturalness, articulatory movements predicted by SI-AAI perform better than the direct measurements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaohan Zhang|AUTHOR Xiaohan Zhang]], [[Chongke Bi|AUTHOR Chongke Bi]], [[Kiyoshi Honda|AUTHOR Kiyoshi Honda]], [[Wenhuan Lu|AUTHOR Wenhuan Lu]], [[Jianguo Wei|AUTHOR Jianguo Wei]]
</p><p class="cpabstractcardaffiliationlist">Tianjin University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 126–130&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study examines how the speaker’s tongue size contributes to generating dynamic characteristics of speaker individuality. The relative tongue size (RTS) has been proposed as an index for the tongue area within the oropharyngeal cavity on the midsagittal magnetic resonance imaging (MRI). Our earlier studies have shown that the smaller the RTS, the faster the tongue movement. In this study, acoustic consequences of individual RTS values were analyzed by comparing tongue movement velocity and formant transition rate. The materials used were cine-MRI data and acoustic signals during production of a sentence and two words produced by two female speakers with contrasting RTS values. The results indicate that the speaker with the small RTS value exhibited the faster changes of tongue positions and formant transitions than the speakers with the large RTS values. Since the tongue size is uncontrollable by a speaker’s intention, the RTS can be regarded as one of the causal factors of dynamic individual characteristics in the lower frequency region of speech signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tsukasa Yoshinaga|AUTHOR Tsukasa Yoshinaga]]^^1^^, [[Kazunori Nozaki|AUTHOR Kazunori Nozaki]]^^2^^, [[Shigeo Wada|AUTHOR Shigeo Wada]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Toyohashi University of Technology, Japan; ^^2^^Osaka University Dental Hospital, Japan; ^^3^^Osaka University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 131–135&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To clarify the individual differences of flow and sound characteristics of sibilant /s/, the large eddy simulation of compressible flow was applied to vocal tract geometries of five subjects pronouncing /s/. The vocal tract geometry was extracted by separately collecting images of digital dental casts and the vocal tract of /s/. The computational grids were constructed for each geometry, and flow and acoustic fields were predicted by the simulation. Results of the simulation showed that jet flow in the vocal tract was disturbed and fluctuated, and the sound source of /s/ was generated in different place for each subject. With an increment of the jet velocity, not only the overall sound amplitude but also the spectral mean was increased, indicating that the increment of the jet velocity contributes to the increase of amplitudes in a higher frequency range among different vocal tract geometries.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shashwat Uttam|AUTHOR Shashwat Uttam]]^^1^^, [[Yaman Kumar|AUTHOR Yaman Kumar]]^^2^^, [[Dhruva Sahrawat|AUTHOR Dhruva Sahrawat]]^^3^^, [[Mansi Aggarwal|AUTHOR Mansi Aggarwal]]^^4^^, [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]^^3^^, [[Debanjan Mahata|AUTHOR Debanjan Mahata]]^^5^^, [[Amanda Stent|AUTHOR Amanda Stent]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NSUT, India; ^^2^^Adobe, India; ^^3^^IIIT Delhi, India; ^^4^^DTU, India; ^^5^^Bloomberg, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 136–140&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech Reconstruction is the task of recreation of speech using silent videos as input. In the literature, it is also referred to as  lipreading. In this paper, we design an encoder-decoder architecture which takes silent videos as input and outputs an audio spectrogram of the reconstructed speech. The model, despite being a speaker-independent model, achieves comparable results on speech reconstruction to the current state-of-the-art  speaker-dependent model. We also perform user studies to infer speech intelligibility. Additionally, we test the usability of the trained model using bilingual speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pramit Saha|AUTHOR Pramit Saha]], [[Muhammad Abdul-Mageed|AUTHOR Muhammad Abdul-Mageed]], [[Sidney Fels|AUTHOR Sidney Fels]]
</p><p class="cpabstractcardaffiliationlist">University of British Columbia, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 141–145&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech-related Brain Computer Interface (BCI) technologies provide effective vocal communication strategies for controlling devices through speech commands interpreted from brain signals. In order to infer imagined speech from active thoughts, we propose a novel hierarchical deep learning BCI system for subject-independent classification of 11 speech tokens including phonemes and words. Our novel approach exploits predicted articulatory information of six phonological categories (e.g., nasal, bilabial) as an intermediate step for classifying the phonemes and words, thereby finding discriminative signal responsible for natural speech synthesis. The proposed network is composed of hierarchical combination of spatial and temporal CNN cascaded with a deep autoencoder. Our best models on the KARA database achieve an average accuracy of 83.42% across the six different binary phonological classification tasks, and 53.36% for the individual token identification task, significantly outperforming our baselines. Ultimately, our work suggests the possible existence of a brain imagery footprint for the underlying articulatory movement related to different sounds that can be used to aid imagined speech decoding.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-An Chung|AUTHOR Yu-An Chung]], [[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[Hao Tang|AUTHOR Hao Tang]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 146–150&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a novel unsupervised autoregressive neural model for learning generic speech representations. In contrast to other speech representation learning methods that aim to remove noise or speaker variabilities, ours is designed to preserve information for a wide range of downstream tasks. In addition, the proposed model does not require any phonetic or word boundary labels, allowing the model to benefit from large quantities of unlabeled data. Speech representations learned by our model significantly improve performance on both phone classification and speaker verification over the surface features and other supervised and unsupervised approaches. Further analysis shows that different levels of speech information are captured by our model at different layers. In particular, the lower layers tend to be more discriminative for speakers, while the upper layers provide more phonetic content.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Feng Huang|AUTHOR Feng Huang]], [[Peter Balazs|AUTHOR Peter Balazs]]
</p><p class="cpabstractcardaffiliationlist">Austrian Academy of Sciences, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 151–155&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose harmonic-aligned frame mask for speech signals using non-stationary Gabor transform (NSGT). A frame mask operates on the transfer coefficients of a signal and consequently converts the signal into a counterpart signal. It depicts the difference between the two signals. In preceding studies, frame masks based on regular Gabor transform were applied to single-note instrumental sound analysis. This study extends the frame mask approach to speech signals. For voiced speech, the fundamental frequency is usually changing consecutively over time. We employ NSGT with pitch-dependent and therefore time-varying frequency resolution to attain harmonic alignment in the transform domain and hence yield harmonic-aligned frame masks for speech signals. We propose to apply the harmonic-aligned frame mask to content-dependent speaker comparison. Frame masks, computed from voiced signals of a same vowel but from different speakers, were utilized as similarity measures to compare and distinguish the speaker identities (SID). Results obtained with deep neural networks demonstrate that the proposed frame mask is valid in representing speaker characteristics and shows a potential for SID applications in limited data scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gurunath Reddy M.|AUTHOR Gurunath Reddy M.]], [[K. Sreenivasa Rao|AUTHOR K. Sreenivasa Rao]], [[Partha Pratim Das|AUTHOR Partha Pratim Das]]
</p><p class="cpabstractcardaffiliationlist">IIT Kharagpur, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 156–160&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Glottal closure instants (GCI) also called as instants of significant excitation occur during abrupt closure of vocal folds is a well-studied problem for its many potential applications in speech processing. Speech signal or its transformed linear prediction residual (LPR) is the most popular signal representations for GCI detection. In this paper, we propose a supervised classification based GCI detection method, in which, we train multiple convolution neural networks to determine the suitable feature representation for efficient GCI detection. Also, we show that the combined model trained with joint acoustic-residual deep features and the model trained with low pass filtered speech significantly increases the detection accuracy. We have manually annotated the speech signal for ground truth GCI using electroglottograph (EGG) as a reference signal. The evaluation results showed that the proposed model trained with very small and less diverse data performs significantly better than the traditional signal processing and most recent data-driven approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Santiago Pascual|AUTHOR Santiago Pascual]]^^1^^, [[Mirco Ravanelli|AUTHOR Mirco Ravanelli]]^^2^^, [[Joan Serrà|AUTHOR Joan Serrà]]^^3^^, [[Antonio Bonafonte|AUTHOR Antonio Bonafonte]]^^1^^, [[Yoshua Bengio|AUTHOR Yoshua Bengio]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universitat Politècnica de Catalunya, Spain; ^^2^^Université de Montréal, Canada; ^^3^^Telefónica I+D, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 161–165&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Learning good representations without supervision is still an open issue in machine learning, and is particularly challenging for speech signals, which are often characterized by long sequences with a complex hierarchical structure. Some recent works, however, have shown that it is possible to derive useful speech representations by employing a self-supervised encoder-discriminator approach. This paper proposes an improved self-supervised method, where a single neural encoder is followed by multiple workers that jointly solve different self-supervised tasks. The needed consensus across different tasks naturally imposes meaningful constraints to the encoder, contributing to discover general representations and to minimize the risk of learning superficial ones. Experiments show that the proposed approach can learn transferable, robust, and problem-agnostic features that carry on relevant information from the speech signal, such as speaker identity, phonemes, and even higher-level features such as emotional cues. In addition, a number of design choices make the encoder easily exportable, facilitating its direct usage or adaptation to different problems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bhanu Teja Nellore|AUTHOR Bhanu Teja Nellore]]^^1^^, [[Sri Harsha Dumpala|AUTHOR Sri Harsha Dumpala]]^^2^^, [[Karan Nathwani|AUTHOR Karan Nathwani]]^^3^^, [[Suryakanth V. Gangashetty|AUTHOR Suryakanth V. Gangashetty]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIIT Hyderabad, India; ^^2^^TCS Innovation Labs Mumbai, India; ^^3^^IIT Jammu, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 166–170&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The aim of the current study is to propose acoustic features for detection of nasals in continuous speech. Acoustic features that represent certain characteristics of speech production are extracted. Features representing excitation source characteristics are extracted using zero frequency filtering method. Features representing vocal tract system characteristics are extracted using zero time windowing method.

Feature sets are formed by combining certain subsets of the features mentioned above. These feature sets are evaluated for their representativeness of nasals in continuous speech in three different languages, namely, English, Hindi and Telugu. Results show that nasal detection is reliable and consistent across all the languages mentioned above.</p></div>
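Zero frequency filtering passes the signal through a resonator centered at 0 Hz and then removes the resulting slowly varying trend, leaving a signal dominated by excitation-source information. A generic illustration of one common formulation is sketched below; the trend-removal window length is an assumption tied to the average pitch period, and this is not necessarily the authors' exact implementation:

```python
import numpy as np
from scipy.signal import lfilter

def zero_frequency_filter(s, fs, avg_pitch_ms=8.0):
    """One common formulation of zero frequency filtering (illustrative only)."""
    x = np.diff(s, prepend=s[0])                 # remove DC / low-frequency bias
    y = lfilter([1.0], [1.0, -2.0, 1.0], x)      # zero-frequency resonator
    y = lfilter([1.0], [1.0, -2.0, 1.0], y)      # applied twice in cascade
    win = int(fs * avg_pitch_ms / 1000) | 1      # odd-length trend-removal window
    kernel = np.ones(win) / win
    for _ in range(2):                           # subtract the local mean (trend)
        y = y - np.convolve(y, kernel, mode="same")
    return y                                     # excitation-source dominated signal
```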
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aggelina Chatziagapi|AUTHOR Aggelina Chatziagapi]]^^1^^, [[Georgios Paraskevopoulos|AUTHOR Georgios Paraskevopoulos]]^^2^^, [[Dimitris Sgouropoulos|AUTHOR Dimitris Sgouropoulos]]^^1^^, [[Georgios Pantazopoulos|AUTHOR Georgios Pantazopoulos]]^^1^^, [[Malvina Nikandrou|AUTHOR Malvina Nikandrou]]^^1^^, [[Theodoros Giannakopoulos|AUTHOR Theodoros Giannakopoulos]]^^1^^, [[Athanasios Katsamanis|AUTHOR Athanasios Katsamanis]]^^1^^, [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]]^^2^^, [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Behavioral Signal Technologies, USA; ^^2^^Behavioral Signal Technologies, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 171–175&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we address the problem of data imbalance for the task of Speech Emotion Recognition (SER). We investigate conditioned data augmentation using Generative Adversarial Networks (GANs), in order to generate samples for underrepresented emotions. We adapt and improve a conditional GAN architecture to generate synthetic spectrograms for the minority class. For comparison purposes, we implement a series of signal-based data augmentation methods. The proposed GAN-based approach is evaluated on two datasets, namely IEMOCAP and FEEL-25k, a large multi-domain dataset. Results demonstrate a 10% relative performance improvement in IEMOCAP and 5% in FEEL-25k, when augmenting the minority classes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zvi Kons|AUTHOR Zvi Kons]], [[Slava Shechtman|AUTHOR Slava Shechtman]], [[Alex Sorin|AUTHOR Alex Sorin]], [[Carmel Rabinovitz|AUTHOR Carmel Rabinovitz]], [[Ron Hoory|AUTHOR Ron Hoory]]
</p><p class="cpabstractcardaffiliationlist">IBM, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 176–180&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a lightweight adaptable neural TTS system with high quality output. The system is composed of three separate neural network blocks: prosody prediction, acoustic feature prediction and Linear Prediction Coding Net as a neural vocoder. This system can synthesize speech with close to natural quality while running 3 times faster than real-time on a standard CPU.

The modular setup of the system allows for simple adaptation to new voices with a small amount of data.

We first demonstrate the ability of the system to produce high quality speech when trained on large, high quality datasets. Following that, we demonstrate its adaptability by mimicking unseen voices using datasets that are 5 to 20 minutes long and of lower recording quality. Large-scale Mean Opinion Score quality and similarity tests are presented, showing that the system can adapt to unseen voices with a quality gap of 0.12 and a similarity gap of 3% compared to natural speech for male voices, and a quality gap of 0.35 and a similarity gap of 9% for female voices.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jaime Lorenzo-Trueba|AUTHOR Jaime Lorenzo-Trueba]]^^1^^, [[Thomas Drugman|AUTHOR Thomas Drugman]]^^2^^, [[Javier Latorre|AUTHOR Javier Latorre]]^^3^^, [[Thomas Merritt|AUTHOR Thomas Merritt]]^^1^^, [[Bartosz Putrycz|AUTHOR Bartosz Putrycz]]^^1^^, [[Roberto Barra-Chicote|AUTHOR Roberto Barra-Chicote]]^^1^^, [[Alexis Moinet|AUTHOR Alexis Moinet]]^^1^^, [[Vatsal Aggarwal|AUTHOR Vatsal Aggarwal]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, UK; ^^2^^Amazon, Germany; ^^3^^Apple, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 181–185&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper explores the potential universality of neural vocoders. We train a WaveRNN-based vocoder on 74 speakers coming from 17 languages. This vocoder is shown to be capable of generating speech of consistently good quality (98% relative mean MUSHRA when compared to natural speech) regardless of whether the input spectrogram comes from a speaker or style seen during training or from an out-of-domain scenario when the recording conditions are studio-quality. When the recordings show significant changes in quality, or when moving towards non-speech vocalizations or singing, the vocoder still significantly outperforms speaker-dependent vocoders, but operates at a lower average relative MUSHRA of 75%. These results are shown to be consistent across languages, regardless of them being seen during training (e.g. English or Japanese) or unseen (e.g. Wolof, Swahili, Ahmaric).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Paarth Neekhara|AUTHOR Paarth Neekhara]], [[Chris Donahue|AUTHOR Chris Donahue]], [[Miller Puckette|AUTHOR Miller Puckette]], [[Shlomo Dubnov|AUTHOR Shlomo Dubnov]], [[Julian McAuley|AUTHOR Julian McAuley]]
</p><p class="cpabstractcardaffiliationlist">University of California at San Diego, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 186–190&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent approaches in text-to-speech (TTS) synthesis employ neural network strategies to vocode perceptually-informed spectrogram representations directly into listenable waveforms. Such vocoding procedures create a computational bottleneck in modern TTS pipelines. We propose an alternative approach which utilizes generative adversarial networks (GANs) to learn mappings from perceptually-informed spectrograms to simple magnitude spectrograms which can be heuristically vocoded. Through a user study, we show that our approach significantly outperforms naïve vocoding strategies while being hundreds of times faster than neural network vocoders used in state-of-the-art TTS systems. We also show that our method can be used to achieve state-of-the-art results in unsupervised synthesis of individual words of speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ahmed Mustafa|AUTHOR Ahmed Mustafa]]^^1^^, [[Arijit Biswas|AUTHOR Arijit Biswas]]^^2^^, [[Christian Bergler|AUTHOR Christian Bergler]]^^3^^, [[Julia Schottenhamml|AUTHOR Julia Schottenhamml]]^^3^^, [[Andreas Maier|AUTHOR Andreas Maier]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fraunhofer IIS, Germany; ^^2^^Dolby, Germany; ^^3^^FAU Erlangen-Nürnberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 191–195&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/1195" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Classical parametric speech coding techniques provide a compact representation for speech signals. This affords a very low transmission rate but with a reduced perceptual quality of the reconstructed signals. Recently, autoregressive deep generative models such as WaveNet and SampleRNN have been used as speech vocoders to scale up the perceptual quality of the reconstructed signals without increasing the coding rate. However, such models suffer from a very slow signal generation mechanism due to their sample-by-sample modelling approach. In this work, we introduce a new methodology for neural speech vocoding based on generative adversarial networks (GANs). A fake speech signal is generated from a very compressed representation of the glottal excitation using conditional GANs as a deep generative model. This fake speech is then refined using the LPC parameters of the original speech signal to obtain a natural reconstruction. The reconstructed speech waveforms based on this approach show a higher perceptual quality than the classical vocoder counterparts according to subjective and objective evaluation scores for a dataset of 30 male and female speakers. Moreover, the usage of GANs enables to generate signals in one-shot compared to autoregressive generative models. This makes GANs promising for exploration to implement high-quality neural vocoders.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]]
</p><p class="cpabstractcardaffiliationlist">Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 196–200&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a quasi-periodic neural network (QPNet) vocoder with a novel network architecture named pitch-dependent dilated convolution (PDCNN) to improve the pitch controllability of WaveNet (WN) vocoder. The effectiveness of the WN vocoder to generate high-fidelity speech samples from given acoustic features has been proved recently. However, because of the fixed dilated convolution and generic network architecture, the WN vocoder hardly generates speech with given F,,0,, values which are outside the range observed in training data. Consequently, the WN vocoder lacks the pitch controllability which is one of the essential capabilities of conventional vocoders. To address this limitation, we propose the PDCNN component which has the time-variant adaptive dilation size related to the given F,,0,, values and a cascade network structure of the QPNet vocoder to generate quasi-periodic signals such as speech. Both objective and subjective tests are conducted, and the experimental results demonstrate the better pitch controllability of the QPNet vocoder compared to the same and double sized WN vocoders while attaining comparable speech qualities.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaohai Tian|AUTHOR Xiaohai Tian]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^2^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^NTU, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 201–205&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a typical voice conversion system, vocoder is commonly used for speech-to-features analysis and features-to-speech synthesis. However, vocoder can be a source of speech quality degradation. This paper presents a novel approach to voice conversion using WaveNet for non-parallel training data. Instead of reconstructing speech with intermediate features, the proposed approach utilizes the WaveNet to map the Phonetic PosteriorGrams (PPGs) to the waveform samples directly. In this way, we avoid the estimation errors arising from vocoding and feature conversion. Additionally, as PPG is assumed to be speaker independent, the proposed approach also reduces the feature mismatch problem in WaveNet vocoder based solutions. Experimental results conducted on the CMU-ARCTIC database show that the proposed approach significantly outperforms the traditional vocoder and WaveNet Vocoder baselines in terms of speech quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kyu J. Han|AUTHOR Kyu J. Han]]^^1^^, [[Ramon Prieto|AUTHOR Ramon Prieto]]^^2^^, [[Tao Ma|AUTHOR Tao Ma]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ASAPP, USA; ^^2^^ASAPP, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Attention is to let neural layers pay more attention to what is relevant to a given task while giving less attention to what is less important, and since its introduction in 2015 for machine translation, has been successfully applied to speech applications in a number of different forms. This survey presents how the attention mechanisms have been applied to speech and speaker recognition tasks. The attention mechanism was firstly applied to sequence-to-sequence speech recognition and later became the critical part of Google’s well-known Listen, Attend and Spell ASR system. In the framework of hybrid DNN/HMM approaches or CTC-based ASR systems, the attention mechanisms recently started to get more traction in the form of self-attention. In a speaker recognition perspective, the attention mechanisms have been utilized to improve the capability of representing speaker characteristics in neural outputs, mostly in the form of attentive pooling. In this survey we detail the attentive strategies that have been successful in both speech and speaker recognition tasks, and discuss challenging issues in practice.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziping Zhao|AUTHOR Ziping Zhao]]^^1^^, [[Zhongtian Bao|AUTHOR Zhongtian Bao]]^^2^^, [[Zixing Zhang|AUTHOR Zixing Zhang]]^^3^^, [[Nicholas Cummins|AUTHOR Nicholas Cummins]]^^4^^, [[Haishuai Wang|AUTHOR Haishuai Wang]]^^2^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin Normal University, China; ^^2^^Tianjin Normal University, China; ^^3^^Imperial College London, UK; ^^4^^Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 206–210&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Discrete  speech emotion recognition (SER), the assignment of a single emotion label to an entire speech utterance, is typically performed as a sequence-to-label task. This approach, however, is limited, in that it can result in models that do not capture temporal changes in the speech signal, including those indicative of a particular emotion. One potential solution to overcome this limitation is to model SER as a sequence-to-sequence task instead. In this regard, we have developed an attention-based  bidirectional long short-term memory (BLSTM) neural network in combination with a  connectionist temporal classification (CTC) objective function (Attention-BLSTM-CTC) for SER. We also assessed the benefits of incorporating two contemporary attention mechanisms, namely component attention and quantum attention, into the CTC framework. To the best of the authors’ knowledge, this is the first time that such a hybrid architecture has been employed for SER.We demonstrated the effectiveness of our approach on the Interactive Emotional Dyadic Motion Capture (IEMOCAP) and FAU-Aibo Emotion corpora. The experimental results demonstrate that our proposed model outperforms current state-of-the-art approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jeng-Lin Li|AUTHOR Jeng-Lin Li]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 211–215&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A growing number of human-centered applications benefit from continuous advancements in the emotion recognition technology. Many emotion recognition algorithms have been designed to model multimodal behavior cues to achieve high performances. However, most of them do not consider the modulating factors of an individual’s personal attributes in his/her expressive behaviors. In this work, we propose a Personalized Attributes-Aware Attention Network (PAaAN) with a novel personalized attention mechanism to perform emotion recognition using speech and language cues. The attention profile is learned from embeddings of an individual’s profile, acoustic, and lexical behavior data. The profile embedding is derived using linguistics inquiry word count computed between the target speaker and a large set of movie scripts. Our method achieves the state-of-the-art 70.3% unweighted accuracy in a four class emotion recognition task on the IEMOCAP. Further analysis reveals that affect-related semantic categories are emphasized differently for each speaker in the corpus showing the effectiveness of our attention mechanism for personalization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ascensión Gallardo-Antolín|AUTHOR Ascensión Gallardo-Antolín]]^^1^^, [[Juan Manuel Montero|AUTHOR Juan Manuel Montero]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad Carlos III de Madrid, Spain; ^^2^^Universidad Politécnica de Madrid, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 216–220&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Cognitive Load (CL) refers to the amount of mental demand that a given task imposes on an individual’s cognitive system and it can affect his/her productivity in very high load situations. In this paper, we propose an automatic system capable of classifying the CL level of a speaker by analyzing his/her voice. Our research on this topic goes into two main directions. In the first one, we focus on the use of Long Short-Term Memory (LSTM) networks with different weighted pooling strategies for CL level classification. In the second contribution, for overcoming the need of a large amount of training data, we propose a novel attention mechanism that uses the Kalinli’s auditory saliency model. Experiments show that our proposal outperforms significantly both, a baseline system based on Support Vector Machines (SVM) and a LSTM-based system with logistic regression attention model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adria Mallol-Ragolta|AUTHOR Adria Mallol-Ragolta]], [[Ziping Zhao|AUTHOR Ziping Zhao]], [[Lukas Stappen|AUTHOR Lukas Stappen]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 221–225&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The high prevalence of depression in society has given rise to a need for new digital tools that can aid its early detection. Among other effects, depression impacts the use of language. Seeking to exploit this, this work focuses on the detection of depressed and non-depressed individuals through the analysis of linguistic information extracted from transcripts of clinical interviews with a virtual agent. Specifically, we investigated the advantages of employing hierarchical attention-based networks for this task. Using Global Vectors (GloVe) pretrained word embedding models to extract low-level representations of the words, we compared hierarchical local-global attention networks and hierarchical contextual attention networks. We performed our experiments on the Distress Analysis Interview Corpus - Wizard of Oz (DAIC-WoZ) dataset, which contains audio, visual, and linguistic information acquired from participants during a clinical session. Our results using the DAIC-WoZ test set indicate that hierarchical contextual attention networks are the most suitable configuration to detect depression from transcripts. The configuration achieves an Unweighted Average Recall (UAR) of .66 using the test set, surpassing our baseline, a Recurrent Neural Network that does not use attention.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andrea Carmantini|AUTHOR Andrea Carmantini]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 226–230&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition models are highly susceptible to mismatch in the acoustic and language domains between the training and the evaluation data. For low resource languages, it is difficult to obtain transcribed speech for target domains, while untranscribed data can be collected with minimal effort. Recently, a method applying lattice-free maximum mutual information (LF-MMI) to untranscribed data has been found to be effective for semi-supervised training. However, weaker initial models and domain mismatch can result in high deletion rates for the semi-supervised model. Therefore, we propose a method to force the base model to overgenerate possible transcriptions, relying on the ability of LF-MMI to deal with uncertainty. On data from the IARPA MATERIAL programme, our new semi-supervised method outperforms the standard semi-supervised method, yielding significant gains when adapting for mismatched bandwidth and domain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christoph Lüscher|AUTHOR Christoph Lüscher]], [[Eugen Beck|AUTHOR Eugen Beck]], [[Kazuki Irie|AUTHOR Kazuki Irie]], [[Markus Kitza|AUTHOR Markus Kitza]], [[Wilfried Michel|AUTHOR Wilfried Michel]], [[Albert Zeyer|AUTHOR Albert Zeyer]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 231–235&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present state-of-the-art automatic speech recognition (ASR) systems employing a standard hybrid DNN/HMM architecture compared to an attention-based encoder-decoder design for the LibriSpeech task. Detailed descriptions of the system development, including model design, pretraining schemes, training schedules, and optimization approaches are provided for both system architectures. Both hybrid DNN/HMM and attention-based systems employ bi-directional LSTMs for acoustic modeling/encoding. For language modeling, we employ both LSTM and Transformer based architectures. All our systems are built using RWTH’s open-source toolkits RASR and RETURNN. To the best knowledge of the authors, the results obtained when training on the full LibriSpeech training set, are the best published currently, both for the hybrid DNN/HMM and the attention-based systems. Our single hybrid system even outperforms previous results obtained from combining eight single systems. Our comparison shows that on the LibriSpeech 960h task, the hybrid DNN/HMM system outperforms the attention-based system by 15% relative on the clean and 40% relative on the other test sets in terms of word error rate. Moreover, experiments on a reduced 100h-subset of the LibriSpeech training corpus even show a more pronounced margin between the hybrid DNN/HMM and attention-based architectures.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]]^^1^^, [[Shota Horiguchi|AUTHOR Shota Horiguchi]]^^1^^, [[Ryoichi Takashima|AUTHOR Ryoichi Takashima]]^^1^^, [[Yusuke Fujita|AUTHOR Yusuke Fujita]]^^1^^, [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Hitachi, Japan; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 236–240&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a novel auxiliary loss function for target-speaker automatic speech recognition (ASR). Our method automatically extracts and transcribes target speaker’s utterances from a monaural mixture of multiple speakers speech given a short sample of the target speaker. The proposed auxiliary loss function attempts to additionally maximize interference speaker ASR accuracy during training. This will regularize the network to achieve a better representation for speaker separation, thus achieving better accuracy on the target-speaker ASR. We evaluated our proposed method using two-speaker-mixed speech in various signal-to-interference-ratio conditions. We first built a strong target-speaker ASR baseline based on the state-of-the-art lattice-free maximum mutual information. This baseline achieved a word error rate (WER) of 18.06% on the test set while a normal ASR trained with clean data produced a completely corrupted result (WER of 84.71%). Then, our proposed loss further reduced the WER by 6.6% relative to this strong baseline, achieving a WER of 16.87%. In addition to the accuracy improvement, we also showed that the auxiliary output branch for the proposed loss can even be used for a secondary ASR for interference speakers’ speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhong Meng|AUTHOR Zhong Meng]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 241–245&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose three regularization-based speaker adaptation approaches to adapt the attention-based encoder-decoder (AED) model with very limited adaptation data from target speakers for end-to-end automatic speech recognition. The first method is Kullback-Leibler divergence (KLD) regularization, in which the output distribution of a speaker-dependent (SD) AED is forced to be close to that of the speaker-independent (SI) model by adding a KLD regularization to the adaptation criterion. To compensate for the asymmetric deficiency in KLD regularization, an adversarial speaker adaptation (ASA) method is proposed to regularize the deep-feature distribution of the SD AED through the adversarial learning of an auxiliary discriminator and the SD AED. The third approach is the multi-task learning, in which an SD AED is trained to jointly perform the primary task of predicting a large number of output units and an auxiliary task of predicting a small number of output units to alleviate the target sparsity issue. Evaluated on a Microsoft short message dictation task, all three methods are highly effective in adapting the AED model, achieving up to 12.2% and 3.0% word error rate improvement over an SI AED trained from 3400 hours data for supervised and unsupervised adaptation, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peidong Wang|AUTHOR Peidong Wang]]^^1^^, [[Jia Cui|AUTHOR Jia Cui]]^^2^^, [[Chao Weng|AUTHOR Chao Weng]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ohio State University, USA; ^^2^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 246–250&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end speech recognition systems are typically evaluated using the maximum a posterior criterion. Since only one hypothesis is involved during evaluation, the ideal number of hypotheses for training should also be one. In this study, we propose a large margin training scheme for attention based end-to-end speech recognition. Using only one training hypothesis, the large margin training strategy achieves the same performance as the minimum word error rate criterion using four hypotheses. The theoretical derivation in this study is widely applicable to other sequence discriminative criteria such as maximum mutual information. In addition, this paper provides a more succinct formulation of the large margin concept, paving the road towards a better combination of support vector machine and deep neural network. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Khoi-Nguyen C. Mac|AUTHOR Khoi-Nguyen C. Mac]]^^1^^, [[Xiaodong Cui|AUTHOR Xiaodong Cui]]^^2^^, [[Wei Zhang|AUTHOR Wei Zhang]]^^2^^, [[Michael Picheny|AUTHOR Michael Picheny]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Illinois at Urbana-Champaign, USA; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 251–255&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In automatic speech recognition (ASR), wideband (WB) and narrowband (NB) speech signals with different sampling rates typically use separate acoustic models. Therefore mixed-bandwidth (MB) acoustic modeling has important practical values for ASR system deployment. In this paper, we extensively investigate large-scale MB deep neural network acoustic modeling for ASR using 1,150 hours of WB data and 2,300 hours of NB data. We study various MB strategies including downsampling, upsampling and bandwidth extension for MB acoustic modeling and evaluate their performance on 8 diverse WB and NB test sets from various application domains. To deal with the large amounts of training data, distributed training is carried out on multiple GPUs using synchronous data parallelism.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Benjamin Milde|AUTHOR Benjamin Milde]], [[Chris Biemann|AUTHOR Chris Biemann]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 256–260&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a sparse sequence autoencoder model for unsupervised acoustic unit discovery, based on bidirectional LSTM encoders/decoders with a sparsity-inducing bottleneck. The sparsity layer is based on memory-augmented neural networks, with a differentiable embedding memory bank addressed from the encoder. The decoder reconstructs the encoded input feature sequence from an utterance-level context embedding and the bottleneck representation. At some time steps, the input to the decoder is randomly omitted by applying sequence dropout, forcing the decoder to learn about the temporal structure of the sequence. We propose a bootstrapping training procedure, after which the network can be trained end-to-end with standard back-propagation. Sparsity of the generated representation can be controlled with a parameter in the proposed loss function. We evaluate the units with the ABX discriminability on minimal triphone pairs and also on entire words. Forcing the network to favor highly sparse memory addressings in the memory component yields symbolic-like representations of speech that are very compact and still offer better ABX discriminability than MFCC.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lucas Ondel|AUTHOR Lucas Ondel]], [[Hari Krishna Vydana|AUTHOR Hari Krishna Vydana]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Jan Černocký|AUTHOR Jan Černocký]]
</p><p class="cpabstractcardaffiliationlist">Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 261–265&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work tackles the problem of learning a set of language specific acoustic units from unlabeled speech recordings given a set of labeled recordings from other languages. Our approach may be described by the following two steps procedure: first the model learns the notion of acoustic units from the labelled data and then the model uses its knowledge to find new acoustic units on the target language. We implement this process with the Bayesian Subspace Hidden Markov Model (SHMM), a model akin to the Subspace Gaussian Mixture Model (SGMM) where each low dimensional embedding represents an acoustic unit rather than just a HMM’s state. The subspace is trained on 3 languages from the GlobalPhone corpus (German, Polish and Spanish) and the AUs are discovered on the TIMIT corpus. Results, measured in equivalent Phone Error Rate, show that this approach significantly outperforms previous HMM based acoustic units discovery systems and compares favorably with the Variational Auto Encoder-HMM.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yosuke Higuchi|AUTHOR Yosuke Higuchi]], [[Naohiro Tawara|AUTHOR Naohiro Tawara]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]], [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]]
</p><p class="cpabstractcardaffiliationlist">Waseda University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 266–270&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a novel framework for extracting speaker-invariant features for zero-resource languages. A deep neural network (DNN)-based acoustic model is normalized against speakers via adversarial training: a multi-task learning process trains a shared bottleneck feature to be discriminative to phonemes and independent of speakers. However, owing to the absence of phoneme labels, zero-resource languages cannot employ adversarial multi-task (AMT) learning for speaker normalization. In this work, we obtain a posteriorgram from a Dirichlet process Gaussian mixture model (DPGMM) and utilize the posterior vector for supervision of the phoneme estimation in the AMT training. The AMT network is designed so that the DPGMM posteriorgram itself is embedded in a speaker-invariant feature space. The proposed network is expected to resolve the potential problem that the posteriorgram may lack reliability as a phoneme representation if the DPGMM components are intermingled with phoneme and speaker information. Based on the Zero Resource Speech Challenges, we conduct phoneme discriminant experiments on the extracted features. The results of the experiments show that the proposed framework extracts discriminative features, suppressing the variety in speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manasa Prasad|AUTHOR Manasa Prasad]]^^1^^, [[Daan van Esch|AUTHOR Daan van Esch]]^^1^^, [[Sandy Ritchie|AUTHOR Sandy Ritchie]]^^2^^, [[Jonas Fromseier Mortensen|AUTHOR Jonas Fromseier Mortensen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Google, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 271–275&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When building automatic speech recognition (ASR) systems, typically some amount of audio and text data in the target language is needed. While text data can be obtained relatively easily across many languages, transcribed audio data is challenging to obtain. This presents a barrier to making voice technologies available in more languages of the world. In this paper, we present a way to build an ASR system system for a language even in the absence of any audio training data in that language at all. We do this by simply re-using an existing acoustic model from a phonologically similar language, without any kind of modification or adaptation towards the target language. The basic insight is that, if two languages are sufficiently similar in terms of their phonological system, an acoustic model should hold up relatively well when used for another language. We describe how we tailor our pronunciation models to enable such re-use, and show experimental results across a number of languages from various language families. We also provide a theoretical analysis of situations in which this approach is likely to work. Our results show that it is possible to achieve less than 20% word error rate (WER) using this method. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Emmanuel Azuh|AUTHOR Emmanuel Azuh]], [[David Harwath|AUTHOR David Harwath]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 276–280&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a method for the discovery of word-like units and their approximate translations from visually grounded speech across multiple languages. We first train a neural network model to map images and their spoken audio captions in both English and Hindi to a shared, multimodal embedding space. Next, we use this model to segment and cluster regions of the spoken captions which approximately correspond to words. Finally, we exploit between-cluster similarities in the embedding space to associate English pseudo-word clusters with Hindi pseudo-word clusters, and show that many of these cluster pairings capture semantic translations between English and Hindi words. We present quantitative cross-lingual clustering results, as well as qualitative results in the form of a bilingual picture dictionary.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siyuan Feng|AUTHOR Siyuan Feng]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 281–285&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study tackles unsupervised subword modeling in the zero-resource scenario, learning frame-level speech representation that is phonetically discriminative and speaker-invariant, using only untranscribed speech for target languages. Frame label acquisition is an essential step in solving this problem. High quality frame labels should be in good consistency with golden transcriptions and robust to speaker variation. We propose to improve frame label acquisition in our previously adopted deep neural network-bottleneck feature (DNN-BNF) architecture by applying the factorized hierarchical variational autoencoder (FHVAE). FHVAEs learn to disentangle linguistic content and speaker identity information encoded in speech. By discarding or unifying speaker information, speaker-invariant features are learned and fed as inputs to DPGMM frame clustering and DNN-BNF training. Experiments conducted on ZeroSpeech 2017 show that our proposed approaches achieve 2.4% and 0.6% absolute ABX error rate reductions in across- and within-speaker conditions, comparing to the baseline DNN-BNF system without applying FHVAEs. Our proposed approaches significantly outperform vocal tract length normalization in improving frame labeling and subword modeling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shawn Nissen|AUTHOR Shawn Nissen]], [[Sharalee Blunck|AUTHOR Sharalee Blunck]], [[Anita Dromey|AUTHOR Anita Dromey]], [[Christopher Dromey|AUTHOR Christopher Dromey]]
</p><p class="cpabstractcardaffiliationlist">Brigham Young University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 286–290&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study evaluated listeners’ ability to identify the gender of preadolescent children from speech samples of varying length and linguistic context. The listeners were presented with a total of 190 speech samples in four different categories of linguistic context: segments, words, sentences, and discourse. The listeners were instructed to evaluate each speech sample and decide whether the speaker was a male or female and rate their level of confidence in their decision. Results showed listeners identified the gender of the speakers with a high degree of accuracy, ranging from 86% to 95%. Significant differences in listener judgments were found across the four levels of linguistic context, with segments having the lowest accuracy (83%) and discourse the highest accuracy (99%). At the segmental level, the listeners’ identification of each speaker’s gender was greater for vowels than for fricatives, with both types of phoneme being identified at a rate well above chance. Significant differences in identification were found between the /s/ and /ʃ/ fricatives, but not between the four corner vowels. The perception of gender is likely multifactorial, with listeners possibly using phonetic, prosodic, or stylistic speech cues to determine a speaker’s gender.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wiebke Ahlers|AUTHOR Wiebke Ahlers]]^^1^^, [[Philipp Meer|AUTHOR Philipp Meer]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Osnabrück, Germany; ^^2^^Universität Münster, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 291–295&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The retraction of /s/, particularly in /str/ clusters, toward [ʃ] has been investigated in British, Australian, and American English and shown to be conditioned phonetically and sociolinguistically. To date, however, no research exists on the retraction of /s/ in New Englishes, the nativized Englishes spoken in postcolonial territories like the Caribbean. We take up this research gap and present the results of a large-scale comparative acoustic analysis of /s/-retraction in Trinidadian English (TrinE) and American English (AmE), using Center of Gravity measurements of more than 23,500 sibilants produced by 181 speakers from two speech corpora.

The results show that, in TrinE, /str/ is considerably retracted toward [ʃtɹ], while all other /sC(r)/ clusters are non-retracted and acoustically close to singleton /s/; less retracted realizations of /str/ occur across word boundaries. Although a statistically significant contrast is overall maintained between /ʃ/ and the sibilant in /str/, there is considerable overlap across many speakers. The comparison between TrinE and AmE indicates that, while sibilants in TrinE overall show acoustically lower values, both varieties have in common that retraction is limited to /str/ contexts and significantly larger in younger speakers. The degree of /str/-retraction, however, is overall larger in TrinE than AmE.</p></div>
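For readers unfamiliar with the measurement, the spectral Center of Gravity used here is simply the amplitude-weighted mean frequency of the sibilant spectrum; lower values indicate a more retracted, [ʃ]-like sibilant, higher values a more [s]-like one. A generic computation is sketched below; the window and band limits are illustrative and not the study's exact analysis settings.

```python
import numpy as np

def center_of_gravity(signal, sr, fmin=1000.0, fmax=11000.0):
    """Spectral Center of Gravity (first spectral moment) of a sibilant segment,
    computed from the power spectrum within an illustrative analysis band."""
    windowed = signal * np.hanning(len(signal))
    spectrum = np.abs(np.fft.rfft(windowed))
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / sr)
    band = (freqs >= fmin) & (freqs <= fmax)
    power = spectrum[band] ** 2
    return float((freqs[band] * power).sum() / power.sum())

# Toy example on a noise segment standing in for an extracted sibilant.
sr = 22050
segment = np.random.default_rng(2).standard_normal(int(0.05 * sr))
print(f"CoG = {center_of_gravity(segment, sr):.0f} Hz")
```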
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michele Gubian|AUTHOR Michele Gubian]]^^1^^, [[Jonathan Harrington|AUTHOR Jonathan Harrington]]^^1^^, [[Mary Stevens|AUTHOR Mary Stevens]]^^1^^, [[Florian Schiel|AUTHOR Florian Schiel]]^^1^^, [[Paul Warren|AUTHOR Paul Warren]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LMU München, Germany; ^^2^^Victoria University of Wellington, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 296–300&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The focus of the study is the application of functional principal components analysis (FPCA) to a sound change in progress in which the  square and  near falling diphthongs are merging in New Zealand English. FPCA approximated the trajectory shapes of the first two formant frequencies (F1/F2) in a large acoustic database of read New Zealand English speech spanning three different age groups and two regions. The derived FPCA parameters showed a greater degree of centralisation and monophthongisation in  square than in  near. Compatibly with the evidence of an ongoing sound change in which  square is shifting towards  near, these shape differences were more marked for older than for younger/mid-age speakers. There was no effect of region nor of place of articulation of the preceding consonant; there was a trend for the merger to be more advanced in low frequency words. The study underlines the benefits of FPCA for quantifying the many types of sound changes involving subtle shifts in speech dynamics. In particular, multi-dimensional trajectory shape differences can be quantified without the need for vowel targets nor for determining the influence of the parameters — in this case of the first two formant frequencies — independently of each other.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Iona Gessinger|AUTHOR Iona Gessinger]]^^1^^, [[Bernd Möbius|AUTHOR Bernd Möbius]]^^1^^, [[Bistra Andreeva|AUTHOR Bistra Andreeva]]^^1^^, [[Eran Raveh|AUTHOR Eran Raveh]]^^1^^, [[Ingmar Steiner|AUTHOR Ingmar Steiner]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität des Saarlandes, Germany; ^^2^^audEERING, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 301–305&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper discusses phonetic accommodation of 20 native German speakers interacting with the simulated spoken dialogue system Mirabella in a Wizard-of-Oz experiment. The study examines intonation of wh-questions and pronunciation of allophonic contrasts in German. In a question-and-answer exchange with the system, the users produce predominantly falling intonation patterns for wh-questions when the system does so as well. The number of rising patterns on the part of the users increases significantly when Mirabella produces questions with rising intonation. In a map task, Mirabella provides information about hidden items while producing variants of two allophonic contrasts which are dispreferred by the users. For the [ɪç] vs. [ɪk] contrast in the suffix ⟨-ig⟩, the number of dispreferred variants on the part of the users increases significantly during the map task. For the [εː] vs. [eː] contrast as a realization of stressed ⟨-ä-⟩, such a convergence effect is not found on the group level, yet still occurs for some individual users. Almost every user converges to the system to a substantial degree for a subset of the examined features, but we also find maintenance of preferred variants and even occasional divergence. This individual variation is in line with previous findings in accommodation research.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]^^1^^, [[Jan Michalsky|AUTHOR Jan Michalsky]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern Denmark, Denmark; ^^2^^FAU Erlangen-Nürnberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 306–310&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Strong communication skills in public-speaking and team-working exercises are associated with specific acoustic-prosodic profiles and strategies. We hypothesize that analyzing and assessing these profiles and strategies allows us to predict communicative skills. To that end, we used two analysis methods, one for charismatic and persuasive public speaking (PASCAL), and one for cooperative communication (DPA). PASCAL and DPA competency scores are determined on an acoustic basis for speech recordings of 21 students whose task was to co-create, in 7 teams of 3 students, a fully functioning weather station over 14 weeks in an Electrical Engineering project course — and to jointly write a development report about it afterwards. Results show that the students’ PASCAL scores are significantly correlated with both the grade in their final oral project presentation and the grade of their written report as assessed by an independent lecturer group. The DPA scores correlate with better time-management and team working as well as with the quality and functionality of the designed product. Explanations for the links between student performance and acoustic competence scores are discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Michalsky|AUTHOR Jan Michalsky]]^^1^^, [[Heike Schoormann|AUTHOR Heike Schoormann]]^^2^^, [[Thomas Schultze|AUTHOR Thomas Schultze]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FAU Erlangen-Nürnberg, Germany; ^^2^^Carl von Ossietzky Universität Oldenburg, Germany; ^^3^^GAU Göttingen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 311–315&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prosodic features play a key role in a speaker’s persuasive power. However, previous studies on persuasion have been focused on public speaking and the signaling of leadership, while acoustic studies on negotiation have been primarily concerned with cooperative interactions. In this study we are taking a first step into investigating the role of acoustic-prosodic cues in competitive negotiation, focusing on f0 in same-sex negotiations. Specifically, we ask whether the prosodic correlates of persuasive speech are comparable for public speaking and negotiation. Sixty-two speakers (44f/18m) in 31 same-sex pairs participated in a competitive task to bargain over the selling price of a fictional company. We find a significant correlation between a speaker’s f0 features and his/her interlocutor’s concession range. In line with findings from public speaking, greater f0 excursions and higher f0 minima correlate with negotiation success. However, while the female speakers also show an expected elevated f0 mean, the opposite is the case for male speakers. We propose that in competitive negotiation, displaying dominance may overrule showing passion in contrast to public speaking, but only for male speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jacob Sager|AUTHOR Jacob Sager]], [[Ravi Shankar|AUTHOR Ravi Shankar]], [[Jacob Reinhold|AUTHOR Jacob Reinhold]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 316–320&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce the Varied Emotion in Syntactically Uniform Speech (VESUS) repository as a new resource for the speech community. VESUS is a lexically controlled database, in which a semantically neutral script is portrayed with different emotional inflections. In total, VESUS contains over 250 distinct phrases, each read by ten actors in five emotional states. We use crowd sourcing to obtain ten human ratings for the perceived emotional content of each utterance. Our unique database construction enables a multitude of scientific and technical explorations. To jumpstart this effort, we provide benchmark performance on three distinct emotion recognition tasks using VESUS: longitudinal speaker analysis, extrapolating across syntactical complexity, and generalization to a new speaker.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jia Xin Koh|AUTHOR Jia Xin Koh]]^^1^^, [[Aqilah Mislan|AUTHOR Aqilah Mislan]]^^2^^, [[Kevin Khoo|AUTHOR Kevin Khoo]]^^2^^, [[Brian Ang|AUTHOR Brian Ang]]^^2^^, [[Wilson Ang|AUTHOR Wilson Ang]]^^2^^, [[Charmaine Ng|AUTHOR Charmaine Ng]]^^1^^, [[Ying-Ying Tan|AUTHOR Ying-Ying Tan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^IMDA, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 321–325&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The National Speech Corpus (NSC) is the first large-scale Singapore English corpus spearheaded by the Info-communications and Media Development Authority of Singapore. It aims to become an important source of open speech data for automatic speech recognition (ASR) research and speech-related applications. The first release of the corpus features more than 2000 hours of orthographically transcribed read speech data designed with the inclusion of locally relevant words. It is available for public and commercial use upon request at “www.imda.gov.sg/nationalspeechcorpus”, under the Singapore Open Data License. An accompanying lexicon is currently in the works and will be published soon. In addition, another 1000 hours of conversational speech data will be made available in the near future under the second release of NSC. This paper reports on the development and collection process of the read speech and conversational speech corpora.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michael Picheny|AUTHOR Michael Picheny]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[George Saon|AUTHOR George Saon]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 326–330&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>There has been huge progress in speech recognition over the last several years. Tasks once thought extremely difficult, such as SWITCHBOARD, now approach levels of human performance. The MALACH corpus (LDC catalog LDC2012S05), a 375-Hour subset of a large archive of Holocaust testimonies collected by the Survivors of the Shoah Visual History Foundation, presents significant challenges to the speech community. The collection consists of unconstrained, natural speech filled with disfluencies, heavy accents, age-related coarticulations, un-cued speaker and language switching, and emotional speech - all still open problems for speech recognition systems. Transcription is challenging even for skilled human annotators. This paper proposes that the community place focus on the MALACH corpus to develop speech recognition systems that are more robust with respect to accents, disfluencies and emotional speech. To reduce the barrier for entry, a lexicon and training and testing setups have been created and baseline results using current deep learning technologies are presented. The metadata has just been released by LDC (LDC2019S11). It is hoped that this resource will enable the community to build on top of these baselines so that the extremely important information in these and related oral histories becomes accessible to a wider audience.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pravin Bhaskar Ramteke|AUTHOR Pravin Bhaskar Ramteke]]^^1^^, [[Sujata Supanekar|AUTHOR Sujata Supanekar]]^^1^^, [[Pradyoth Hegde|AUTHOR Pradyoth Hegde]]^^1^^, [[Hanna Nelson|AUTHOR Hanna Nelson]]^^2^^, [[Venkataraja Aithal|AUTHOR Venkataraja Aithal]]^^2^^, [[Shashidhar G. Koolagudi|AUTHOR Shashidhar G. Koolagudi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NITK Surathkal, India; ^^2^^SOAHS Manipal, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 331–335&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces speech database for analyzing children’s speech. The proposed database of children is recorded in Kannada language (one of the South Indian languages) from children between age 2.5 to 6.5 years. The database is named as National Institute of Technology Karnataka Kids’ Speech Corpus (NITK Kids’ Speech Corpus). The relevant design considerations for the database collection are discussed in detail. It is divided into four age groups with an interval of 1 year between each age group. The speech corpus includes nearly 10 hours of speech recordings from 160 children. For each age range, the data is recorded from 40 children (20 male and 20 female). Further, the effect of developmental changes on the speech from 2.5 to 6.5 years are analyzed using pitch and formant analysis. Some of the potential applications, of the NITK Kids’ Speech Corpus, such as, systematic study on the language learning ability of children, phonological process analysis and children speech recognition are discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ahmed Ali|AUTHOR Ahmed Ali]]^^1^^, [[Salam Khalifa|AUTHOR Salam Khalifa]]^^2^^, [[Nizar Habash|AUTHOR Nizar Habash]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^HBKU, Qatar; ^^2^^New York University Abu Dhabi, UAE</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 336–340&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We study the problem of evaluating automatic speech recognition (ASR) systems that target dialectal speech input. A major challenge in this case is that the orthography of dialects is typically not standardized. From an ASR evaluation perspective, this means that there is no clear gold standard for the expected output, and several possible outputs could be considered correct according to different human annotators, which makes standard word error rate (WER) inadequate as an evaluation metric. Specifically targeting the case of Arabic dialects, which are also morphologically rich and complex, we propose a number of alternative WER-based metrics that vary in terms of text representation, including different degrees of morphological abstraction and spelling normalization.We evaluate the efficacy of these metrics by comparing their correlation with human judgments on a validation set of 1,000 utterances. Our results show that the use of morphological abstractions and spelling normalization produces systems with higher correlation with human judgment. We released the code and the datasets to the research community.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Per Fallgren|AUTHOR Per Fallgren]], [[Zofia Malisz|AUTHOR Zofia Malisz]], [[Jens Edlund|AUTHOR Jens Edlund]]
</p><p class="cpabstractcardaffiliationlist">KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 341–345&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech data found in the wild hold many advantages over artificially constructed speech corpora in terms of ecological validity and cultural worth. Perhaps most importantly, there is a lot of it. However, the combination of great quantity, noisiness and variation poses a challenge for its access and processing. Generally speaking, automatic approaches to tackle the problem require good labels for training, while manual approaches require time. In this study, we provide further evidence for a semi-supervised, human-in-the-loop framework that previously has shown promising results for browsing and annotating large quantities of found audio data quickly. The findings of this study show that a 100-hour long subset of the Fearless Steps corpus can be annotated for speech activity in less than 45 minutes, a fraction of the time it would take traditional annotation methods, without a loss in performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mireia Diez|AUTHOR Mireia Diez]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Jan Černocký|AUTHOR Jan Černocký]]
</p><p class="cpabstractcardaffiliationlist">Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 346–350&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a simplified version of the previously proposed diarization algorithm based on Bayesian Hidden Markov Models, which uses Variational Bayesian inference for very fast and robust clustering of x-vector (neural network based speaker embeddings). The presented results show that this clustering algorithm provides significant improvements in diarization performance as compared to the previously used Agglomerative Hierarchical Clustering. The output of this system can be further employed as an initialization for a second stage VB diarization system, using frame-wise MFCC features as input, to obtain optimal results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tae Jin Park|AUTHOR Tae Jin Park]]^^1^^, [[Kyu J. Han|AUTHOR Kyu J. Han]]^^2^^, [[Jing Huang|AUTHOR Jing Huang]]^^2^^, [[Xiaodong He|AUTHOR Xiaodong He]]^^3^^, [[Bowen Zhou|AUTHOR Bowen Zhou]]^^3^^, [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]]^^1^^, [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern California, USA; ^^2^^JD.com, USA; ^^3^^JD.com, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 391–395&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work presents a novel approach for speaker diarization to leverage lexical information provided by automatic speech recognition. We propose a speaker diarization system that can incorporate word-level speaker turn probabilities with speaker embeddings into a speaker clustering process to improve the overall diarization accuracy. To integrate lexical and acoustic information in a comprehensive way during clustering, we introduce an adjacency matrix integration for spectral clustering. Since words and word boundary information for word-level speaker turn probability estimation are provided by a speech recognition system, our proposed method works without any human intervention for manual transcriptions. We show that the proposed method improves diarization performance on various evaluation datasets compared to the baseline diarization system using acoustic information only in speaker embeddings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Laurent El Shafey|AUTHOR Laurent El Shafey]], [[Hagen Soltau|AUTHOR Hagen Soltau]], [[Izhak Shafran|AUTHOR Izhak Shafran]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 396–400&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech applications dealing with conversations require not only recognizing the spoken words, but also determining who spoke when. The task of assigning words to speakers is typically addressed by merging the outputs of two separate systems, namely, an automatic speech recognition (ASR) system and a speaker diarization (SD) system. The two systems are trained independently with different objective functions. Often the SD systems operate directly on the acoustics and are not constrained to respect word boundaries and this deficiency is overcome in an  ad hoc manner. Motivated by recent advances in sequence to sequence learning, we propose a novel approach to tackle the two tasks by a joint ASR and SD system using a recurrent neural network transducer. Our approach utilizes both linguistic and acoustic cues to infer speaker roles, as opposed to typical SD systems, which only use acoustic cues. We evaluated the performance of our approach on a large corpus of medical conversations between physicians and patients. Compared to a competitive conventional baseline, our approach improves word-level diarization error rate from 15.8% to 2.2%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sandro Cumani|AUTHOR Sandro Cumani]]
</p><p class="cpabstractcardaffiliationlist">Politecnico di Torino, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 401–405&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Generative calibration models have shown to be an effective alternative to traditional discriminative score calibration techniques, such as Logistic Regression (LogReg). Provided that the score distribution assumptions are sufficiently accurate, generative approaches not only have similar or better performance with respect to LogReg, but also allow for unsupervised or semi-supervised training.

Recently, we have proposed non-Gaussian linear calibration models able to overcome the limitations of Gaussian approaches. Although these models allow for better characterization of score distributions, they still require the target and non-target distributions to be reciprocally symmetric.

In this work we further extend these models to cover asymmetric score distributions, so as to improve calibration for both supervised and unsupervised scenarios. The improvements have been assessed on NIST SRE 2010 telephone data.</p></div>
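For context, the discriminative LogReg baseline referred to above amounts to fitting an affine map from raw scores to calibrated log-likelihood ratios on labelled target and non-target scores. The sketch below shows that baseline on synthetic scores (prior weighting and other details are simplified); the generative, asymmetric models proposed in the paper are not reproduced here.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(6)

# Synthetic raw verification scores: targets score higher than non-targets, and
# the two classes are deliberately asymmetric around zero (i.e. miscalibrated).
target_scores = rng.normal(3.0, 1.0, 2000)
nontarget_scores = rng.normal(-1.0, 2.0, 20000)
scores = np.concatenate([target_scores, nontarget_scores])
labels = np.concatenate([np.ones_like(target_scores), np.zeros_like(nontarget_scores)])

# Logistic-regression calibration: fit a, b so that a*score + b behaves like a
# log-likelihood ratio (class imbalance handled crudely via balanced weights).
clf = LogisticRegression(class_weight="balanced").fit(scores.reshape(-1, 1), labels)
a, b = clf.coef_[0, 0], clf.intercept_[0]
print(f"calibrated LLR = {a:.3f} * score + {b:+.3f}")

raw = np.array([-2.0, 0.0, 2.0, 4.0])
print("calibrated LLRs:", (a * raw + b).round(2))
```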
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Koji Okabe|AUTHOR Koji Okabe]], [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]]
</p><p class="cpabstractcardaffiliationlist">NEC, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 406–410&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates a novel data augmentation approach to train deep neural networks (DNNs) used for speaker embedding, i.e. to extract representation that allows easy comparison between speaker voices with a simple geometric operation. Data augmentation is used to create new examples from an existing training set, thereby increasing the quantity of training data improves the robustness of the model. We attempt to increase the number of speakers in the training set by generating new speakers via voice conversion. This speaker augmentation expands the coverage of speakers in the embedding space in contrast to conventional audio augmentation methods which focus on within-speaker variability. With an increased number of speakers in the training set, the DNN is trained to produce a better speaker-discriminative embedding. We also advocate using bandwidth extension to augment narrowband speech for a wideband application. Text-independent speaker recognition experiments in Speakers in the Wild (SITW) demonstrate a 17.9% reduction in minimum detection cost with speaker augmentation. The combined use of the two techniques provides further improvement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Emre Yılmaz|AUTHOR Emre Yılmaz]]^^1^^, [[Adem Derinel|AUTHOR Adem Derinel]]^^1^^, [[Kun Zhou|AUTHOR Kun Zhou]]^^1^^, [[Henk van den Heuvel|AUTHOR Henk van den Heuvel]]^^2^^, [[Niko Brummer|AUTHOR Niko Brummer]]^^3^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^, [[David A. van Leeuwen|AUTHOR David A. van Leeuwen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^Radboud Universiteit Nijmegen, The Netherlands; ^^3^^Cyberupt, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 411–415&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes our initial efforts to build a large-scale speaker diarization (SD) and identification system on a recently digitized radio broadcast archive from the Netherlands which has more than 6500 audio tapes with 3000 hours of Frisian-Dutch speech recorded between 1950–2016. The employed large-scale diarization scheme involves two stages: (1) tape-level speaker diarization providing pseudo-speaker identities and (2) speaker linking to relate pseudo-speakers appearing in multiple tapes. Having access to the speaker models of several frequently appearing speakers from the previously collected FAME! speech corpus, we further perform speaker identification by linking these known speakers to the pseudo-speakers identified at the first stage. In this work, we present a recently created longitudinal and multilingual SD corpus designed for large-scale SD research and evaluate the performance of a new speaker linking system using x-vectors with PLDA to quantify cross-tape speaker similarity on this corpus. The performance of this speaker linking system is evaluated on a small subset of the archive which is manually annotated with speaker information. The speaker linking performance reported on this subset (53 hours) and the whole archive (3000 hours) is compared to quantify the impact of scaling up in the amount of speech data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Harishchandra Dubey|AUTHOR Harishchandra Dubey]], [[Abhijeet Sangwan|AUTHOR Abhijeet Sangwan]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 416–420&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker diarization determines  who spoke and when? in an audio stream. In this study, we propose a model-based approach for robust speaker clustering using i-vectors. The i-vectors extracted from different segments of same speaker are correlated. We model this correlation with a Markov Random Field (MRF) network. Leveraging the advancements in MRF modeling, we used Toeplitz Inverse Covariance (TIC) matrix to represent the MRF correlation network for each speaker. This approaches captures the sequential structure of i-vectors (or equivalent speaker turns) belonging to same speaker in an audio stream. A variant of standard Expectation Maximization (EM) algorithm is adopted for deriving closed-form solution using dynamic programming (DP) and the alternating direction method of multiplier (ADMM). Our diarization system has four steps: (1) ground-truth segmentation; (2) i-vector extraction; (3) post-processing (mean subtraction, principal component analysis, and length-normalization) ; and (4) proposed speaker clustering. We employ cosine K-means and movMF speaker clustering as baseline approaches. Our evaluation data is derived from: (i) CRSS-PLTL corpus, and (ii) two meetings subset of the AMI corpus. Relative reduction in diarization error rate (DER) for CRSS-PLTL corpus is 43.22% using the proposed advancements as compared to baseline. For AMI meetings IS1000a and IS1003b, relative DER reduction is 29.37% and 9.21%, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ville Vestman|AUTHOR Ville Vestman]]^^1^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^1^^, [[Tomi H. Kinnunen|AUTHOR Tomi H. Kinnunen]]^^2^^, [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NEC, Japan; ^^2^^University of Eastern Finland, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 351–355&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker embeddings are continuous-value vector representations that allow easy comparison between voices of speakers with simple geometric operations. Among others, i-vector and x-vector have emerged as the mainstream methods for speaker embedding. In this paper, we illustrate the use of modern computation platform to harness the benefit of GPU acceleration for i-vector extraction. In particular, we achieve an acceleration of 3000 times in frame posterior computation compared to real time and 25 times in training the i-vector extractor compared to the CPU baseline from Kaldi toolkit. This significant speed-up allows the exploration of ideas that were hitherto impossible. In particular, we show that it is beneficial to update the universal background model (UBM) and re-compute frame alignments while training the i-vector extractor. Additionally, we are able to study different variations of i-vector extractors more rigorously than before. In this process, we reveal some undocumented details of Kaldi’s i-vector extractor and show that it outperforms the standard formulation by a margin of 1 to 2% when tested with VoxCeleb speaker verification protocol. All of our findings are asserted by ensemble averaging the results from multiple runs with random start.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suwon Shon|AUTHOR Suwon Shon]]^^1^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^2^^, [[Douglas Reynolds|AUTHOR Douglas Reynolds]]^^3^^, [[James Glass|AUTHOR James Glass]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^Johns Hopkins University, USA; ^^3^^MIT Lincoln Laboratory, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 356–360&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Multi-target Challenge aims to assess how well current speech technology is able to determine whether or not a recorded utterance was spoken by one of a large number of blacklisted speakers. It is a form of multi-target speaker detection based on real-world telephone conversations. Data recordings are generated from call center customer-agent conversations. The task is to measure how accurately one can detect 1) whether a test recording is spoken by a blacklisted speaker, and 2) which specific blacklisted speaker was talking. This paper outlines the challenge and provides its baselines, results, and discussions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhifu Gao|AUTHOR Zhifu Gao]]^^1^^, [[Yan Song|AUTHOR Yan Song]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^2^^, [[Pengcheng Li|AUTHOR Pengcheng Li]]^^1^^, [[Yiheng Jiang|AUTHOR Yiheng Jiang]]^^1^^, [[Li-Rong Dai|AUTHOR Li-Rong Dai]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^University of Kent, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 361–365&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep embedding learning based speaker verification (SV) methods have recently achieved significant performance improvement over traditional i-vector systems, especially for short duration utterances. Embedding learning commonly consists of three components: frame-level feature processing, utterance-level embedding learning, and loss function to discriminate between speakers. For the learned embeddings, a back-end model (i.e., Linear Discriminant Analysis followed by Probabilistic Linear Discriminant Analysis (LDA-PLDA)) is generally applied as a similarity measure. In this paper, we propose to further improve the effectiveness of deep embedding learning methods in the following components: (1) A multi-stage aggregation strategy, exploited to hierarchically fuse time-frequency context information for effective frame-level feature processing. (2) A discriminant analysis loss is designed for end-to-end training, which aims to explicitly learn the discriminative embeddings, i.e. with small intra-speaker and large inter-speaker variances. To evaluate the effectiveness of the proposed improvements, we conduct extensive experiments on the VoxCeleb1 dataset. The results outperform state-of-the-art systems by a significant margin. It is also worth noting that the results are obtained using a simple cosine metric instead of the more complex LDA-PLDA backend scoring.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qingjian Lin|AUTHOR Qingjian Lin]]^^1^^, [[Ruiqing Yin|AUTHOR Ruiqing Yin]]^^2^^, [[Ming Li|AUTHOR Ming Li]]^^1^^, [[Hervé Bredin|AUTHOR Hervé Bredin]]^^2^^, [[Claude Barras|AUTHOR Claude Barras]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Duke Kunshan University, China; ^^2^^LIMSI (UPR 3251), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 366–370&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>More and more neural network approaches have achieved considerable improvement upon submodules of speaker diarization system, including speaker change detection and segment-wise speaker embedding extraction. Still, in the clustering stage, traditional algorithms like probabilistic linear discriminant analysis (PLDA) are widely used for scoring the similarity between two speech segments. In this paper, we propose a supervised method to measure the similarity matrix between all segments of an audio recording with sequential bidirectional long short-term memory networks (Bi-LSTM). Spectral clustering is applied on top of the similarity matrix to further improve the performance. Experimental results show that our system significantly outperforms the state-of-the-art methods and achieves a diarization error rate of 6.63% on the NIST SRE 2000 CALLHOME database.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joon Son Chung|AUTHOR Joon Son Chung]]^^1^^, [[Bong-Jin Lee|AUTHOR Bong-Jin Lee]]^^2^^, [[Icksang Han|AUTHOR Icksang Han]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Naver, Korea; ^^2^^Naver, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 371–375&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The goal of this work is to determine ‘who spoke when’ in real-world meetings. The method takes surround-view video and single or multi-channel audio as inputs, and generates robust diarisation outputs.

To achieve this, we propose a novel iterative approach that first enrolls speaker models using audio-visual correspondence, then uses the enrolled models together with the visual information to determine the active speaker.

We show strong quantitative and qualitative performance on a dataset of real-world meetings. The method is also evaluated on the public AMI meeting corpus, on which we demonstrate results that exceed all comparable methods. We also show that beamforming can be used together with the video to further improve the performance when multi-channel audio is available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiamin Xie|AUTHOR Jiamin Xie]], [[Leibny Paola García-Perera|AUTHOR Leibny Paola García-Perera]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 376–380&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Children’s speech and other vocalizations pose challenges for speaker diarization. The spontaneity of kids causes rapid or delayed phonetic variations in an utterance, which makes speaker’s information difficult to extract. Fast speaker turns and long overlap in conversations between children and their guardians makes correct segmentation even harder compared to, say a business meeting. In this work, we explore diarization of child-guardian interactions. We investigate the effectiveness of adding children’s speech to adult data in Probabilistic Linear Discriminant Analysis (PLDA) training. We also train each of two PLDAs with separate objective to a coarse or fine classification of speakers. A fusion of the two PLDAs is examined. By performing this fusion, we expect to improve on children’s speech while preserving adult segmentations. Our experimental results show that including children’s speech helps reduce DER by 2.7%, achieving a best overall DER of 33.1% with the x-vector system. A fusion system yields a reasonable 33.3% DER that validates our concept.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alan McCree|AUTHOR Alan McCree]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 381–385&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many modern systems for speaker diarization, such as the top-performing JHU system in the DIHARD 2018 challenge, rely on clustering of DNN speaker embeddings followed by HMM resegmentation. Two problems with this approach are that parameters need significant retuning for different applications, and that the DNN contributes only to the clustering task and not the resegmentation. This paper presents two contributions: an improved HMM segment assignment algorithm using leave-one-out Gaussian PLDA scoring, and an approach to training the DNN such that embeddings directly optimize performance of this scoring method with generatively updated PLDA parameters. Initial experiments with this new system are very promising, achieving state-of-the-art performance for two separate tasks (Callhome and DIHARD18) without any task-dependent parameter tuning.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Omid Ghahabi|AUTHOR Omid Ghahabi]], [[Volker Fischer|AUTHOR Volker Fischer]]
</p><p class="cpabstractcardaffiliationlist">EML European Media Laboratory, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 386–390&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker diarization is more challenging in presence of background noise or music, frequent speaker changes, and cross talks. In an online scenario, the decision should be made at time, given only the current short segment and the speakers detected in the past, which makes the task even harder. In this work, an online robust speaker diarization algorithm is proposed in which speech segments are represented by low dimensional vectors referred to as speaker-corrupted embeddings. The proposed speaker embedding network is a deep neural network which takes speaker-corrupted supervectors as input, uses variable ReLU (VReLU) as an activation function, and tries to discriminate the background speakers. Speaker corruption is performed by adding supervectors built by 20 speech frames from other speakers to the supervectors of a given speaker. It is shown that speaker corruption, VReLU, and input dropout increase the generalization power of the proposed network. To increase the robustness, the proposed embeddings are concatenated with LDA transformed supervectors. Experimental results on the Albayzin 2018 evaluation set show a competitive accuracy, more robustness, and much lower computational cost compared to typical offline algorithms.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[György Kovács|AUTHOR György Kovács]]^^1^^, [[László Tóth|AUTHOR László Tóth]]^^2^^, [[Dirk Van Compernolle|AUTHOR Dirk Van Compernolle]]^^3^^, [[Marcus Liwicki|AUTHOR Marcus Liwicki]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Luleå University of Technology, Sweden; ^^2^^University of Szeged, Hungary; ^^3^^Katholieke Universiteit Leuven, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 421–425&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A pivotal question in Automatic Speech Recognition (ASR) is the robustness of the trained models. In this study, we investigate the combination of two methods commonly applied to increase the robustness of ASR systems. On the one hand, inspired by auditory experiments and signal processing considerations, multi-band band processing has been used for decades to improve the noise robustness of speech recognition. On the other hand, dropout is a commonly used regularization technique to prevent overfitting by keeping the model from becoming over-reliant on a small set of neurons. We hypothesize that the careful combination of the two approaches would lead to increased robustness, by preventing the resulting model from over-rely on any given band.

To verify our hypothesis, we investigate various approaches for the combination of the two methods using the Aurora-4 corpus. The results obtained corroborate our initial assumption, and show that the proper combination of the two techniques leads to increased robustness, and to significantly lower word error rates (WERs). Furthermore, we find that the accuracy scores attained here compare favourably to those reported recently on the clean training scenario of the Aurora-4 corpus.</p></div>
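The abstract does not spell out how the two techniques are combined; as one conceivable illustration only, dropout can be applied at the level of whole band streams so that the acoustic model cannot over-rely on any single band. The band splitting and the downstream model are assumed and not shown.

```python
# Illustrative sketch only: dropout applied to entire band streams. Whether the
# paper uses exactly this variant is an assumption of this example.
import torch
import torch.nn as nn

class BandDropout(nn.Module):
    def __init__(self, p: float = 0.3):
        super().__init__()
        self.p = p  # probability of dropping an entire band stream

    def forward(self, bands: torch.Tensor) -> torch.Tensor:
        # bands: (batch, n_bands, time, feat_dim)
        if not self.training:
            return bands
        keep = (torch.rand(bands.shape[:2], device=bands.device) > self.p).float()
        # Rescale the kept bands so the expected magnitude stays unchanged.
        return bands * keep[:, :, None, None] / (1.0 - self.p)

x = torch.randn(4, 5, 100, 40)            # 4 utterances split into 5 bands
print(BandDropout(0.3).train()(x).shape)  # torch.Size([4, 5, 100, 40])
```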
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jian Wu|AUTHOR Jian Wu]]^^1^^, [[Yong Xu|AUTHOR Yong Xu]]^^2^^, [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]]^^2^^, [[Lianwu Chen|AUTHOR Lianwu Chen]]^^3^^, [[Meng Yu|AUTHOR Meng Yu]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^, [[Dong Yu|AUTHOR Dong Yu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Tencent, USA; ^^3^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 466–470&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper summarizes several contributions for improving the speaker-dependent separation system for CHiME-5 challenge, which aims to solve the problem of multi-channel, highly-overlapped conversational speech recognition in a dinner party scenario with reverberations and non-stationary noises. Specifically, we adopt a speaker-aware training method by using i-vector as the target speaker information for multi-talker speech separation. With only one unified separation model for all speakers, we achieve a 10% absolute improvement in terms of word error rate (WER) over the previous baseline of 80.28% on the development set by leveraging our newly proposed data processing techniques and beamforming approach. With our improved back-end acoustic model, we further reduce WER to 60.15% which surpasses the result of our submitted CHiME-5 challenge system without applying any fusion techniques.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peidong Wang|AUTHOR Peidong Wang]], [[Ke Tan|AUTHOR Ke Tan]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 471–475&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Monaural speech enhancement has made dramatic advances in recent years. Although enhanced speech has been demonstrated to have better intelligibility and quality for human listeners, feeding it directly to automatic speech recognition (ASR) systems trained with noisy speech has not produced expected improvements in ASR performance. The lack of an enhancement benefit on recognition, or the gap between monaural speech enhancement and recognition, is often attributed to speech distortions introduced in the enhancement process. In this study, we analyze the distortion problem and propose a distortion-independent acoustic modeling scheme. Experimental results show that the distortion-independent acoustic model is able to overcome the distortion problem. Moreover, it can be used with various speech enhancement models. Both the distortion-independent and a noise-dependent acoustic model perform better than the previous best system on the CHiME-2 corpus. The noise-dependent acoustic model achieves a word error rate of 8.7%, outperforming the previous best result by 6.5% relatively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peidong Wang|AUTHOR Peidong Wang]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 476–480&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It has recently been shown that a distortion-independent acoustic modeling method is able to overcome the distortion problem caused by speech enhancement. In this study, we improve the distortion-independent acoustic model by feeding it with enhanced spectral features. Using enhanced magnitude spectra, the automatic speech recognition (ASR) system achieves a word error rate of 7.8% on the CHiME-2 corpus, outperforming our previous best system by more than 10% relatively. Compared with the corresponding enhanced waveform signal based system, systems using enhanced spectral features obtain up to 24% relative improvement. These comparisons show that speech enhancement is helpful for robust ASR and that enhanced spectral features are more suitable for ASR tasks than enhanced waveform signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Paarth Neekhara|AUTHOR Paarth Neekhara]], [[Shehzeen Hussain|AUTHOR Shehzeen Hussain]], [[Prakhar Pandey|AUTHOR Prakhar Pandey]], [[Shlomo Dubnov|AUTHOR Shlomo Dubnov]], [[Julian McAuley|AUTHOR Julian McAuley]], [[Farinaz Koushanfar|AUTHOR Farinaz Koushanfar]]
</p><p class="cpabstractcardaffiliationlist">University of California at San Diego, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 481–485&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we demonstrate the existence of universal adversarial audio perturbations that cause mis-transcription of audio signals by automatic speech recognition (ASR) systems. We propose an algorithm to find a single quasi-imperceptible perturbation, which when added to any arbitrary speech signal, will most likely fool the victim speech recognition model. Our experiments demonstrate the application of our proposed technique by crafting audio-agnostic universal perturbations for the state-of-the-art ASR system — Mozilla DeepSpeech. Additionally, we show that such perturbations generalize to a significant extent across models that are not available during training, by performing a transferability test on a WaveNet based ASR system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masakiyo Fujimoto|AUTHOR Masakiyo Fujimoto]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]
</p><p class="cpabstractcardaffiliationlist">NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 486–490&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a method of noise-robust automatic speech recognition (ASR) that remains effective under one-pass single-channel processing. Under these constraints, the use of single-channel speech enhancement seems to be a reasonable noise-robust approach to ASR, because complicated techniques requiring multi-pass processing cannot be used. However, in many cases, single-channel speech enhancement seriously deteriorates the accuracy of ASR because of speech distortion. In addition, the advanced acoustic modeling framework (joint training) is relatively ineffective in the case of single-channel processing. To overcome these problems, we propose a noise-robust acoustic modeling framework based on a feature-level combination of noisy speech and enhanced speech. To obtain further improvements, we also adopt a sub-network-level combination of noisy and enhanced speech, and a gating mechanism that can dynamically select appropriate speech features. Through comparative evaluations, we confirm that the proposed method successfully improves the accuracy of ASR in noisy environments under strong constraints.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bin Liu|AUTHOR Bin Liu]]^^1^^, [[Shuai Nie|AUTHOR Shuai Nie]]^^1^^, [[Shan Liang|AUTHOR Shan Liang]]^^1^^, [[Wenju Liu|AUTHOR Wenju Liu]]^^1^^, [[Meng Yu|AUTHOR Meng Yu]]^^2^^, [[Lianwu Chen|AUTHOR Lianwu Chen]]^^3^^, [[Shouye Peng|AUTHOR Shouye Peng]]^^4^^, [[Changliang Li|AUTHOR Changliang Li]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Tencent, USA; ^^3^^Tencent, China; ^^4^^Xueersi Online School, China; ^^5^^Kingsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 491–495&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, the end-to-end system has made significant breakthroughs in the field of speech recognition. However, this single end-to-end architecture is not especially robust to the input variations interfered of noises and reverberations, resulting in performance degradation dramatically in reality. To alleviate this issue, the mainstream approach is to use a well-designed speech enhancement module as the front-end of ASR. However, enhancement modules would result in speech distortions and mismatches to training, which sometimes degrades the ASR performance. In this paper, we propose a jointly adversarial enhancement training to boost robustness of end-to-end systems. Specifically, we use a jointly compositional scheme of mask-based enhancement network, attention-based encoder-decoder network and discriminant network during training. The discriminator is used to distinguish between the enhanced features from enhancement network and clean features, which could guide enhancement network to output towards the realistic distribution. With the joint optimization of the recognition, enhancement and adversarial loss, the compositional scheme is expected to learn more robust representations for the recognition task automatically. Systematic experiments on AISHELL-1 show that the proposed method improves the noise robustness of end-to-end systems and achieves the relative error rate reduction of 4.6% over the multi-condition training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meet Soni|AUTHOR Meet Soni]], [[Ashish Panda|AUTHOR Ashish Panda]]
</p><p class="cpabstractcardaffiliationlist">TCS Innovation Labs Mumbai, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 426–430&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The application of Time-Frequency (T-F) masking based approaches for Automatic Speech Recognition has been shown to provide significant gains in system performance in the presence of additive noise. Such approaches give performance improvement when the T-F masking front-end is trained jointly with the acoustic model. However, such systems still rely on a pre-trained T-F masking enhancement block, trained using pairs of clean and noisy speech signals. Pre-training is necessary due to large number of parameters associated with the enhancement network. In this paper, we propose a flat-start joint training of a network that has both a T-F masking based enhancement block and a phoneme classification block. In particular, we use fully convolutional network as an enhancement front-end to reduce the number of parameters. We train the network by jointly updating the parameters of both these blocks using tied Context-Dependent phoneme states as targets. We observe that pretraining of the proposed enhancement block is not necessary for the convergence. In fact, the proposed flat-start joint training converges faster than the baseline multi-condition trained model. The experiments performed on Aurora-4 database show 7.06% relative improvement over multi-conditioned baseline. We get similar improvements for unseen test conditions as well.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Long Wu|AUTHOR Long Wu]], [[Hangting Chen|AUTHOR Hangting Chen]], [[Li Wang|AUTHOR Li Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 431–435&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Feature mapping (FM) jointly trained with acoustic model (AFM) is commonly used for single-channel speech enhancement. However, the performance is affected by the inter-speaker variability. In this paper, we propose speaker-invariant AFM (SIAFM) aiming at curtailing the inter-talker variability while achieving speech enhancement. In SIAFM, a feature-mapping network, an acoustic model and a speaker classifier network are jointly optimized to minimize the feature-mapping loss and the senone classification loss, and simultaneously min-maximize the speaker classification loss. Evaluated on AMI dataset, the proposed SIAFM achieves 4.8% and 7.0% relative word error rate (WER) reduction on the overlapped and non-overlapped condition over the baseline acoustic model trained with single distant microphone (SDM) data. Additionally, the SIAFM obtains 3.0% relative overlapped WER and 4.2% relative non-overlapped WER decrease over the multi-conditional (MCT) acoustic model. To further promote the performance of SIAFM, we employ teacher-student learning (TS), in which the posterior probabilities generated by the individual headset microphone (IHM) data can be used in lieu of labels to train the SIAFM model. The experiments show that compared with MCT model, SIAFM with TS (SIAFM-TS) can reach 4.2% relative overlapped WER and 6.3% relative non-overlapped WER decrease respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ji Ming|AUTHOR Ji Ming]], [[Danny Crookes|AUTHOR Danny Crookes]]
</p><p class="cpabstractcardaffiliationlist">Queen’s University Belfast, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 436–440&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe the theory and implementation of full-sentence speech correlation for speech recognition, and demonstrate its superior robustness to unseen/untrained noise. For the Aurora 2 data, trained with only clean speech, the new method performs competitively against the state-of-the-art with multicondition training and adaptation, and achieves the lowest word error rate in very low SNR (-5 dB). Further experiments with highly nonstationary noise (pop song, broadcast news, etc.) show the surprising ability of the new method to handle unpredictable noise. The new method adds several novel developments to our previous research, including the modeling of the speaker characteristics along with other acoustic and semantic features of speech for separating speech from noise, and a novel Viterbi algorithm to implement full-sentence correlation for speech recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meet Soni|AUTHOR Meet Soni]], [[Sonal Joshi|AUTHOR Sonal Joshi]], [[Ashish Panda|AUTHOR Ashish Panda]]
</p><p class="cpabstractcardaffiliationlist">TCS Innovation Labs Mumbai, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 441–445&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multi-conditioned training is a state-of-the-art approach to achieve robustness in Automatic Speech Recognition (ASR) systems. This approach works well in practice for seen degradation conditions. However, the performance of such system is still an issue for unseen degradation conditions. In this work we consider distortions due to additive noise and channel mismatch. To achieve the robustness to additive noise, we propose a parametric generative model for noise signals. By changing the parameters of the proposed generative model, various noise signals can be generated and used to develop a multi-conditioned dataset for ASR system training. The generative model is designed to span the feature space of Mel Filterbank Energies by using band-limited white noise signals as basis. To simulate channel distortions, we propose to shift the mean of log spectral magnitude using utterances with estimated channel distortions. Experiments performed on the Aurora 4 noisy speech database show that using noise types generated from the proposed generative model for multi-conditioned training provides significant performance gain for additive noise in unseen conditions. We compare our results with those from multi-conditioning by various real noise databases including environmental and other real life noises.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shashi Kumar|AUTHOR Shashi Kumar]], [[Shakti P. Rath|AUTHOR Shakti P. Rath]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 446–450&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) systems trained on clean speech do not perform well in far-field scenario. Degradation in word error rate (WER) can be as large as 40% in this mismatched scenario. Typically, speech enhancement is applied to map speech from far-field condition to clean condition using a neural network, commonly known as denoising autoencoder (DA). Such speech enhancement technique has shown significant improvement in ASR accuracy. It is a common practice to use mean-square error (MSE) loss to train DA which is based on regression model with residual noise modeled by zero-mean and constant co-variance Gaussian distribution. However, both these assumptions are not optimal, especially in highly non-stationary noisy and far-field scenario. Here, we propose a more generalized loss based on non-zero mean and heteroscedastic co-variance distribution for the residual variables. On the top, we present several novel DA architectures that are more suitable for the heteroscedastic loss. It is shown that the proposed methods outperform the conventional DA and MSE loss by a large margin. We observe relative improvement of 7.31% in WER compared to conventional DA and overall, a relative improvement of 14.4% compared to mismatched train and test scenario.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marc Delcroix|AUTHOR Marc Delcroix]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]]^^1^^, [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]^^1^^, [[Shigeki Karita|AUTHOR Shigeki Karita]]^^1^^, [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]]^^1^^, [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 451–455&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end (E2E) automatic speech recognition (ASR) that directly maps a sequence of speech features into a sequence of characters using a single neural network has received a lot of attention as it greatly simplifies the training and decoding pipelines and enables optimizing the whole system E2E. Recently, such systems have been extended to recognize speech mixtures by inserting a speech separation mechanism into the neural network, allowing to output recognition results for each speaker in the mixture. However, speech separation suffers from a global permutation ambiguity issue, i.e. arbitrary mapping between source speakers and outputs. We argue that this ambiguity would seriously limit the practical use of E2E separation systems. SpeakerBeam has been proposed as an alternative to speech separation to mitigate the global permutation ambiguity. SpeakerBeam aims at extracting only a target speaker in a mixture based on his/her speech characteristics, thus avoiding the global permutation problem. In this paper, we combine SpeakerBeam and an E2E ASR system to allow E2E training of a target speech recognition system. We show promising target speech recognition results in mixtures of two speakers, and discuss interesting properties of the proposed system in terms of speech enhancement and diarization ability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[I-Hung Hsu|AUTHOR I-Hung Hsu]], [[Ayush Jaiswal|AUTHOR Ayush Jaiswal]], [[Premkumar Natarajan|AUTHOR Premkumar Natarajan]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 456–460&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural network models for speech recognition have achieved great success recently, but they can learn incorrect associations between the target and nuisance factors of speech (e.g., speaker identities, background noise, etc.), which can lead to overfitting. While several methods have been proposed to tackle this problem, existing methods incorporate additional information about nuisance factors during training to develop invariant models. However, enumeration of all possible nuisance factors in speech data and the collection of their annotations is difficult and expensive. We present a robust training scheme for end-to-end speech recognition that adopts an unsupervised adversarial invariance induction framework to separate out essential factors for speech-recognition from nuisances without using any supplementary labels besides the transcriptions. Experiments show that the speech recognition model trained with the proposed training scheme achieves relative improvements of 5.48% on WSJ0, 6.16% on CHiME3, and 6.61% on TIMIT dataset over the base model. Additionally, the proposed method achieves a relative improvement of 14.44% on the combined WSJ0+CHiME3 dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takahito Suzuki|AUTHOR Takahito Suzuki]]^^1^^, [[Jun Ogata|AUTHOR Jun Ogata]]^^2^^, [[Takashi Tsunakawa|AUTHOR Takashi Tsunakawa]]^^1^^, [[Masafumi Nishida|AUTHOR Masafumi Nishida]]^^1^^, [[Masafumi Nishimura|AUTHOR Masafumi Nishimura]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shizuoka University, Japan; ^^2^^AIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 461–465&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Throat microphones are robust against external noise because they receive vibrations directly from the skin, however, their available speech data is limited. This work aims to improve the speech recognition accuracy of throat microphones, and we propose a knowledge distillation method of hybrid DNN-HMM acoustic model. This method distills the knowledge from acoustic model trained with a large amount of close-talk microphone speech data (teacher model) to acoustic model for throat microphones (student model) using a small amount of parallel data of throat and close-talk microphones. The frontend network of the student model contains a feature mapping network from throat microphone acoustic features to close-talk microphone bottleneck features, and the back-end network is a phonetic discrimination network from close-talk microphone bottleneck features. We attempted to improve recognition accuracy further by initializing student model parameters using pretrained front-end and back-end networks. Experimental results using Japanese read speech data showed that the proposed approach achieved 9.8% relative improvement of character error rate (14.3% → 12.9%) compared to the hybrid acoustic model trained only with throat microphone speech data. Furthermore, under noise environments of approximately 70 dBA or higher, the throat microphone system with our approach outperformed the close-talk microphone system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zixiaofan Yang|AUTHOR Zixiaofan Yang]], [[Bingyan Hu|AUTHOR Bingyan Hu]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]
</p><p class="cpabstractcardaffiliationlist">Columbia University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 496–500&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we describe a novel approach for generating unsupervised humor labels using time-aligned user comments, and predicting humor using audio information alone. We collected 241 videos of comedy movies and gameplay videos from one of the largest Chinese video-sharing websites. We generate unsupervised humor labels from laughing comments, and find high agreement between these labels and human annotations. From these unsupervised labels, we build deep learning models using speech and text features, which obtain an AUC of 0.751 in predicting humor on a manually annotated test set. To our knowledge, this is the first study predicting perceived humor in large-scale audio data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alice Baird|AUTHOR Alice Baird]]^^1^^, [[Eduardo Coutinho|AUTHOR Eduardo Coutinho]]^^2^^, [[Julia Hirschberg|AUTHOR Julia Hirschberg]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^University of Liverpool, UK; ^^3^^Columbia University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 539–543&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ability to discern an individual’s level of sincerity varies from person to person and across cultures. Sincerity is typically a key indication of personality traits such as trustworthiness, and portraying sincerity can be integral to an abundance of scenarios, e. g. , when apologising. Speech signals are one important factor when discerning sincerity and, with more modern interactions occurring remotely, automatic approaches for the recognition of sincerity from speech are beneficial during both interpersonal and professional scenarios. In this study we present details of the Sincere Apology Corpus ( Sina-C). Annotated by 22 individuals for their perception of sincerity,  Sina-C is an English acted-speech corpus of 32 speakers, apologising in multiple ways. To provide an updated baseline for the corpus, various machine learning experiments are conducted. Finding that extracting deep data-representations (utilising the  Deep Spectrum toolkit) from the speech signals is best suited. Classification results on the binary (sincere / not sincere) task are at best 79.2% Unweighted Average Recall and for regression, in regards to the degree of sincerity, a Root Mean Square Error of 0.395 from the standardised range [-1.51; 1.72] is obtained.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oliver Niebuhr|AUTHOR Oliver Niebuhr]], [[Kerstin Fischer|AUTHOR Kerstin Fischer]]
</p><p class="cpabstractcardaffiliationlist">University of Southern Denmark, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 544–548&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we test whether the perception of filled-pause (FP) frequency and public-speaking performance are mediated by the phonetic characteristics of FPs. In particular, total duration, vowel-formant pattern (if present), and nasal-segment proportion of FPs were correlated with perceptual data of 29 German listeners who rated excerpts of business presentations given by 68 German-speaking managers. Results show strong inter-speaker differences in how and how often FPs are realized. Moreover, differences in FP duration and nasal proportion are significantly correlated with estimated (i.e. subjective) FP frequency and perceived speaker performance. The shorter and more nasal a speaker’s FPs are, the more do listeners underestimate the speaker’s actual FP frequency and the higher they rate the speaker’s public-speaking performance. The results are discussed in terms of their implications for FP saliency and rhetorical training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]], [[Philipp Klumpp|AUTHOR Philipp Klumpp]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]], [[Elmar Nöth|AUTHOR Elmar Nöth]]
</p><p class="cpabstractcardaffiliationlist">FAU Erlangen-Nürnberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 549–553&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>There are a lot of features that can be extracted from speech signals for different applications such as automatic speech recognition or speaker verification. However, for pathological speech processing there is a need to extract features about the presence of the disease or the state of the patients that are comprehensible for clinical experts. Phonological posteriors are a group of features that can be interpretable by the clinicians and at the same time carry suitable information about the patient’s speech. This paper presents a tool to extract phonological posteriors directly from speech signals. The proposed method consists of a bank of parallel bidirectional recurrent neural networks to estimate the posterior probabilities of the occurrence of different phonological classes. The proposed models are able to detect the phonological classes with accuracies over 90%. In addition, the trained models are available to be used by the research community interested in the topic.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yoan Dinkov|AUTHOR Yoan Dinkov]]^^1^^, [[Ahmed Ali|AUTHOR Ahmed Ali]]^^2^^, [[Ivan Koychev|AUTHOR Ivan Koychev]]^^1^^, [[Preslav Nakov|AUTHOR Preslav Nakov]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Sofia University, Bulgaria; ^^2^^HBKU, Qatar</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 501–505&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We address the problem of predicting the leading political ideology, i.e., left-center-right bias, for YouTube channels of news media. Previous work on the problem has focused exclusively on text and on analysis of the language used, topics discussed, sentiment, and the like. In contrast, here we study videos, which yields an interesting multimodal setup. Starting with gold annotations about the leading political ideology of major world news media from Media Bias/Fact Check, we searched on YouTube to find their corresponding channels, and we downloaded a recent sample of videos from each channel. We crawled more than 1,000 YouTube hours along with the corresponding subtitles and metadata, thus producing a new multimodal dataset. We further developed a multimodal deep-learning architecture for the task. Our analysis shows that the use of acoustic signal helped to improve bias detection by more than 6% absolute over using text and metadata only. We release the dataset to the research community, hoping to help advance the field of multi-modal political bias detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guozhen An|AUTHOR Guozhen An]]^^1^^, [[Rivka Levitan|AUTHOR Rivka Levitan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUNY Queensborough Community College, USA; ^^2^^CUNY Graduate Center, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 506–509&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic detection of speaker states and traits is made more difficult by intergroup differences in how they are distributed and expressed in speech and language. In this study, we explore various deep learning architectures for incorporating demographic information into the classification task. We find that early and late fusion of demographic information both improve performance on the task of personality recognition, and a multitask learning model, which performs best, also significantly improves deception detection accuracy. Our findings establish a new state-of-the-art for personality recognition and deception detection on the CXD corpus, and suggest new best practices for mitigating intergroup differences to improve speaker state and trait recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felix Weninger|AUTHOR Felix Weninger]]^^1^^, [[Yang Sun|AUTHOR Yang Sun]]^^2^^, [[Junho Park|AUTHOR Junho Park]]^^1^^, [[Daniel Willett|AUTHOR Daniel Willett]]^^2^^, [[Puming Zhan|AUTHOR Puming Zhan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nuance Communications, USA; ^^2^^Nuance Communications, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 510–514&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present an in-depth study on the classification of regional accents in Mandarin speech. Experiments are carried out on Mandarin speech data systematically collected from 15 different geographical regions in China for broad coverage. We explore bidirectional Long Short-Term Memory (bLSTM) networks and i-vectors to model longer-term acoustic context. Starting from the classification of the collected data into the 15 regional accents, we derive a three-class grouping via non-metric dimensional scaling (NMDS), for which 68.4% average recall can be obtained. Furthermore, we evaluate a state-of-the-art ASR system on the accented data and demonstrate that the character error rate (CER) strongly varies among these accent groups, even if i-vector speaker adaptation is used. Finally, we show that model selection based on the prediction of our bLSTM accent classifier can yield up to 7.6% CER reduction for accented speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]^^1^^, [[László Tóth|AUTHOR László Tóth]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MTA-SZTE RGAI, Hungary; ^^2^^University of Szeged, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 515–519&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>To detect social signals such as laughter or filler events from audio data, a straightforward choice is to apply a Hidden Markov Model (HMM) in combination with a Deep Neural Network (DNN) that supplies the local class posterior estimates ( HMM/DNN hybrid model). However, the posterior estimates of the DNN may be suboptimal due to a mismatch between the cost function used during training (e.g. frame-level cross-entropy) and the actual evaluation metric (e.g. segment-level F,,1,, score). In this study, we show experimentally that by employing a simple posterior probability calibration technique on the DNN outputs, the performance of the HMM/DNN workflow can be significantly improved. Specifically, we apply a linear transformation on the activations of the output layer right before using the softmax function, and fine-tune the parameters of this transformation. Out of the calibration approaches tested, we got the best F,,1,, scores when the posterior calibration process was adjusted so as to maximize the actual HMM-based evaluation metric.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroki Mori|AUTHOR Hiroki Mori]]^^1^^, [[Tomohiro Nagata|AUTHOR Tomohiro Nagata]]^^1^^, [[Yoshiko Arimoto|AUTHOR Yoshiko Arimoto]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Utsunomiya University, Japan; ^^2^^Chiba Institute of Technology, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 520–523&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The studies of laughter synthesis are relatively few, and they are still in a preliminary stage. We explored the possibility of applying WaveNet to laughter synthesis. WaveNet is potentially more suitable to model laughter waveforms that do not have a well-established theory of production like speech signals. Conversational laughter was modelled with a spontaneous dialogue speech corpus based on WaveNet. To obtain more stable laughter generation, conditioning WaveNet by power contour was proposed. Experimental results showed that the synthesized laughter by WaveNet was perceived as closer to natural laughter than HMM-based synthesized laughter.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bogdan Ludusan|AUTHOR Bogdan Ludusan]], [[Petra Wagner|AUTHOR Petra Wagner]]
</p><p class="cpabstractcardaffiliationlist">Universität Bielefeld, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 524–528&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Human verbal communication is a complex phenomenon involving dynamics that normally result in the alignment of participants on several modalities, and across various linguistic domains. We examined here whether such dynamics occur also for paralinguistic events, in particular, in the case of laughter. Using a conversational corpus containing dyadic interactions in three languages (French, German and Mandarin Chinese), we investigated three measures of alignment: convergence, synchrony and agreement. Support for convergence and synchrony was found in all three languages, although the level of support varied with the language, while the agreement in laughter type was found to be significant for the German data. The implications of these findings towards a better understanding of the role of laughter in human communication are discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Khiet P. Truong|AUTHOR Khiet P. Truong]]^^1^^, [[Jürgen Trouvain|AUTHOR Jürgen Trouvain]]^^2^^, [[Michel-Pierre Jansen|AUTHOR Michel-Pierre Jansen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universiteit Twente, The Netherlands; ^^2^^Universität des Saarlandes, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 529–533&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although laughter research has gained quite some interest over the past few years, a shared description of how to annotate laughter and its sub-units is still missing. We present a first attempt towards an annotation scheme that contributes to improving the homogeneity and transparency with which laughter is annotated. This includes the integration of respiratory noises as well as stretches of speech-laughs, and to a limited extend to smiled speech and short silent intervals. Inter-annotator agreement is assessed while applying the scheme to different corpora where laughter is evoked through different methods and varying settings. Annotating laughter becomes more complex when the situation in which laughter occurs becomes more spontaneous and social. There is a substantial disagreement among the annotators with respect to temporal alignment (when does a unit start and when does it end) and unit classification, particularly the determination of starts/ends of laughter episodes. In summary, this detailed laughter annotation study reflects the need for better investigations of the various components of laughter.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alice Baird|AUTHOR Alice Baird]]^^1^^, [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]]^^1^^, [[Nicholas Cummins|AUTHOR Nicholas Cummins]]^^1^^, [[Sarah Sturmbauer|AUTHOR Sarah Sturmbauer]]^^2^^, [[Johanna Janson|AUTHOR Johanna Janson]]^^2^^, [[Eva-Maria Messner|AUTHOR Eva-Maria Messner]]^^3^^, [[Harald Baumeister|AUTHOR Harald Baumeister]]^^3^^, [[Nicolas Rohleder|AUTHOR Nicolas Rohleder]]^^2^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^FAU Erlangen-Nürnberg, Germany; ^^3^^Universität Ulm, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 534–538&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The effect of stress on the human body is substantial, potentially resulting in serious health implications. Furthermore, with modern stressors seemingly on the increase, there is an abundance of contributing factors which lead to a diagnosis of acute stress. However, observing biological stress reactions usually includes costly and time consuming sequential fluid-based samples to determine the degree of biological stress. On the contrary, a speech monitoring approach would allow for a non-invasive indication of stress. To evaluate the efficacy of the speech signal as a marker of stress, we explored, for the first time, the relationship between sequential cortisol samples and speech-based features. Utilising a novel corpus of 43 individuals undergoing a standardised Trier Social Stress Test (TSST), we extract a variety of feature sets and observe a correlation between speech and sequential cortisol measurements. For prediction of mean cortisol levels from speech, results show that for the entire TSST oral presentation, handcrafted COMPARE features achieve best results of 0.244 root mean square error [0 ;1] for the sample 20 minutes after the TSST. Correlation also increases at minute 20, with a Spearman’s correlation coefficient of 0.421, and Cohen’s d of 0.883 between the baseline and minute 20 cortisol predictions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ching-Ting Chang|AUTHOR Ching-Ting Chang]], [[Shun-Po Chuang|AUTHOR Shun-Po Chuang]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 554–558&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching is about dealing with alternative languages in speech or text. It is partially speaker-dependent and domain-related, so completely explaining the phenomenon by linguistic rules is challenging. Compared to most monolingual tasks, insufficient data is an issue for code-switching. To mitigate the issue without expensive human annotation, we proposed an unsupervised method for code-switching data augmentation. By utilizing a generative adversarial network, we can generate intra-sentential code-switching sentences from monolingual sentences. We applied the proposed method on two corpora, and the result shows that the generated code-switching sentences improve the performance of code-switching language models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongyin Luo|AUTHOR Hongyin Luo]]^^1^^, [[Mitra Mohtarami|AUTHOR Mitra Mohtarami]]^^1^^, [[James Glass|AUTHOR James Glass]]^^1^^, [[Karthik Krishnamurthy|AUTHOR Karthik Krishnamurthy]]^^2^^, [[Brigitte Richardson|AUTHOR Brigitte Richardson]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^Ford, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 599–603&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Traditional video question answering models have been designed to retrieve videos to answer input questions. A drawback of this scenario is that users have to watch the entire video to find their desired answer. Recent work presented unsupervised neural models with attention mechanisms to find moments or segments from retrieved videos to provide accurate answers to input questions. Although these two tasks look similar, the latter is more challenging because the former task only needs to judge whether the question is answered in a video and returns the entire video, while the latter is expected to judge which moment within a video matches the question and accurately returns a segment of the video. Moreover, there is a lack of labeled data for training moment detection models. In this paper, we focus on integrating video retrieval and moment detection in a unified corpus. We further develop two models — a self-attention convolutional network and a memory network — for the tasks. Experimental results on our corpus show that the neural models can accurately detect and retrieve moments in supervised settings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Moritz Meier|AUTHOR Moritz Meier]], [[Celeste Mason|AUTHOR Celeste Mason]], [[Felix Putze|AUTHOR Felix Putze]], [[Tanja Schultz|AUTHOR Tanja Schultz]]
</p><p class="cpabstractcardaffiliationlist">Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 559–563&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe our efforts to compare data collection methods using two think-aloud protocols in preparation to be used as a basis for automatic structuring and labeling of a large database of high-dimensional human activities data into a valuable resource for research in cognitive robotics. The envisioned dataset, currently in development, will contain synchronously recorded multimodal data, including audio, video, and biosignals (eye-tracking, motion-tracking, muscle and brain activity) from about 100 participants performing everyday activities while describing their task through use of think-aloud protocols. This paper provides details of our pilot recordings in the well-established and scalable “table setting scenario,” describes the concurrent and retrospective think-aloud protocols used, the methods used to analyze them, and compares their potential impact on the data collected as well as the automatic data segmentation and structuring process.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Doug Beeferman|AUTHOR Doug Beeferman]], [[William Brannon|AUTHOR William Brannon]], [[Deb Roy|AUTHOR Deb Roy]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 564–568&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce RadioTalk, a corpus of speech recognition transcripts sampled from talk radio broadcasts in the United States between October of 2018 and March of 2019. The corpus is intended for use by researchers in the fields of natural language processing, conversational analysis, and the social sciences. The corpus encompasses approximately 2.8 billion words of automatically transcribed speech from 284,000 hours of radio, together with metadata about the speech, such as geographical location, speaker turn boundaries, gender, and radio program information. In this paper we summarize why and how we prepared the corpus, give some descriptive statistics on stations, shows and speakers, and carry out a few high-level analyses.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Salima Mdhaffar|AUTHOR Salima Mdhaffar]]^^1^^, [[Yannick Estève|AUTHOR Yannick Estève]]^^2^^, [[Nicolas Hernandez|AUTHOR Nicolas Hernandez]]^^3^^, [[Antoine Laurent|AUTHOR Antoine Laurent]]^^1^^, [[Richard Dufour|AUTHOR Richard Dufour]]^^2^^, [[Solen Quiniou|AUTHOR Solen Quiniou]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIUM (EA 4023), France; ^^2^^LIA (EA 4128), France; ^^3^^LS2N (UMR 6004), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 569–573&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Lectures are usually known to be highly specialised in that they deal with multiple and domain specific topics. This context is challenging for Automatic Speech Recognition (ASR) systems since they are sensitive to topic variability. Language Model (LM) adaptation is a commonly used technique to address the mismatch problem between training and test data. In this paper, we are interested in a qualitative analysis in order to relevantly compare the accuracy of the LM adaptation. While word error rate is the most common metric used to evaluate ASR systems, we consider that this metric cannot provide accurate information. Consequently, we explore the use of other metrics based on individual word error rate, indexability, and capability of building relevant requests for information retrieval from the ASR outputs. Experiments are carried out on the PASTEL corpus, a new dataset in French language, composed of lecture recordings, manual chaptering, manual transcriptions, and slides. While an adapted LM allows us to reduce the global classical word error rate by 15.62% in relative, we show that this reduction reaches 44.2% when computed on relevant words only. These observations are confirmed with the high LM adaptation gains obtained with indexability and information retrieval metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Federico Marinelli|AUTHOR Federico Marinelli]]^^1^^, [[Alessandra Cervone|AUTHOR Alessandra Cervone]]^^1^^, [[Giuliano Tortoreto|AUTHOR Giuliano Tortoreto]]^^2^^, [[Evgeny A. Stepanov|AUTHOR Evgeny A. Stepanov]]^^2^^, [[Giuseppe Di Fabbrizio|AUTHOR Giuseppe Di Fabbrizio]]^^3^^, [[Giuseppe Riccardi|AUTHOR Giuseppe Riccardi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Trento, Italy; ^^2^^VUI, Italy; ^^3^^VUI, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 574–578&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Natural Language Understanding (NLU) models are typically trained in a supervised learning framework. In the case of intent classification, the predicted labels are predefined and based on the designed annotation schema while the labeling process is based on a laborious task where annotators manually inspect each utterance and assign the corresponding label. We propose an Active Annotation (AA) approach where we combine an unsupervised learning method in the embedding space, a human-in-the-loop verification process, and linguistic insights to create lexicons that can be open categories and adapted over time. In particular, annotators define the y-label space on-the-fly during the annotation using an iterative process and without the need for prior knowledge about the input data. We evaluate the proposed annotation paradigm in a real use-case NLU scenario. Results show that our Active Annotation paradigm achieves accurate and higher quality training data, with an annotation speed of an order of magnitude higher with respect to the traditional human-only driven baseline annotation methodology.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gerardo Roa Dabike|AUTHOR Gerardo Roa Dabike]], [[Jon Barker|AUTHOR Jon Barker]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 579–583&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic sung speech recognition is a relatively understudied topic that has been held back by a lack of large and freely available datasets. This has recently changed thanks to the release of the DAMP Sing! dataset, a 1100 hour karaoke dataset originating from the social music-making company, Smule. This paper presents work undertaken to define an easily replicable, automatic speech recognition benchmark for this data. In particular, we describe how transcripts and alignments have been recovered from Karaoke prompts and timings; how suitable training, development and test sets have been defined with varying degrees of accent variability; and how language models have been developed using lyric data from the LyricWikia website. Initial recognition experiments have been performed using factored-layer TDNN acoustic models with lattice-free MMI training using Kaldi. The best WER is 19.60% — a new state-of-the-art for this type of data. The paper concludes with a discussion of the many challenging problems that remain to be solved. Dataset definitions and Kaldi scripts have been made available so that the benchmark is easily replicable.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 584–588&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose to detect mismatches between speech and transcriptions using deep neural networks. Although it is generally assumed there are no mismatches in some speech related applications, it is hard to avoid the errors due to one reason or another. Moreover, the use of mismatched data probably leads to performance reduction when training a model. In our work, instead of detecting the errors by computing the distance between manual transcriptions and text strings obtained using a speech recogniser, we view mismatch detection as a classification task and merge speech and transcription features using deep neural networks. To enhance detection ability, we use cross-modal attention mechanism in our approach by learning the relevance between the features obtained from the two modalities. To evaluate the effectiveness of our approach, we test it on Factored WSJCAM0 by randomly setting three kinds of mismatch, word deletion, insertion or substitution. To test its robustness, we train our models using a small number of samples and detect mismatch with different number of words being removed, inserted, and substituted. In our experiments, the results show the use of our approach for mismatch detection is close to 80% on insertion and deletion and outperforms the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jazmín Vidal|AUTHOR Jazmín Vidal]], [[Luciana Ferrer|AUTHOR Luciana Ferrer]], [[Leonardo Brambilla|AUTHOR Leonardo Brambilla]]
</p><p class="cpabstractcardaffiliationlist">UBA, Argentina</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 589–593&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we describe the methodology for collecting and annotating a new database designed for conducting research and development on pronunciation assessment. While a significant amount of research has been done in the area of pronunciation assessment, to our knowledge, no database is available for public use for research in the field. Considering this need, we created EpaDB (English Pronunciation by Argentinians Database), which is composed of English phrases read by native Spanish speakers with different levels of English proficiency. The recordings are annotated with ratings of pronunciation quality at phrase-level and detailed phonetic alignments and transcriptions indicating which phones were actually pronounced by the speakers. We present inter-rater agreement, the effect of each phone on overall perceived non-nativeness, and the frequency of specific pronunciation errors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Katrin Angerbauer|AUTHOR Katrin Angerbauer]], [[Heike Adel|AUTHOR Heike Adel]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]
</p><p class="cpabstractcardaffiliationlist">Universität Stuttgart, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 594–598&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Understanding spoken language can be impeded through factors like noisy environments, hearing impairments or lack of proficiency. Subtitles can help in those cases. However, for fast speech or limited screen size, it might be advantageous to compress the subtitles to their most relevant content. Therefore, we address automatic sentence compression in this paper. We propose a neural network model based on an encoder-decoder approach with the possibility of integrating the desired compression ratio. Using this model, we conduct a user study to investigate the effects of compressed subtitles on user experience. Our results show that compressed subtitles can suffice for comprehension but may pose additional cognitive load.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sarah E. Gutz|AUTHOR Sarah E. Gutz]]^^1^^, [[Jun Wang|AUTHOR Jun Wang]]^^2^^, [[Yana Yunusova|AUTHOR Yana Yunusova]]^^3^^, [[Jordan R. Green|AUTHOR Jordan R. Green]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MGH Institute of Health Professions, USA; ^^2^^University of Texas at Austin, USA; ^^3^^University of Toronto, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 604–608&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We used a machine learning (ML) approach to detect bulbar amyotrophic lateral sclerosis (ALS) prior to the onset of overt speech symptoms. The dataset included speech samples from 123 participants who were stratified by sex and into three groups: healthy controls, ALS symptomatic, and ALS presymptomatic. We compared models trained on three group pairs (symptomatic-control, presymptomatic-control, and all ALS-control participants). Using acoustic features obtained with the OpenSMILE ComParE13 configuration, we tested several feature filtering techniques. ML classification was achieved using an SVM model and leave-one-out cross-validation. The most successful model, which was trained on symptomatic-control data, yielded an AUC=0.99 for females and AUC=0.91 for males. Models trained on all ALS-control participants had high diagnostic accuracy for classifying symptomatic and presymptomatic ALS participants (females: AUC=0.85; males: AUC=0.91). Additionally, probabilities from these models correlated with speaking rate (females: Spearman coefficient=-0.60, p<0.001; males: Spearman coefficient=-0.43, p<0.001) and intelligible speaking rate (females: Spearman coefficient=-0.65, p<0.001; males: Spearman coefficient=-0.40, p<0.01), indicating their possible use as a severity index of bulbar motor involvement in ALS. These results highlight the importance of stratifying patients by speech severity when testing diagnostic models and demonstrate the potential of ML classification in early detection and progress monitoring of ALS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lukas Mateju|AUTHOR Lukas Mateju]], [[Petr Cerva|AUTHOR Petr Cerva]], [[Jindrich Zdansky|AUTHOR Jindrich Zdansky]]
</p><p class="cpabstractcardaffiliationlist">Technical University of Liberec, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 649–653&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a new approach to speaker change point (SCP) detection is presented. This method is suitable for online applications (e.g., real-time broadcast monitoring). It is designed in a series of consecutive experiments, aiming at quality of detection as well as low latency. The resulting scheme utilizes a convolution neural network (CNN), whose output is smoothed by a decoder. The CNN is trained using data complemented by artificial examples to reduce different types of errors, and the decoder is based on a weighted finite state transducer (WFST) with the forced length of the transition model. Results obtained on data taken from the COST278 database show that our online approach yields results comparable with an offline multi-pass LIUM toolkit while operating online with a low latency.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenyu Tang|AUTHOR Zhenyu Tang]], [[John D. Kanu|AUTHOR John D. Kanu]], [[Kevin Hogan|AUTHOR Kevin Hogan]], [[Dinesh Manocha|AUTHOR Dinesh Manocha]]
</p><p class="cpabstractcardaffiliationlist">University of Maryland at College Park, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 654–658&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel learning-based approach to estimate the direction-of-arrival (DOA) of a sound source using a convolutional recurrent neural network (CRNN) trained via regression on synthetic data and Cartesian labels. We also describe an improved method to generate synthetic data to train the neural network using state-of-the-art sound propagation algorithms that model specular as well as diffuse reflections of sound. We compare our model against three other CRNNs trained using different formulations of the same problem: classification on categorical labels, and regression on spherical coordinate labels. In practice, our model achieves up to 43% decrease in angular error over prior methods. The use of diffuse reflection results in 34% and 41% reduction in angular prediction errors on LOCATA and SOFA datasets, respectively, over prior methods based on image-source methods. Our method results in an additional 3% error reduction over prior schemes that use classification networks, and we use 36% fewer network parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohamed Ismail Yasar Arafath K.|AUTHOR Mohamed Ismail Yasar Arafath K.]], [[Aurobinda Routray|AUTHOR Aurobinda Routray]]
</p><p class="cpabstractcardaffiliationlist">IIT Kharagpur, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 609–613&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/2434" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Breath detection during speech has broad applications ranging from emotion recognition to detection of diseases. Most of the breath detection equipment are contact based. In the proposed method, we use a voice activity detector (VAD) to find the non-speech region and searches the breath only in this region since breath is a non-speech activity. This reduces the execution time. A support vector machine (SVM) classifier is used with radial basis function (RBF) kernel trained on the cepstrogram feature to detect the breaths in the non-speech regions. The classifier output is post-processed to join breathing segments which are closely spaced and remove small duration breaths. Speech breathing rate is calculated as the ratio of the number of breaths to the time between the first and last breath. The algorithm is tested on a student evaluation database. The algorithm yields an F1 Score of 94% and root mean square error (RMSE) of 7.08 breaths/min for the speech-breathing rate. The output has been validated using thermal videos. The breaths have been classified as full and partial detection based on the Intersection over Union (IOU). The algorithm is also tested on some news channel reports which gave a minimum F1 Score of 73%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 614–618&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic scene classification identifies an input segment into one of the pre-defined classes using spectral information. The spectral information of acoustic scenes may not be mutually exclusive due to common acoustic properties across different classes, such as babble noises included in both airports and shopping malls. However, conventional training procedure based on one-hot labels does not consider the similarities between different acoustic scenes. We exploit teacher-student learning with the purpose to derive soft-labels that consider common acoustic properties among different acoustic scenes. In teacher-student learning, the teacher network produces soft-labels, based on which the student network is trained. We investigate various methods to extract soft-labels that better represent similarities across different scenes. Such attempts include extracting soft-labels from multiple audio segments that are defined as an identical acoustic scene. Experimental results demonstrate the potential of our approach, showing a classification accuracy of 77.36% on the DCASE 2018 task 1 validation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yanping Chen|AUTHOR Yanping Chen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]
</p><p class="cpabstractcardaffiliationlist">Samsung, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 619–623&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>There is an increasing interest in smart environment and a growing adoption of smart devices. Smart assistants such as Google Home and Amazon Alexa, although focus on speech, could be extended to identify domestic events in real-time to provide more and better smart functions. Sound event detection aims to detect multiple target sound events that may happen simultaneously. The task is challenging due to the overlapping of sound events, the highly imbalanced nature of target and non-target data, and the complicated real-world background noise. In this paper, we proposed a unified approach that takes advantages of both the deep learning and data augmentation. A convolutional neural network (CNN) was combined with a feed-forward neural network (FNN) to improve the detection performance, and a dynamic time warping based data augmentation (DA) method was proposed to address the data imbalance problem. Experiments on several datasets showed a more than 7% increase in accuracy compared to the state-of-the-art approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 624–628&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech and singing are different in many ways. In this work, we propose a novel method to align phonetically identical spoken lyric with a singing vocal in a speech-singing parallel corpus, that is needed in speech-to-singing conversion. We attempt to align speech to singing vocal using a combination of model-based forced alignment and feature-based dynamic time warping (DTW). We first obtain the word boundaries of speech and singing vocals with forced alignment using speech and singing adapted acoustic models, respectively. We consider that speech acoustic models are more accurate than singing acoustic models, therefore, boundaries of spoken words are more accurate than sung words. By searching in the neighborhood of the sung word boundaries in the singing vocal, we hope to improve the alignment between spoken words and sung words. Considering the word boundaries as landmark, we perform speech-to-singing alignment at frame-level using DTW. The proposed method is able to achieve a 47.5% reduction in terms of word boundary error over the baseline, and subsequent improvement of singing quality in a speech-to-singing conversion system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yosi Shrem|AUTHOR Yosi Shrem]]^^1^^, [[Matthew Goldrick|AUTHOR Matthew Goldrick]]^^2^^, [[Joseph Keshet|AUTHOR Joseph Keshet]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Bar-Ilan University, Israel; ^^2^^Northwestern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 629–633&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice Onset Time (VOT), a key measurement of speech for basic research and applied medical studies, is the time between the onset of a stop burst and the onset of voicing. When the voicing onset precedes burst onset the VOT is negative; if voicing onset follows the burst, it is positive. In this work, we present a deep-learning model for accurate and reliable measurement of VOT in naturalistic speech. The proposed system addresses two critical issues: it can measure positive and negative VOT equally well, and it is trained to be robust to variation across annotations. Our approach is based on the structured prediction framework, where the feature functions are defined to be RNNs. These learn to capture segmental variation in the signal. Results suggest that our method substantially improves over the current state-of-the-art. In contrast to previous work, our Deep and Robust VOT annotator, Dr.VOT, can successfully estimate negative VOTs while maintaining state-of-the-art performance on positive VOTs. This high level of performance generalizes to new corpora without further retraining.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[J. Hui|AUTHOR J. Hui]], [[Y. Wei|AUTHOR Y. Wei]], [[S.T. Chen|AUTHOR S.T. Chen]], [[R.H.Y. So|AUTHOR R.H.Y. So]]
</p><p class="cpabstractcardaffiliationlist">HKUST, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 634–638&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Base-frequencies (F0) and spectral envelopes play an important role in speech separation and recognition by humans. Two experiments were conducted to study how trained networks for multi-speaker speech separation/recognition are affected by difference of F0 and spectral envelopes between source signals. The first experiment examined the effects of natural F0/envelope on the performance of speech separation. Results showed that when the two target signals differed in F0 by ±3 semitones or more or differed in the envelope by a scaling factor larger than 1.08 or less than 0.92, separation performance improved significantly. This is consistent with human listeners and is the first finding for deep learning-network (DNN) models. The second experiment tested the effect of F0/envelope difference on multi-speaker automatic speech recognition(ASR) system’s performance. Results showed that multi-speaker recognition result also significantly rely on F0/envelope differences. The overall results indicated that the dependency of the existing automatic systems on monaural cues is similar to that of human, while automatic systems still perform inferior than human on same tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nirmesh J. Shah|AUTHOR Nirmesh J. Shah]], [[Hemant A. Patil|AUTHOR Hemant A. Patil]]
</p><p class="cpabstractcardaffiliationlist">DA-IICT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 639–643&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Nearest Neighbor (NN)-based alignment techniques are popular in non-parallel Voice Conversion (VC). The performance of NN-based alignment improves with the information about phone boundary. However, estimating the exact phone boundary is a challenging task. If text corresponding to the utterance is available, the Hidden Markov Model (HMM) can be used to identify the phone boundaries. However, it requires a large amount of training data that is difficult to collect in realistic VC scenarios. Hence, we propose to exploit a Spectral Transition Measure (STM)-based alignment technique that does not require apriori training data. The idea behind STM is that neurons in the auditory or visual cortex respond strongly to the  transitional stimuli compared to the steady-state stimuli. The phone boundaries estimated using the STM algorithm are then applied to the NN technique to obtain the aligned spectral features of the source and target speakers. Proposed STM+NN alignment technique is giving on an average 13.67% relative improvement in phonetic accuracy (PA) compared to the NN-based alignment technique. The improvement in %PA after alignment has positively reflected in the better performance in terms of speech quality and speaker similarity (in particular, a relative improvement of 13.63% and 13.26% , respectively) of the converted voice.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 644–648&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel approach for blind syllable segmentation that combines model-based feature selection with data-driven classification. In particular, we learn a function that maps short-term energy peaks of a speech utterance onto either the vowel or consonant class. The features used for classification capture spectral and energy signatures which are characteristic of the phonetic properties of the English language. The identified vowel peaks subsequently act as the nucleus of our syllable segments. We demonstrate the effectiveness of our proposed method using nested cross validation on 400 unique test utterances taken randomly from the TIMIT dataset containing over 5000 syllables in total. Our hybrid approach achieves lower insertion rate than the state-of-the-art segmentation methods and a lower deletion rate than all the baseline comparisons.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dipjyoti Paul|AUTHOR Dipjyoti Paul]]^^1^^, [[Yannis Pantazis|AUTHOR Yannis Pantazis]]^^2^^, [[Yannis Stylianou|AUTHOR Yannis Stylianou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Crete, Greece; ^^2^^FORTH, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 659–663&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we suggest a novel way to train Generative Adversarial Network (GAN) for the purpose of non-parallel, many-to-many voice conversion. The goal of voice conversion (VC) is to transform speech from a source speaker to that of a target speaker without changing the phonetic contents. Based on ideas from Game Theory, we suggest to multiply the gradient of the Generator with suitable weights. Weights are calculated so that they increase the power of fake samples that fool the Discriminator resulting in a stronger Generator. Motivated by a recently presented GAN based approach for VC, StarGAN-VC, we suggest a variation to StarGAN, referred to as Weighted StarGAN (WeStarGAN). The experiments are conducted on standard CMU ARCTIC database. WeStarGAN-VC approach achieves significantly better relative performance and is clearly preferred over recently proposed StarGAN-VC method in terms of speech subjective quality and speaker similarity with 75% and 65% preference scores, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seyed Hamidreza Mohammadi|AUTHOR Seyed Hamidreza Mohammadi]], [[Taehwan Kim|AUTHOR Taehwan Kim]]
</p><p class="cpabstractcardaffiliationlist">ObEN, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 704–708&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose voice conversion model from arbitrary source speaker to arbitrary target speaker with disentangled representations. Voice conversion is a task to convert the voice of spoken utterance of source speaker to that of target speaker. Most prior work require to know either source speaker or target speaker or both in training, with either parallel or non-parallel corpus. Instead, we study the problem of voice conversion in nonparallel speech corpora and one-shot learning setting. We convert an arbitrary sentences of an arbitrary source speaker to target speakers given only one or few target speaker training utterances. To achieve this, we propose to use disentangled representations of speaker identity and linguistic context. We use a recurrent neural network (RNN) encoder for speaker embedding and phonetic posteriorgram as linguistic context encoding, along with a RNN decoder to generate converted utterances. Ours is a simpler model without adversarial training or hierarchical model design and thus more efficient. In the subjective tests, our approach achieved significantly better results compared to baseline regarding similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wen-Chin Huang|AUTHOR Wen-Chin Huang]]^^1^^, [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]]^^2^^, [[Chen-Chou Lo|AUTHOR Chen-Chou Lo]]^^1^^, [[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]]^^2^^, [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]]^^2^^, [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]]^^2^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^2^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica, Taiwan; ^^2^^Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 709–713&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we investigate the effectiveness of two techniques for improving variational autoencoder (VAE) based voice conversion (VC). First, we reconsider the relationship between vocoder features extracted using the high quality vocoders adopted in conventional VC systems, and hypothesize that the spectral features are in fact F0 dependent. Such hypothesis implies that during the conversion phase, the latent codes and the converted features in VAE based VC are in fact source F0 dependent. To this end, we propose to utilize the F0 as an additional input of the decoder. The model can learn to disentangle the latent code from the F0 and thus generates converted F0 dependent converted features. Second, to better capture temporal dependencies of the spectral features and the F0 pattern, we replace the frame wise conversion structure in the original VAE based VC framework with a fully convolutional network structure. Our experiments demonstrate that the degree of disentanglement as well as the naturalness of the converted speech are indeed improved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Songxiang Liu|AUTHOR Songxiang Liu]]^^1^^, [[Yuewen Cao|AUTHOR Yuewen Cao]]^^1^^, [[Xixin Wu|AUTHOR Xixin Wu]]^^1^^, [[Lifa Sun|AUTHOR Lifa Sun]]^^2^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^SpeechX, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 714–718&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The N10 system in the Voice Conversion Challenge 2018 (VCC 2018) has achieved high voice conversion (VC) performance in terms of speech naturalness and speaker similarity. We believe that further improvements can be gained from joint optimization (instead of separate optimization) of the conversion model and WaveNet vocoder, as well as leveraging information from the acoustic representation of the speech waveform, e.g. from Mel-spectrograms. In this paper, we propose a VC architecture to jointly train a conversion model that maps phonetic posteriorgrams (PPGs) to Mel-spectrograms and a WaveNet vocoder. The conversion model has a bottle-neck layer, whose outputs are concatenated with PPGs before being fed into the WaveNet vocoder as local conditioning. A weighted sum of a Mel-spectrogram prediction loss and a WaveNet loss is used as the objective function to jointly optimize parameters of the conversion model and the WaveNet vocoder. Objective and subjective evaluation results show that the proposed approach is capable of achieving significantly improved quality in voice conversion in terms of speech naturalness and speaker similarity of the converted speech for both cross-gender and intra-gender conversions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Li-Wei Chen|AUTHOR Li-Wei Chen]]^^1^^, [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan University, Taiwan; ^^2^^Academia Sinica, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 719–723&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper focuses on using voice conversion (VC) to improve the speech intelligibility of surgical patients who have had parts of their articulators removed. Due to the difficulty of data collection, VC without parallel data is highly desired. Although techniques for unparallel VC — for example, CycleGAN — have been developed, they usually focus on transforming the speaker identity, and directly transforming the speech of one speaker to that of another speaker and as such do not address the task here. In this paper, we propose a new approach for unparallel VC. The proposed approach transforms impaired speech to normal speech while preserving the linguistic content and speaker characteristics. To our knowledge, this is the first end-to-end GAN-based unsupervised VC model applied to impaired speech. The experimental results show that the proposed approach outperforms CycleGAN.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shaojin Ding|AUTHOR Shaojin Ding]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]
</p><p class="cpabstractcardaffiliationlist">Texas A&M University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 724–728&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a Group Latent Embedding for Vector Quantized Variational Autoencoders (VQ-VAE) used in nonparallel Voice Conversion (VC). Previous studies have shown that VQ-VAE can generate high-quality VC syntheses when it is paired with a powerful decoder. However, in a conventional VQ-VAE, adjacent atoms in the embedding dictionary can represent entirely different phonetic content. Therefore, the VC syntheses can have mispronunciations and distortions whenever the output of the encoder is quantized to an atom representing entirely different phonetic content. To address this issue, we propose an approach that divides the embedding dictionary into groups and uses the weighted average of atoms in the nearest group as the latent embedding. We conducted both objective and subjective experiments on the non-parallel CSTR VCTK corpus. Results show that the proposed approach significantly improves the acoustic quality of the VC syntheses compared to the traditional VQ-VAE (13.7% relative improvement) while retaining the voice identity of the target speaker.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cory Stephenson|AUTHOR Cory Stephenson]], [[Gokce Keskin|AUTHOR Gokce Keskin]], [[Anil Thomas|AUTHOR Anil Thomas]], [[Oguz H. Elibol|AUTHOR Oguz H. Elibol]]
</p><p class="cpabstractcardaffiliationlist">Intel, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 729–733&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we introduce a semi-supervised approach to the voice conversion problem, in which speech from a source speaker is converted into speech of a target speaker. The proposed method makes use of both parallel and non-parallel utterances from the source and target simultaneously during training. This approach can be used to extend existing parallel data voice conversion systems such that they can be trained with semi-supervision. We show that incorporating semi-supervision improves the voice conversion performance compared to fully supervised training when the number of parallel utterances is limited as in many practical applications. Additionally, we find that increasing the number non-parallel utterances used in training continues to improve performance when the amount of parallel training data is held constant.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ju-chieh Chou|AUTHOR Ju-chieh Chou]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 664–668&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, voice conversion (VC) without parallel data has been successfully adapted to multi-target scenario in which a single model is trained to convert the input voice to many different speakers. However, such model suffers from the limitation that it can only convert the voice to the speakers in the training data, which narrows down the applicable scenario of VC. In this paper, we proposed a novel one-shot VC approach which is able to perform VC by only an example utterance from source and target speaker respectively, and the source and target speaker do not even need to be seen during training. This is achieved by disentangling speaker and content representations with instance normalization (IN). Objective and subjective evaluation shows that our model is able to generate the voice similar to target speaker. In addition to the performance measurement, we also demonstrate that this model is able to learn meaningful speaker representations without any supervision.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hui Lu|AUTHOR Hui Lu]]^^1^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Dongyang Dai|AUTHOR Dongyang Dai]]^^1^^, [[Runnan Li|AUTHOR Runnan Li]]^^1^^, [[Shiyin Kang|AUTHOR Shiyin Kang]]^^2^^, [[Jia Jia|AUTHOR Jia Jia]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 669–673&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Building a voice conversion (VC) system for a new target speaker typically requires a large amount of speech data from the target speaker. This paper investigates a method to build a VC system for arbitrary target speaker using one given utterance without any adaptation training process. Inspired by global style tokens (GSTs), which recently has been shown to be effective in controlling the style of synthetic speech, we propose the use of global speaker embeddings (GSEs) to control the conversion target of the VC system. Speaker-independent phonetic posteriorgrams (PPGs) are employed as the local condition input to a conditional WaveNet synthesizer for waveform generation of the target speaker. Meanwhile, spectrograms are extracted from the given utterance and fed into a reference encoder, the generated reference embedding is then employed as attention query to the GSEs to produce the speaker embedding, which is employed as the global condition input to the WaveNet synthesizer to control the generated waveform’s speaker identity. In experiments, when compared with an adaptation training based any-to-any VC system, the proposed GSEs based VC approach performs equally well or better in both speech naturalness and speaker similarity, with apparently higher flexibility to the comparison.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]]
</p><p class="cpabstractcardaffiliationlist">Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 674–678&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel technique for a non-parallel voice conversion (VC) with the use of cyclic variational auto-encoder (CycleVAE)-based spectral modeling. In a variational autoencoder (VAE) framework, a latent space, usually with a Gaussian prior, is used to encode a set of input features. In a VAE-based VC, the encoded latent features are fed into a decoder, along with speaker-coding features, to generate estimated spectra with either the original speaker identity (reconstructed) or another speaker identity (converted). Due to the non-parallel modeling condition, the converted spectra can not be directly optimized, which heavily degrades the performance of a VAE-based VC. In this work, to overcome this problem, we propose to use CycleVAE-based spectral model that indirectly optimizes the conversion flow by recycling the converted features back into the system to obtain corresponding cyclic reconstructed spectra that can be directly optimized. The cyclic flow can be continued by using the cyclic reconstructed features as input for the next cycle. The experimental results demonstrate the effectiveness of the proposed CycleVAE-based VC, which yields higher accuracy of converted spectra, generates latent features with higher correlation degree, and significantly improves the quality and conversion accuracy of the converted speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuhiro Kaneko|AUTHOR Takuhiro Kaneko]], [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]], [[Kou Tanaka|AUTHOR Kou Tanaka]], [[Nobukatsu Hojo|AUTHOR Nobukatsu Hojo]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 679–683&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Non-parallel multi-domain voice conversion (VC) is a technique for learning mappings among multiple domains without relying on parallel data. This is important but challenging owing to the requirement of learning multiple mappings and the non-availability of explicit supervision. Recently, StarGAN-VC has garnered attention owing to its ability to solve this problem only using a single generator. However, there is still a gap between real and converted speech. To bridge this gap, we rethink conditional methods of StarGAN-VC, which are key components for achieving non-parallel multi-domain VC in a single model, and propose an improved variant called StarGAN-VC2. Particularly, we rethink conditional methods in two aspects: training objectives and network architectures. For the former, we propose a source-and-target conditional adversarial loss that allows all source domain data to be convertible to the target domain data. For the latter, we introduce a modulation-based conditional method that can transform the modulation of the acoustic feature in a domain-specific manner. We evaluated our methods on non-parallel multi-speaker VC. An objective evaluation demonstrates that our proposed methods improve speech quality in terms of both global and local structure measures. Furthermore, a subjective evaluation shows that StarGAN-VC2 outperforms StarGAN-VC in terms of naturalness and speaker similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yusuke Kurita|AUTHOR Yusuke Kurita]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Kazuya Takeda|AUTHOR Kazuya Takeda]], [[Tomoki Toda|AUTHOR Tomoki Toda]]
</p><p class="cpabstractcardaffiliationlist">Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 684–688&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an investigation of the robustness of statistical voice conversion (VC) under noisy environments. To develop various VC applications, such as augmented vocal production and augmented speech production, it is necessary to handle noisy input speech because some background sounds, such as external noise and an accompanying sound, usually exist in a real environment. In this paper, we investigate an impact of the background sounds on the conversion performance in singing voice conversion focusing on two main VC frameworks, 1) vocoder-based VC and 2) vocoder-free VC based on direct waveform modification. We conduct a subjective evaluation on the converted singing voice quality under noisy conditions and reveal that the vocoder-free VC is more robust against background sounds compared with the vocoder-based VC. We also analyze the robustness of statistical VC and show that a kurtosis ratio of power spectral components before and after conversion is useful as an objective metric to evaluate it without using any target reference signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shengkui Zhao|AUTHOR Shengkui Zhao]], [[Trung Hieu Nguyen|AUTHOR Trung Hieu Nguyen]], [[Hao Wang|AUTHOR Hao Wang]], [[Bin Ma|AUTHOR Bin Ma]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 689–693&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a fast learning framework for non-parallel many-to-many voice conversion with residual Star Generative Adversarial Networks (StarGAN). In addition to the state-of-the-art StarGAN-VC approach that learns an unreferenced mapping between a group of speakers’ acoustic features for nonparallel many-to-many voice conversion, our method, which we call Res-StarGAN-VC, presents an enhancement by incorporating a residual mapping. The idea is to leverage on the shared linguistic content between source and target features during conversion. The residual mapping is realized by using identity shortcut connections from the input to the output of the generator in Res-StarGAN-VC. Such shortcut connections accelerate the learning process of the network with no increase of parameters and computational complexity. They also help generate high-quality fake samples at the very beginning of the adversarial training. Experiments and subjective evaluations show that the proposed method offers (1) significantly faster convergence in adversarial training and (2) clearer pronunciations and better speaker similarity of converted speech, compared to the StarGAN-VC baseline on both mono-lingual and cross-lingual many-to-many voice conversion tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lauri Juvela|AUTHOR Lauri Juvela]]^^1^^, [[Bajibabu Bollepalli|AUTHOR Bajibabu Bollepalli]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^2^^, [[Paavo Alku|AUTHOR Paavo Alku]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 694–698&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances in neural network -based text-to-speech have reached human level naturalness in synthetic speech. The present sequence-to-sequence models can directly map text to mel-spectrogram acoustic features, which are convenient for modeling, but present additional challenges for vocoding (i.e., waveform generation from the acoustic features). High-quality synthesis can be achieved with neural vocoders, such as WaveNet, but such autoregressive models suffer from slow sequential inference. Meanwhile, their existing parallel inference counterparts are difficult to train and require increasingly large model sizes. In this paper, we propose an alternative training strategy for a parallel neural vocoder utilizing generative adversarial networks, and integrate a linear predictive synthesis filter into the model. Results show that the proposed model achieves significant improvement in inference speed, while outperforming a WaveNet in copy-synthesis quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryuichi Yamamoto|AUTHOR Ryuichi Yamamoto]]^^1^^, [[Eunwoo Song|AUTHOR Eunwoo Song]]^^2^^, [[Jae-Min Kim|AUTHOR Jae-Min Kim]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LINE, Japan; ^^2^^NAVER, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 699–703&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes an effective probability density distillation (PDD) algorithm for WaveNet-based parallel waveform generation (PWG) systems. Recently proposed teacher-student frameworks in the PWG system have successfully achieved a real-time generation of speech signals. However, the difficulties optimizing the PDD criteria without auxiliary losses result in quality degradation of synthesized speech. To generate more natural speech signals within the teacher-student framework, we propose a novel optimization criterion based on generative adversarial networks (GANs). In the proposed method, the inverse autoregressive flow-based student model is incorporated as a generator in the GAN framework, and jointly optimized by the PDD mechanism with the proposed adversarial learning method. As this process encourages the student to model the distribution of realistic speech waveform, the perceptual quality of the synthesized speech becomes much more natural. Our experimental results verify that the PWG systems with the proposed method outperform both those using conventional approaches, and also autoregressive generation systems with a well-trained teacher WaveNet.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Subhadeep Dey|AUTHOR Subhadeep Dey]]^^1^^, [[Petr Motlicek|AUTHOR Petr Motlicek]]^^1^^, [[Trung Bui|AUTHOR Trung Bui]]^^2^^, [[Franck Dernoncourt|AUTHOR Franck Dernoncourt]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Idiap Research Institute, Switzerland; ^^2^^Adobe, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 734–738&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we explore various approaches for semi-supervised learning in an end-to-end automatic speech recognition (ASR) framework. The first step in our approach involves training a seed model on the limited amount of labelled data. Additional unlabelled speech data is employed through a data-selection mechanism to obtain the best hypothesized output, further used to retrain the seed model. However, uncertainties of the model may not be well captured with a single hypothesis. As opposed to this technique, we apply a dropout mechanism to capture the uncertainty by obtaining multiple hypothesized text transcripts of an speech recording. We assume that the diversity of automatically generated transcripts for an utterance will implicitly increase the reliability of the model. Finally, the data-selection process is also applied on these hypothesized transcripts to reduce the uncertainty. Experiments on freely-available TEDLIUM corpus and proprietary Adobe’s internal dataset show that the proposed approach significantly reduces ASR errors, compared to the baseline model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abhinav Jain|AUTHOR Abhinav Jain]], [[Vishwanath P. Singh|AUTHOR Vishwanath P. Singh]], [[Shakti P. Rath|AUTHOR Shakti P. Rath]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 779–783&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A major challenge in Automatic Speech Recognition(ASR) systems is to handle speech from a diverse set of accents. A model trained using a single accent performs rather poorly when confronted with different accents. One of the solutions is a multi-condition model trained on all the accents. However the performance improvement in this approach might be rather limited. Otherwise, accent-specific models might be trained but they become impractical as number of accents increases. In this paper, we propose a novel acoustic model architecture based on Mixture of Experts (MoE) which works well on multiple accents without having the overhead of training separate models for separate accents. The work is based on our earlier work, termed as MixNet, where we showed performance improvement by separation of phonetic class distributions in the feature space. In this paper, we propose an architecture that helps to compensate phonetic and accent variabilities which helps in even better discrimination among the classes. These variabilities are learned in a joint frame-work, and produce consistent improvements over all the individual accents, amounting to an overall 18% relative improvement in accuracy compared to baseline trained in multi-condition style.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joel Shor|AUTHOR Joel Shor]]^^1^^, [[Dotan Emanuel|AUTHOR Dotan Emanuel]]^^1^^, [[Oran Lang|AUTHOR Oran Lang]]^^1^^, [[Omry Tuval|AUTHOR Omry Tuval]]^^1^^, [[Michael Brenner|AUTHOR Michael Brenner]]^^1^^, [[Julie Cattiau|AUTHOR Julie Cattiau]]^^1^^, [[Fernando Vieira|AUTHOR Fernando Vieira]]^^2^^, [[Maeve McNally|AUTHOR Maeve McNally]]^^2^^, [[Taylor Charbonneau|AUTHOR Taylor Charbonneau]]^^2^^, [[Melissa Nollstadt|AUTHOR Melissa Nollstadt]]^^2^^, [[Avinatan Hassidim|AUTHOR Avinatan Hassidim]]^^1^^, [[Yossi Matias|AUTHOR Yossi Matias]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^ALS TDI, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 784–788&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) systems have dramatically improved over the last few years. ASR systems are most often trained from ‘typical’ speech, which means that underrepresented groups don’t experience the same level of improvement. In this paper, we present and evaluate finetuning techniques to improve ASR for users with non-standard speech. We focus on two types of non-standard speech: speech from people with amyotrophic lateral sclerosis (ALS) and accented speech. We train personalized models that achieve 62% and 35% relative WER improvement on these two groups, bringing the absolute WER for ALS speakers, on a test set of message bank phrases, down to 10% for mild dysarthria and 20% for more serious dysarthria. We show that 71% of the improvement comes from only 5 minutes of training data. Finetuning a particular subset of layers (with many fewer parameters) often gives better results than finetuning the entire model. This is the first step towards building state of the art ASR models for dysarthric speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chanwoo Kim|AUTHOR Chanwoo Kim]], [[Minkyu Shin|AUTHOR Minkyu Shin]], [[Abhinav Garg|AUTHOR Abhinav Garg]], [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 739–743&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present an improved vocal tract length perturbation (VTLP) algorithm as a data augmentation technique. VTLP is usually accomplished by adjusting the center frequencies of mel filterbank in [1]. Compared to the conventional approach, we re-synthesize waveforms from the frequency-warped spectra using overlap and addition (OLA). This approach had two advantages: First, we can apply an “acoustic simulator” [2, 3] after performing the VTLP-based frequency warping. Second, we may use a different window length for frequency warping from that used in feature processing. We observe that the best performance was obtained when the warping coefficient distribution is between 0.8 and 1.2, and the window length is 50 ms. We obtained 3.66% WER and 12.39% WER on the Librispeech test-clean and test-other using an attention-based end-to-end speech recognition system without using any Language Models (LMs). Using the shallow-fusion technique with a Transformer LM, we achieved 2.44% WER and 8.29% WER on the Librispeech test-clean and test-other sets. To the best of our knowledge, the 2.44% WER on the test-clean is the best result ever reported on this test set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Han Zhu|AUTHOR Han Zhu]], [[Li Wang|AUTHOR Li Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 744–748&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When only a limited amount of accented speech data is available, to promote multi-accent speech recognition performance, the conventional approach is accent-specific adaptation, which adapts the baseline model to multiple target accents independently. To simplify the adaptation procedure, we explore adapting the baseline model to multiple target accents simultaneously with multi-accent mixed data. Thus, we propose using accent-specific top layer with gate mechanism (AST-G) to realize multi-accent adaptation. Compared with the baseline model and accent-specific adaptation, AST-G achieves 9.8% and 1.9% average relative WER reduction respectively. However, in real-world applications, we can’t obtain the accent category label for inference in advance. Therefore, we apply using an accent classifier to predict the accent label. To jointly train the acoustic model and the accent classifier, we propose the multi-task learning with gate mechanism (MTL-G). As the accent label prediction could be inaccurate, it performs worse than the accent-specific adaptation. Yet, in comparison with the baseline model, MTL-G achieves 5.1% average relative WER reduction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pengcheng Guo|AUTHOR Pengcheng Guo]], [[Sining Sun|AUTHOR Sining Sun]], [[Lei Xie|AUTHOR Lei Xie]]
</p><p class="cpabstractcardaffiliationlist">Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 749–753&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent adversarial methods proposed for unsupervised domain adaptation of acoustic models try to fool a specific domain discriminator and learn both senone-discriminative and domain-invariant hidden feature representations. However, a drawback of these approaches is that the feature generator simply aligns different features into the same distribution without considering the class boundaries of the target domain data. Thus, ambiguous target domain features can be generated near the decision boundaries, decreasing speech recognition performance. In this study, we propose to use Adversarial Dropout Regularization (ADR) in acoustic modeling to overcome the foregoing issue. Specifically, we optimize the senone classifier to make its decision boundaries lie in the class boundaries of unlabeled target data. Then, the feature generator learns to create features far away from the decision boundaries, which are more discriminative. We apply the ADR approach on the CHiME-3 corpus and the proposed method yields up to 12.9% relative WER reductions compared with the baseline trained on source domain data only and further improvement over the widely used gradient reversal layer method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Markus Kitza|AUTHOR Markus Kitza]], [[Pavel Golik|AUTHOR Pavel Golik]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 754–758&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper addresses the robust speech recognition problem as an adaptation task. Specifically, we investigate the cumulative application of adaptation methods. A bidirectional Long Short-Term Memory (BLSTM) based neural network, capable of learning temporal relationships and translation invariant representations, is used for robust acoustic modeling. Further, i-vectors were used as an input to the neural network to perform instantaneous speaker and environment adaptation, providing 8% relative improvement in word error rate on the NIST Hub5 2000 evaluation testset. By enhancing the first-pass i-vector based adaptation with a second-pass adaptation using speaker and environment dependent transformations within the network, a further relative improvement of 5% in word error rate was achieved. We have reevaluated the features used to estimate i-vectors and their normalization to achieve the best performance in a modern large scale automatic speech recognition system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xurong Xie|AUTHOR Xurong Xie]]^^1^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Tan Lee|AUTHOR Tan Lee]]^^1^^, [[Lan Wang|AUTHOR Lan Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 759–763&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker adaptation techniques play a key role in reducing the mismatch between automatic speech recognition (ASR) systems and target users. Deep neural network (DNN) acoustic model adaptation by learning speaker-dependent hidden unit contributions (LHUC) scaling vectors has been widely used. The standard LHUC method not only requires multiple decoding passes in test time but also a substantial amount of adaptation data for robust parameter estimation. In order to address the issues, an efficient method of predicting and compressing the LHUC scaling vectors directly from acoustic features using a time-delay DNN (TDNN) and an online averaging layer is proposed in this paper. The resulting LHUC vectors are then used as auxiliary features to adapt DNN acoustic models. Experiments conducted on a 300-hour Switchboard corpus showed that the DNN and TDNN systems using the proposed predicted LHUC features consistently outperformed the corresponding baseline systems by up to about 9% relative reductions of word error rate. Being combined with i-Vector based adaptation, the LHUC feature adapted TDNN systems demonstrated consistent improvement over comparable i-Vector adapted TDNN system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Emiru Tsunoo|AUTHOR Emiru Tsunoo]], [[Yosuke Kashiwagi|AUTHOR Yosuke Kashiwagi]], [[Satoshi Asakawa|AUTHOR Satoshi Asakawa]], [[Toshiyuki Kumakura|AUTHOR Toshiyuki Kumakura]]
</p><p class="cpabstractcardaffiliationlist">Sony, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 764–768&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An on-device DNN-HMM speech recognition system efficiently works with a limited vocabulary in the presence of a variety of predictable noise. In such a case, vocabulary and environment adaptation is highly effective. In this paper, we propose a novel method of end-to-end (E2E) adaptation, which adjusts not only an acoustic model (AM) but also a weighted finite-state transducer (WFST). We convert a pretrained WFST to a trainable neural network and adapt the system to target environments/vocabulary by E2E joint training with an AM. We replicate Viterbi decoding with forward-backward neural network computation, which is similar to recurrent neural networks (RNNs). By pooling output score sequences, a vocabulary posterior for each utterance is obtained and used for discriminative loss computation. Experiments using 2–10 hours of English/Japanese adaptation datasets indicate that the fine-tuning of only WFSTs and that of only AMs are both comparable to a state-of-the-art adaptation method, and E2E joint training of the two components achieves the best recognition performance. We also adapt each language system to the other language using the adaptation data, and the results show that the proposed method also works well for language adaptations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Leda Sarı|AUTHOR Leda Sarı]]^^1^^, [[Samuel Thomas|AUTHOR Samuel Thomas]]^^2^^, [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Illinois at Urbana-Champaign, USA; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 769–773&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we present an unsupervised long short-term memory (LSTM) layer normalization technique that we call adaptation by speaker aware offsets (ASAO). These offsets are learned using an auxiliary network attached to the main senone classifier. The auxiliary network takes main network LSTM activations as input and tries to reconstruct speaker, (speaker,phone) and (speaker,senone)-level averages of the activations by minimizing the mean-squared error. Once the auxiliary network is jointly trained with the main network, during test time we do not need additional information for the test data as the network will generate the offset itself. Unlike many speaker adaptation studies which only adapt fully connected layers, our method is applicable to LSTM layers in addition to fully-connected layers. In our experiments, we investigate the effect of ASAO of LSTM layers at different depths. We also show its performance when the inputs are already speaker adapted by feature space maximum likelihood linear regression (fMLLR). In addition, we compare ASAO with a speaker adversarial training framework. ASAO achieves higher senone classification accuracy and lower word error rate (WER) than both the unadapted models and the adversarial model on the HUB4 dataset, with an absolute WER reduction of up to 2%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Khe Chai Sim|AUTHOR Khe Chai Sim]], [[Petr Zadrazil|AUTHOR Petr Zadrazil]], [[Françoise Beaufays|AUTHOR Françoise Beaufays]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 774–778&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker-independent speech recognition systems trained with data from many users are generally robust against speaker variability and work well for a large population of speakers. However, these systems do not always generalize well for users with very different speech characteristics. This issue can be addressed by building personalized systems that are designed to work well for each specific user. In this paper, we investigate the idea of securely training personalized end-to-end speech recognition models on mobile devices so that user data and models never leave the device and are never stored on a server. We study how the mobile training environment impacts performance by simulating on-device data consumption. We conduct experiments using data collected from speech impaired users for personalization. Our results show that personalization achieved 63.7% relative word error rate reduction when trained in a server environment and 58.1% in a mobile environment. Moving to on-device personalization resulted in 18.7% performance degradation, in exchange for improved scalability and data privacy. To train the model on device, we split the gradient computation into two and achieved 45% memory reduction at the expense of 42% increase in training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Denis Peskov|AUTHOR Denis Peskov]]^^1^^, [[Joe Barrow|AUTHOR Joe Barrow]]^^1^^, [[Pedro Rodriguez|AUTHOR Pedro Rodriguez]]^^1^^, [[Graham Neubig|AUTHOR Graham Neubig]]^^2^^, [[Jordan Boyd-Graber|AUTHOR Jordan Boyd-Graber]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Maryland at College Park, USA; ^^2^^Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 789–793&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Natural language processing systems are often downstream of unreliable inputs: machine translation, optical character recognition, or speech recognition. For instance, virtual assistants can only answer your questions after understanding your speech. We investigate and mitigate the effects of noise from Automatic Speech Recognition systems on two factoid Question Answering ( qa) tasks. Integrating confidences into the model and forced decoding of unknown words are empirically shown to improve the accuracy of downstream neural  qa systems. We create and train models on a synthetic corpus of over 500,000 noisy sentences and evaluate on two human corpora from Quizbowl and Jeopardy! competitions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryo Masumura|AUTHOR Ryo Masumura]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Atsushi Ando|AUTHOR Atsushi Ando]], [[Hosana Kamiyama|AUTHOR Hosana Kamiyama]], [[Takanobu Oba|AUTHOR Takanobu Oba]], [[Satoshi Kobashikawa|AUTHOR Satoshi Kobashikawa]], [[Yushi Aono|AUTHOR Yushi Aono]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 834–838&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we integrate fully neural network based conversation-context language models (CCLMs) that are suitable for handling multi-turn conversational automatic speech recognition (ASR) tasks, with multiple neural spoken language understanding (SLU) models. A main strength of CCLMs is their capacity to take long-range interactive contexts beyond utterance boundaries into consideration. However, it is hard to optimize the CCLMs so as to fully exploit the long-range interactive contexts because conversation-level training datasets are often limited. In order to mitigate this problem, our key idea is to introduce various SLU models that are developed for spoken dialogue systems into the CCLMs. In our proposed method (which we call “SLU-assisted CCLM”), hierarchical recurrent encoder-decoder based language modeling is extended so as to handle various utterance-level SLU results of preceding utterances in a continuous space. We expect that the SLU models will help the CCLMs to properly understand semantic meanings of long-range interactive contexts and to fully leverage them for estimating a next utterance. Our experiments on contact center dialogue ASR tasks demonstrate that SLU-assisted CCLMs combined with three types of SLU models can yield ASR performance improvements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Wei Xiang Lieow|AUTHOR Wei Xiang Lieow]]
</p><p class="cpabstractcardaffiliationlist">National Chiao Tung University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 839–843&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of dialogue system based on deep reinforcement learning (DRL) highly depends on the selected hyperparameters in DRL algorithms. Traditionally, Gaussian process (GP) provides a probabilistic approach to Bayesian optimization for sequential search which is beneficial to select optimal hyperparameter. However, GP suffers from the expanding computation when the dimension of hyperparameters and the number of search points are increased. This paper presents a meta learning approach to carry out multifidelity Bayesian optimization where a two-level recurrent neural network (RNN) is developed for sequential learning and optimization. The search space is explored via the first-level RNN with cheap and low fidelity over a global region of hyperparameters. The optimization is then exploited and leveraged by the second-level RNN with a high fidelity on the successively small regions. The experiments on the hyperparameter optimization for dialogue system based on the deep Q network show the effectiveness and efficiency by using the proposed multifidelity Bayesian optimization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kyle Williams|AUTHOR Kyle Williams]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 844–848&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe a zero shot approach to intent classification that allows for the identification of intents that were not present during training. Our approach makes use of a Long-short Term Memory neural network to encode user queries and intents and uses these encodings to score previously unseen intents based on their semantic similarity to the queries. We test our model on intent classification in a personal digital assistant and show an improvement of 15% over a strong baseline. We also investigate the effect of adding a few training samples for the previously unseen intents in a few shot learning setting and show improvements of up to 16% over the baseline method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mandy Korpusik|AUTHOR Mandy Korpusik]]^^1^^, [[Zoe Liu|AUTHOR Zoe Liu]]^^2^^, [[James Glass|AUTHOR James Glass]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Loyola Marymount University, USA; ^^2^^MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 849–853&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we compare a suite of neural networks (recurrent, convolutional, and the recently proposed BERT model) to a CRF with hand-crafted features on three semantic tagging corpora: the Air Travel Information System (ATIS) benchmark, restaurant queries, and written and spoken meal descriptions. Our motivation is to investigate pre-trained BERT’s transferability to the domains we are interested in. We demonstrate that neural networks without feature engineering outperform state-of-the-art statistical and deep learning approaches on all three tasks (except written meal descriptions, where the CRF is slightly better) and that deep, attention-based BERT, in particular, surpasses state-of-the-art results on these tasks. Error analysis shows the models are less confident when making errors, enabling the system to follow up with the user when uncertain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuka Kobayashi|AUTHOR Yuka Kobayashi]], [[Takami Yoshida|AUTHOR Takami Yoshida]], [[Kenji Iwata|AUTHOR Kenji Iwata]], [[Hiroshi Fujimura|AUTHOR Hiroshi Fujimura]]
</p><p class="cpabstractcardaffiliationlist">Toshiba, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 854–858&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a new method for slot filling of out-of-domain (OOD) slot values, which are not included in the training data, in spoken dialogue systems. Word embeddings have been proposed to estimate the OOD slot values included in the word embedding model from keyword information. At the same time, context information is an important clue for estimation because the values in a given slot tend to appear in similar contexts. The proper use of either or both keyword and context information depends on the sentence. Conventional methods input a whole sentence into an encoder and extract important clues by the attention mechanism. However, it is difficult to properly distinguish context and keyword information from the encoder outputs because these two features are already mixed. Our proposed method uses two encoders, which distinctly encode contexts and keywords, respectively. The model calculates weights for the two encoders based on a user utterance and estimates a slot with weighted outputs from the two encoders. Experimental results show that the proposed method achieves a 50% relative improvement in F1 score compared with a baseline model, which detects slot values from user utterances and estimates slots at once with a single encoder.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rahul Gupta|AUTHOR Rahul Gupta]], [[Aman Alok|AUTHOR Aman Alok]], [[Shankar Ananthakrishnan|AUTHOR Shankar Ananthakrishnan]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 794–798&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Any given classification problem can be modeled using multiclass or One-vs-All (OVA) architecture. An OVA system consists of as many OVA models as the number of classes, providing the advantage of asynchrony, where each OVA model can be re-trained independent of other models. This is particularly advantageous in settings where scalable model training is a consideration (for instance in an industrial environment where multiple and frequent updates need to be made to the classification system). In this paper, we conduct empirical analysis on realizing independent updates to OVA models and its impact on the accuracy of the overall OVA system. Given that asynchronous updates lead to differences in training datasets for OVA models, we first define a metric to quantify the differences in datasets. Thereafter, using Natural Language Understanding as a task of interest, we estimate the impact of three factors: (i) number of classes, (ii) number of data points and, (iii) divergences in training datasets across OVA models; on the OVA system accuracy. Finally, we observe the accuracy impact of increased asynchrony in a Spoken Language Understanding system. We analyze the results and establish that the proposed metric correlates strongly with the model performances in both the experimental settings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gabriel Marzinotto|AUTHOR Gabriel Marzinotto]]^^1^^, [[Géraldine Damnati|AUTHOR Géraldine Damnati]]^^1^^, [[Frédéric Béchet|AUTHOR Frédéric Béchet]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Orange Labs, France; ^^2^^LIS (UMR 7020), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 799–803&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a new semantic frame parsing model, based on Berkeley FrameNet, adapted to process spoken documents in order to perform information extraction from broadcast contents. Building upon previous work that had shown the effectiveness of adversarial learning for domain generalization in the context of semantic parsing of encyclopedic written documents, we propose to extend this approach to elocutionary style generalization. The underlying question throughout this study is whether adversarial learning can be used to combine data from different sources and train models on a higher level of abstraction in order to increase their robustness to lexical and stylistic variations as well as automatic speech recognition errors. The proposed strategy is evaluated on a French corpus of encyclopedic written documents and a smaller corpus of radio podcast transcriptions, both annotated with a FrameNet paradigm. We show that adversarial learning increases all models generalization capabilities both on manual and automatic speech transcription as well as on encyclopedic data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Titouan Parcollet|AUTHOR Titouan Parcollet]]^^1^^, [[Mohamed Morchid|AUTHOR Mohamed Morchid]]^^1^^, [[Xavier Bost|AUTHOR Xavier Bost]]^^2^^, [[Georges Linarès|AUTHOR Georges Linarès]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIA (EA 4128), France; ^^2^^Orkis, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 804–808&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep learning is at the core of recent spoken language understanding (SLU) related tasks. More precisely, deep neural networks (DNNs) drastically increased the performances of SLU systems, and numerous architectures have been proposed. In the real-life context of theme identification of telephone conversations, it is common to hold both a human, manual (TRS) and an automatically transcribed (ASR) versions of the conversations. Nonetheless, and due to production constraints, only the ASR transcripts are considered to build automatic classifiers. TRS transcripts are only used to measure the performances of ASR systems. Moreover, the recent performances in term of classification accuracy, obtained by DNN related systems are close to the performances reached by humans, and it becomes difficult to further increase the performances by only considering the ASR transcripts. This paper proposes to distillates the TRS knowledge available during the training phase within the ASR representation, by using a new generative adversarial network called M2H-GAN to generate a TRS-like version of an ASR document, to improve the theme identification performances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Munir Georges|AUTHOR Munir Georges]]^^1^^, [[Krzysztof Czarnowski|AUTHOR Krzysztof Czarnowski]]^^2^^, [[Tobias Bocklet|AUTHOR Tobias Bocklet]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Intel, Germany; ^^2^^Intel, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 809–813&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes an approach for intent classification and tagging on embedded devices, such as smart watches. We describe a technique to train neural networks whose final weights are binary. This enables memory-bandwidth-optimized inference and efficient computation even on constrained/embedded platforms.

The flow of the approach is as follows: a tf-idf word selection method reduces the overall number of weights. Bag-of-Words features are used with a feedforward and a recurrent neural network for intent classification and tagging, respectively. A novel double-Gaussian-based regularization term is used to train the network. Finally, the weights are clipped, almost losslessly, to -1 or 1, which results in a tiny binary neural network for intent classification and tagging.

Our technique is evaluated using a text corpus of transcribed and annotated voice queries. The test domain is “lights control”. We compare the intent and tagging accuracy of the ultra-compact binary neural network with our baseline system. The novel approach yields comparable accuracy but reduces the model size by a factor of 16: from 160kB to 10kB.</p></div>
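To make the training recipe above concrete, here is a small hedged sketch of (a) a regularizer that pulls weights towards ±1 and (b) the final clipping of trained weights to binary values. The exact double-Gaussian term and clipping procedure used by the authors are not specified in the abstract, so the functional form below is only one plausible reading.

```python
# Illustrative sketch: a penalty that is low near -1 and +1 and high in
# between (one plausible "double Gaussian" shape), plus hard binarization.
import numpy as np

def double_gaussian_penalty(w, sigma=0.3):
    # assumed form: small near the two target values, large elsewhere
    return np.sum(1.0 - np.exp(-(w - 1.0) ** 2 / (2 * sigma ** 2))
                      - np.exp(-(w + 1.0) ** 2 / (2 * sigma ** 2)))

def binarize(w):
    # final clipping of trained weights to -1 or +1
    return np.where(w >= 0.0, 1, -1).astype(np.int8)

w = np.random.uniform(-1.5, 1.5, size=(64, 32))
print(double_gaussian_penalty(w), binarize(w).nbytes, "bytes")
```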
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Loren Lugosch|AUTHOR Loren Lugosch]]^^1^^, [[Mirco Ravanelli|AUTHOR Mirco Ravanelli]]^^1^^, [[Patrick Ignoto|AUTHOR Patrick Ignoto]]^^2^^, [[Vikrant Singh Tomar|AUTHOR Vikrant Singh Tomar]]^^2^^, [[Yoshua Bengio|AUTHOR Yoshua Bengio]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Université de Montréal, Canada; ^^2^^Fluent.ai, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 814–818&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Whereas conventional spoken language understanding (SLU) systems map speech to text, and then text to intent, end-to-end SLU systems map speech directly to intent through a single trainable model. Achieving high accuracy with these end-to-end models without a large amount of training data is difficult. We propose a method to reduce the data requirements of end-to-end SLU in which the model is first pre-trained to predict words and phonemes, thus learning good features for SLU. We introduce a new SLU dataset, Fluent Speech Commands, and show that our method improves performance both when the full dataset is used for training and when only a small subset is used. We also describe preliminary experiments to gauge the model’s ability to generalize to new phrases not heard during training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prashanth Gurunath Shivakumar|AUTHOR Prashanth Gurunath Shivakumar]], [[Mu Yang|AUTHOR Mu Yang]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 819–823&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Decoding a speaker’s intent is a crucial part of spoken language understanding (SLU). The presence of noise or errors in the text transcriptions in real-life scenarios makes the task more challenging. In this paper, we address spoken language intent detection under noisy conditions imposed by automatic speech recognition (ASR) systems. We propose to employ the confusion2vec word feature representation to compensate for the errors made by ASR and to increase the robustness of the SLU system. Confusion2vec, motivated by human speech production and perception, models acoustic relationships between words in addition to the semantic and syntactic relations of words in human language. We hypothesize that ASR often makes errors involving acoustically similar words, and that confusion2vec, with its inherent model of acoustic relationships between words, is able to compensate for these errors. Through experiments on the ATIS benchmark dataset, we demonstrate the robustness of the proposed model, which achieves state-of-the-art results under noisy ASR conditions. Our system reduces the classification error rate (CER) by 20.84% and improves robustness by 37.48% (lower CER degradation) relative to the previous state of the art when going from clean to noisy transcripts. Improvements are also demonstrated when training the intent detection models on noisy transcripts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Natalia Tomashenko|AUTHOR Natalia Tomashenko]]^^1^^, [[Antoine Caubrière|AUTHOR Antoine Caubrière]]^^2^^, [[Yannick Estève|AUTHOR Yannick Estève]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIA (EA 4128), France; ^^2^^LIUM (EA 4023), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 824–828&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work investigates speaker adaptation and transfer learning for spoken language understanding (SLU). We focus on the direct extraction of semantic tags from the audio signal using an end-to-end neural network approach. We demonstrate that the learning performance of the target predictive function for the semantic slot filling task can be substantially improved by speaker adaptation and by various knowledge transfer approaches. First, we explore speaker adaptive training (SAT) for end-to-end SLU models and propose to use zero pseudo i-vectors for more efficient model initialization and pretraining in SAT. Second, in order to improve the learning convergence for the target semantic slot filling (SF) task, models trained for different tasks, such as automatic speech recognition and named entity extraction, are used to initialize neural end-to-end models trained for the target task. In addition, we explore the impact of knowledge transfer for SLU from a speech recognition task trained in a different language. These approaches make it possible to develop end-to-end SLU systems in low-resource data scenarios where there is not enough in-domain semantically labeled data, but other resources, such as word transcriptions for the same or another language or named entity annotations, are available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuanfeng Song|AUTHOR Yuanfeng Song]]^^1^^, [[Di Jiang|AUTHOR Di Jiang]]^^2^^, [[Xueyang Wu|AUTHOR Xueyang Wu]]^^1^^, [[Qian Xu|AUTHOR Qian Xu]]^^2^^, [[Raymond Chi-Wing Wong|AUTHOR Raymond Chi-Wing Wong]]^^1^^, [[Qiang Yang|AUTHOR Qiang Yang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^HKUST, China; ^^2^^WeBank, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 829–833&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Dialogue speech is widespread in scenarios such as chitchat, meetings and customer service. General-purpose speech recognition systems usually neglect the topic information in the context of dialogue speech, which has great potential for improving recognition performance. In this paper, we propose a transfer learning mechanism to conduct topic-aware recognition of dialogue speech. We first propose a new probabilistic topic model named Dialogue Speech Topic Model (DSTM) that is specialized for modeling the context of dialogue speech. We further propose a novel transfer learning mechanism for DSTM that significantly reduces its training cost while preserving its effectiveness for accurate topic inference. The experimental results demonstrate that the proposed language model adaptation techniques effectively improve the performance of the state-of-the-art Automatic Speech Recognition (ASR) system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nadee Seneviratne|AUTHOR Nadee Seneviratne]]^^1^^, [[Ganesh Sivaraman|AUTHOR Ganesh Sivaraman]]^^2^^, [[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Maryland at College Park, USA; ^^2^^Pindrop, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 859–863&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Several technologies, such as electromagnetic articulometry (EMA), ultrasound, real-time Magnetic Resonance Imaging (MRI), and the X-ray microbeam, are used to measure speech articulatory movements. Each of these techniques provides a different view of the vocal tract. Measurements performed using similar techniques also differ greatly due to differences in sensor placement and in the anatomy of speakers. This limits most articulatory studies to single datasets. However, to yield better results in applications, speech inversion systems should be more generalized, which requires combining data from multiple sources. This paper proposes a multi-task learning based deep neural network architecture for acoustic-to-articulatory speech inversion trained using three different articulatory datasets: two measured using EMA, and one using the X-ray microbeam. Experiments show improved accuracy of the proposed acoustic-to-articulatory mapping compared to systems trained on single datasets.</p></div>
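A minimal sketch of the multi-task arrangement suggested above: a shared trunk maps acoustic features to a common representation, and each articulatory dataset receives its own output head, since sensor layouts and dimensionalities differ across the EMA and X-ray microbeam corpora. The framework (PyTorch), layer sizes and output dimensions are assumptions, not the authors’ architecture.

```python
# Shared-trunk, per-corpus-head sketch for multi-dataset speech inversion.
import torch
import torch.nn as nn

class MultiCorpusInversion(nn.Module):
    def __init__(self, in_dim=39, out_dims=(12, 12, 16)):
        super().__init__()
        self.trunk = nn.Sequential(
            nn.Linear(in_dim, 256), nn.ReLU(),
            nn.Linear(256, 256), nn.ReLU(),
        )
        # one output head per articulatory corpus (two EMA, one X-ray microbeam)
        self.heads = nn.ModuleList([nn.Linear(256, d) for d in out_dims])

    def forward(self, x, dataset_id):
        return self.heads[dataset_id](self.trunk(x))

model = MultiCorpusInversion()
mfcc = torch.randn(8, 39)               # batch of acoustic frames
ema_pred = model(mfcc, dataset_id=0)    # trajectories for EMA corpus 0
```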
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hironori Takemoto|AUTHOR Hironori Takemoto]]^^1^^, [[Tsubasa Goto|AUTHOR Tsubasa Goto]]^^1^^, [[Yuya Hagihara|AUTHOR Yuya Hagihara]]^^1^^, [[Sayaka Hamanaka|AUTHOR Sayaka Hamanaka]]^^1^^, [[Tatsuya Kitamura|AUTHOR Tatsuya Kitamura]]^^2^^, [[Yukiko Nota|AUTHOR Yukiko Nota]]^^3^^, [[Kikuo Maekawa|AUTHOR Kikuo Maekawa]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chiba Institute of Technology, Japan; ^^2^^Konan University, Japan; ^^3^^NINJAL, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 904–908&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Real-time MRI can be used to obtain videos that describe articulatory movements during running speech. For detailed analysis based on a large number of video frames, it is necessary to extract the contours of speech organs, such as the tongue, semi-automatically. The present study attempted to extract the contours of speech organs from videos using a machine learning method. First, an expert operator manually extracted the contours from the frames of a video to build training data sets. The learning operators, or learners, then extracted the contours from each frame of the video. Finally, the errors, representing the geometrical distance between the extracted contours and the ground truth (manually traced contours held out from the training data sets), were examined. The results showed that the contours extracted using machine learning were closer to the ground truth than the contours traced by other expert and non-expert operators. In addition, using the same learners, contours were extracted from other, unseen videos obtained during different speech tasks of the same subject. As a result, the errors in those videos were similar to those in the video on which the learners were trained.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[K.G. van Leeuwen|AUTHOR K.G. van Leeuwen]], [[P. Bos|AUTHOR P. Bos]], [[S. Trebeschi|AUTHOR S. Trebeschi]], [[M.J.A. van Alphen|AUTHOR M.J.A. van Alphen]], [[L. Voskuilen|AUTHOR L. Voskuilen]], [[L.E. Smeele|AUTHOR L.E. Smeele]], [[F. van der Heijden|AUTHOR F. van der Heijden]], [[R.J.J.H. van Son|AUTHOR R.J.J.H. van Son]]
</p><p class="cpabstractcardaffiliationlist">NKI, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 909–913&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent advances in real-time magnetic resonance imaging (rtMRI) of the vocal tract provide opportunities for studying human speech. This modality, together with the acquired speech, may enable the mapping of articulatory configurations to acoustic features. In this study, we take the first step by training a deep learning model to classify 27 different phonemes from midsagittal MR images of the vocal tract.

An American English database was used to train a convolutional neural network for classifying vowels (13 classes), consonants (14 classes) and all phonemes (27 classes) of 17 subjects. Top-1 classification accuracy on the test set for all phonemes was 57%. Error analysis showed that voiced and unvoiced sounds were often confused. Moreover, we performed principal component analysis on the network’s embedding and observed topological similarities between the representation learned by the network and the vowel diagram. Saliency maps gave insight into the anatomical regions most important for classification and showed congruence with known regions of articulatory importance.

We demonstrate the feasibility of using deep learning to distinguish between phonemes from MRI. Network analysis can be used to improve understanding of normal articulation and speech and, in the future, impaired speech. This study brings us a step closer to the articulatory-to-acoustic mapping from rtMRI.</p></div>
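A compact sketch of the classification setup described above, assuming PyTorch, an arbitrary input resolution and a small illustrative architecture rather than the authors’ network; it only shows how midsagittal frames map to 27 phoneme logits.

```python
# Toy CNN mapping midsagittal rtMRI frames to 27 phoneme classes.
import torch
import torch.nn as nn

cnn = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(32 * 21 * 21, 128), nn.ReLU(),
    nn.Linear(128, 27),                      # 27 phoneme classes
)

frames = torch.randn(4, 1, 84, 84)           # batch of MR frames (assumed 84x84)
logits = cnn(frames)
pred = logits.argmax(dim=1)                  # top-1 phoneme prediction
```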
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Doris Mücke|AUTHOR Doris Mücke]]^^1^^, [[Anne Hermes|AUTHOR Anne Hermes]]^^1^^, [[Sam Tilsen|AUTHOR Sam Tilsen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität zu Köln, Germany; ^^2^^Cornell University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 914–918&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>According to the segmental anchor hypothesis within the Autosegmental-Metrical approach, tones are aligned with segmental boundaries of consonants and vowels in the acoustic domain. In prenuclear rising pitch accents (LH*), the rise is assumed to occur in the vicinity of the accented syllable it is phonologically associated with. However, there are differences in the alignment patterns within and across languages that cannot be captured within the AM approach. In the present study, we investigate the coordination of tonal and oral constriction gestures within Articulatory Phonology. To this end, we model the coordination of prenuclear LH* pitch accents in Catalan, Northern and Southern German with respect to syllable production on the basis of recordings made with 2D electromagnetic articulography. We provide an extended coupled oscillators model that allows for balanced and imbalanced coupling strengths. Based on examples, we show that the observed differences in alignment patterns for prenuclear rising pitch accents can be modelled with the same underlying coordinative structures/coupling modes for vocalic and tonal gestures, and that surface differences arise from gradient variation in coupling strengths.</p></div>
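For readers unfamiliar with coupled-oscillator accounts of gestural timing, the toy simulation below integrates two Kuramoto-style phase oscillators (one standing in for a vocalic gesture, one for a tonal gesture) with adjustable, possibly unequal coupling strengths. The equations and parameter values are generic assumptions and do not reproduce the authors’ extended model.

```python
# Two phase oscillators with symmetric or asymmetric coupling strengths.
import numpy as np

def simulate(omega=(2.0, 2.2), a12=1.0, a21=0.3, phi0=(0.0, 1.0),
             dt=0.001, steps=5000):
    phi = np.array(phi0, dtype=float)
    history = np.empty((steps, 2))
    for t in range(steps):
        d1 = omega[0] + a12 * np.sin(phi[1] - phi[0])   # pull on oscillator 1
        d2 = omega[1] + a21 * np.sin(phi[0] - phi[1])   # pull on oscillator 2
        phi += dt * np.array([d1, d2])
        history[t] = phi
    return history

balanced = simulate(a12=1.0, a21=1.0)     # balanced coupling
imbalanced = simulate(a12=1.0, a21=0.2)   # imbalanced coupling
print(balanced[-1] - imbalanced[-1])      # differing final relative phases
```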
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Debadatta Dash|AUTHOR Debadatta Dash]]^^1^^, [[Alan Wisler|AUTHOR Alan Wisler]]^^1^^, [[Paul Ferrari|AUTHOR Paul Ferrari]]^^2^^, [[Jun Wang|AUTHOR Jun Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^University of Texas at Austin, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 864–868&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neurodegenerative diseases such as amyotrophic lateral sclerosis (ALS) can cause locked-in syndrome (fully paralyzed but aware). A brain-computer interface (BCI) may be the only option to restore communication for these patients. Current BCIs typically use visual or attention correlates in neural activity to select letters randomly displayed on a screen, an approach that is extremely slow (a few words per minute). Speech-BCIs, which aim to convert brain activity patterns to speech (neural speech decoding), hold the potential to enable faster communication. Although a few recent studies have shown the potential of neural speech decoding, they have focused on speaker-dependent models. In this study, we investigated speaker-independent neural speech decoding of five continuous phrases from Magnetoencephalography (MEG) signals while 8 subjects produced speech covertly (imagination) or overtly (articulation). We used both supervised and unsupervised speaker adaptation strategies to implement a speaker-independent model. Experimental results demonstrated that the proposed adaptation-based speaker-independent model significantly improved decoding performance. To our knowledge, this is the first demonstration of the possibility of speaker-independent neural speech decoding.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Janaki Sheth|AUTHOR Janaki Sheth]]^^1^^, [[Ariel Tankus|AUTHOR Ariel Tankus]]^^2^^, [[Michelle Tran|AUTHOR Michelle Tran]]^^1^^, [[Lindy Comstock|AUTHOR Lindy Comstock]]^^1^^, [[Itzhak Fried|AUTHOR Itzhak Fried]]^^1^^, [[William Speier|AUTHOR William Speier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of California at Los Angeles, USA; ^^2^^Tel Aviv University, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 869–873&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One of the main goals in Brain-Computer Interface (BCI) research is to help patients with faltering communication abilities due to neurodegenerative diseases produce text or speech output using their neural recordings. However, practical implementation of such a system has proven difficult due to limitations in the speed, accuracy, and training time of existing interfaces. In this paper, we contribute to this endeavour by isolating appropriate input features from speech-producing neural signals that will feed into a machine learning classifier to identify target phonemes. Analysing data from six subjects, we discern frequency bands that encapsulate differential information regarding production of vowels and consonants broadly, and more specifically nasals and semivowels. Subsequent spatial localization analysis reveals the underlying cortical regions responsible for different phoneme categories. Anatomical locations along with their respective frequency bands act as prospective feature sets for machine learning classifiers. We demonstrate this classification ability in a preliminary language reconstruction task and show an average word classification accuracy of 30.6% (p<0.001).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Samuel Silva|AUTHOR Samuel Silva]]^^1^^, [[António Teixeira|AUTHOR António Teixeira]]^^1^^, [[Conceição Cunha|AUTHOR Conceição Cunha]]^^2^^, [[Nuno Almeida|AUTHOR Nuno Almeida]]^^1^^, [[Arun A. Joseph|AUTHOR Arun A. Joseph]]^^3^^, [[Jens Frahm|AUTHOR Jens Frahm]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidade de Aveiro, Portugal; ^^2^^LMU München, Germany; ^^3^^MPI for Biophysical Chemistry, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 874–878&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The study of the static and dynamic aspects of speech production can profit from technologies such as electromagnetic midsagittal articulography (EMA) and real-time magnetic resonance imaging (RTMRI). These can improve our knowledge of which articulators and gestures are involved in producing specific sounds and foster improved speech production models, paramount to advancing, e.g., articulatory speech synthesis. Previous work by the authors has shown that critical articulator identification can be performed from RTMRI data of the vocal tract, with encouraging results, by extending the applicability of an unsupervised statistical identification method previously proposed for EMA data. Nevertheless, two limitations were identified: the slower time resolution of the considered RTMRI corpus (14 Hz) compared to EMA, which potentially influences the ability to select the most suitable representative configuration for each phone (paramount for strongly dynamic phones, e.g., nasal vowels), and the lack of a richer set of contexts, which is relevant for observing coarticulation effects. This article addresses these limitations by exploring critical articulator identification from a faster RTMRI corpus (50 Hz), for European Portuguese, providing a richer set of contexts, and by testing how fusing the articulatory data of two speakers might influence critical articulator determination.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ioannis K. Douros|AUTHOR Ioannis K. Douros]]^^1^^, [[Anastasiia Tsukanova|AUTHOR Anastasiia Tsukanova]]^^1^^, [[Karyna Isaieva|AUTHOR Karyna Isaieva]]^^2^^, [[Pierre-André Vuissoz|AUTHOR Pierre-André Vuissoz]]^^2^^, [[Yves Laprie|AUTHOR Yves Laprie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Loria (UMR 7503), France; ^^2^^IADI (Inserm U1254), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 879–883&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an algorithm for augmenting the shape of the vocal tract using 3D static and 2D dynamic speech MRI data. While static 3D images have better resolution and provide spatial information, 2D dynamic images capture the transitions. The aim of this work is to combine strong points of these two types of data to obtain better image quality of 2D dynamic images and extend the 2D dynamic images to the 3D domain.

To produce a 3D dynamic consonant-vowel (CV) sequence, our algorithm takes as input the 2D CV transition and the static 3D targets for C and V. To obtain the enhanced sequence of images, the first step is to find a transformation between the 2D images and the mid-sagittal slice of the acoustically corresponding 3D image stack, and then to find a transformation between neighbouring sagittal slices in the 3D static image stack. Combining these transformations produces the final set of images. In the present study, we first examined the transformation from the 3D mid-sagittal frame to the 2D video in order to improve image quality, and then we examined the extension of the 2D video to the third dimension with the aim of enriching spatial information.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oksana Rasskazova|AUTHOR Oksana Rasskazova]]^^1^^, [[Christine Mooshammer|AUTHOR Christine Mooshammer]]^^1^^, [[Susanne Fuchs|AUTHOR Susanne Fuchs]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Humboldt-Universität zu Berlin, Germany; ^^2^^ZAS, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 884–888&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The investigation of speech planning processes, in particular the timing between acoustic and articulatory onset, has recently received a lot of attention. Respiration has not been considered in this process so far, although it is involved and may be well coordinated with the oral articulators prior to and at the onset of the utterance. In light of these considerations, we investigated the temporal coordination between acoustic, respiratory and articulatory events prior to utterance onset. For this purpose, 12 native speakers of German were recorded with Electromagnetic Articulography and Inductance Plethysmography while reading sentences controlled for length and stress of the first word. The initial segment of the utterance was either /t/ or /n/. The results for six speakers so far indicate that early speech preparation consists of mouth opening during the inhalation phase. The onset of expiration seems to be tightly coupled with the acoustic and the articulatory onset, particularly with the constriction interval of the tongue tip gesture in the first segment. The manner of articulation of the initial segment seems to affect the temporal fine-tuning of preparatory events.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michele Gubian|AUTHOR Michele Gubian]], [[Manfred Pastätter|AUTHOR Manfred Pastätter]], [[Marianne Pouplier|AUTHOR Marianne Pouplier]]
</p><p class="cpabstractcardaffiliationlist">LMU München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 889–893&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It has long been proposed in speech production research that in CV sequences, the movements for the consonant and the vowel are initiated synchronously. However, mostly due to limitations in the statistical analysis of articulator motion over time, this could only be shown in a limited fashion, based on positional differences at a single time point during consonantal constriction formation. It is unknown to what extent this observation generalizes to earlier time points. In this paper, we illustrate the use of functional principal component analysis (FPCA) for the statistical analysis of articulator motion over time. Using articulography data, we quantify CV coarticulation during constriction formation of [k] in two vowel contexts. We show how FPCA enables us to analyse both horizontal and vertical movement components over time in a single model while preserving information on temporal variability. We combine FPCA with linear mixed modelling to obtain estimated mean trajectories and confidence bands for [k] in the two vowel contexts. Results show that well before the timepoint of peak velocity the vowel causes a substantial spatial separation of the consonantal trajectories, estimated to be at least 3 mm at peak velocity. This lends support to the hypothesis that vowel and consonant are initiated synchronously.</p></div>
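A rough sketch of the FPCA step described above: trajectories are time-normalized to a common grid, horizontal and vertical components are concatenated so that a single decomposition covers both, and the per-token component scores can then enter a mixed model. Smoothing, basis choice and the mixed model itself are omitted, and the data below are stand-ins.

```python
# Functional-PCA-style decomposition of 2D articulator trajectories.
import numpy as np
from sklearn.decomposition import PCA

def to_grid(traj, n=50):
    # traj: (T, 2) array of x/y samples; crude linear time-normalization
    t_old = np.linspace(0, 1, len(traj))
    t_new = np.linspace(0, 1, n)
    return np.concatenate([np.interp(t_new, t_old, traj[:, 0]),
                           np.interp(t_new, t_old, traj[:, 1])])

trajectories = [np.cumsum(np.random.randn(np.random.randint(40, 80), 2), axis=0)
                for _ in range(30)]                      # stand-in data
X = np.stack([to_grid(tr) for tr in trajectories])       # (30, 100)
scores = PCA(n_components=3).fit_transform(X)            # per-token FPC scores
```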
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]^^1^^, [[Mohammed Salah Al-Radhi|AUTHOR Mohammed Salah Al-Radhi]]^^2^^, [[Géza Németh|AUTHOR Géza Németh]]^^2^^, [[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]^^3^^, [[Tamás Grósz|AUTHOR Tamás Grósz]]^^4^^, [[László Tóth|AUTHOR László Tóth]]^^4^^, [[Alexandra Markó|AUTHOR Alexandra Markó]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BME, Hungary; ^^2^^BME, Hungary; ^^3^^MTA-SZTE RGAI, Hungary; ^^4^^University of Szeged, Hungary; ^^5^^MTA-ELTE LingArt, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 894–898&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, within the Silent Speech Interface (SSI) field, it was shown that the prediction of F0 from Ultrasound Tongue Images (UTI) as the articulatory input is possible using Deep Neural Networks for articulatory-to-acoustic mapping. Moreover, text-to-speech synthesizers were shown to produce higher quality speech when using a continuous pitch estimate, which takes non-zero pitch values even when voicing is not present. Therefore, in this paper on UTI-based SSI, we use a simple continuous F0 tracker which does not apply a strict voiced/unvoiced decision. Continuous vocoder parameters (ContF0, Maximum Voiced Frequency and Mel-Generalized Cepstrum) are predicted using a convolutional neural network, with UTI as input. The results demonstrate that during the articulatory-to-acoustic mapping experiments, the continuous F0 is predicted with lower error, and the continuous vocoder produces slightly more natural synthesized speech than the baseline vocoder using standard discontinuous F0.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eugen Klein|AUTHOR Eugen Klein]]^^1^^, [[Jana Brunner|AUTHOR Jana Brunner]]^^1^^, [[Phil Hoole|AUTHOR Phil Hoole]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Humboldt-Universität zu Berlin, Germany; ^^2^^LMU München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 899–903&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although most modern theories of speech production assume that representations of speech sounds are multidimensional encompassing acoustic and articulatory information, speech motor learning studies which assess the degree of adaptation in both dimensions are few and far between. In the current paper, we present an auditory perturbation study of German sibilant [s] in which speakers’ audio and articulatory movements were recorded by means of electromagnetic articulography. Random Forest, a supervised learning algorithm, was employed to classify speakers’ responses produced under unaltered or perturbed feedback based either on acoustic or articulatory parameters. Preliminary results demonstrate that while classification accuracy increases in the acoustic dimension as the perturbation session goes on, the classification accuracy in the articulatory dimension, although overall higher, remains approximately at the same level. This suggests that the adaptation process is characterized by active exploration of the articulatory space which is guided by speakers’ auditory feedback.</p></div>
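A minimal sketch of the classification setup described above: a Random Forest is trained to separate perturbed from unaltered productions, once from acoustic and once from articulatory parameters, and cross-validated accuracy is compared between the two feature spaces. The feature contents and sizes below are placeholders.

```python
# Compare perturbed-vs-unaltered classification across two feature spaces.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
acoustic = rng.normal(size=(200, 6))      # e.g. sibilant spectral parameters
articulatory = rng.normal(size=(200, 8))  # e.g. EMA sensor positions
labels = rng.integers(0, 2, size=200)     # 0 = unaltered, 1 = perturbed

for name, X in [("acoustic", acoustic), ("articulatory", articulatory)]:
    acc = cross_val_score(RandomForestClassifier(n_estimators=200), X, labels,
                          cv=5).mean()
    print(name, round(acc, 3))
```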
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]^^1^^, [[Felicia S.C. Lim|AUTHOR Felicia S.C. Lim]]^^2^^, [[Michael Chinen|AUTHOR Michael Chinen]]^^2^^, [[Jan Skoglund|AUTHOR Jan Skoglund]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 919–923&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We define salient features as features that are shared by signals that are defined as being equivalent by a system designer. The definition allows the designer to contribute qualitative information. We aim to find salient features that are useful as conditioning for generative networks. We extract salient features by jointly training a set of clones of an encoder network. Each network clone receives as input a different signal from a set of equivalent signals. The objective function encourages the network clones to map their input into a set of features that is identical across the clones. It additionally encourages feature independence and, optionally, reconstruction of a desired target signal by a decoder. As an application, we train a system that extracts a time-sequence of feature vectors of speech and uses it as a conditioning of a WaveNet generative system, facilitating both coding and enhancement.</p></div>
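The core of the clone-training objective can be sketched as follows, assuming a PyTorch setup and illustrative dimensions: two inputs declared equivalent pass through the same encoder (the “clones” share weights), a consistency term pushes their features to agree, and an optional decoder term reconstructs a target. The feature-independence term mentioned above is omitted here.

```python
# Weight-shared encoder clones with a feature-consistency + reconstruction loss.
import torch
import torch.nn as nn
import torch.nn.functional as F

encoder = nn.Sequential(nn.Linear(257, 128), nn.ReLU(), nn.Linear(128, 16))
decoder = nn.Sequential(nn.Linear(16, 128), nn.ReLU(), nn.Linear(128, 257))

def salient_loss(x_a, x_b, target):
    f_a, f_b = encoder(x_a), encoder(x_b)       # same weights: the "clones"
    consistency = F.mse_loss(f_a, f_b)          # features identical across clones
    reconstruction = F.mse_loss(decoder(f_a), target)
    return consistency + reconstruction

clean = torch.randn(32, 257)                     # e.g. clean spectral frames
noisy = clean + 0.1 * torch.randn(32, 257)       # an "equivalent" degraded version
loss = salient_loss(noisy, clean, clean)
loss.backward()
```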
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Atreyee Saha|AUTHOR Atreyee Saha]]^^1^^, [[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]]^^2^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Jadavpur University, India; ^^2^^Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 959–963&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Second language learners of British English (BE) are typically trained to learn four intonation classes: Glide-up, Glide-down, Dive and Take-off. We predict the intonation class in a learner’s utterance by modeling the temporal dependencies in the pitch patterns with gated recurrent unit (GRU) networks. To do this, we pre-train the GRU network using a set of synthesized pitch patterns representing each intonation class. For the synthesis, we propose to obtain pitch patterns from tone sequences representing each intonation class, derived from domain knowledge. Experiments are conducted on speech data collected from experts in spoken English training material for teaching BE intonation. The absolute improvements in unweighted average recall (UAR) using the proposed scheme with pre-training are found to be 4.14% and 6.01%, respectively, over the proposed approach without pre-training and over a baseline scheme that uses hidden Markov models (HMMs).</p></div>
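A small sketch of the two-stage training described above, assuming PyTorch: a GRU classifier over pitch contours is first pre-trained on synthesized contours standing in for the four intonation classes, and can then be fine-tuned on expert data. The contour synthesis below is a crude placeholder for the tone-sequence-based synthesis in the paper.

```python
# GRU intonation classifier pre-trained on synthetic pitch contours.
import torch
import torch.nn as nn
import torch.nn.functional as F

class IntonationGRU(nn.Module):
    def __init__(self, n_classes=4):
        super().__init__()
        self.gru = nn.GRU(input_size=1, hidden_size=32, batch_first=True)
        self.out = nn.Linear(32, n_classes)

    def forward(self, f0):                     # f0: (batch, T, 1)
        _, h = self.gru(f0)
        return self.out(h.squeeze(0))

def synth_batch(n=64, T=100):
    t = torch.linspace(0, 1, T)
    labels = torch.randint(0, 4, (n,))
    slopes = torch.tensor([1.0, -1.0, -0.5, 0.5])[labels]   # crude class shapes
    contours = slopes[:, None] * t[None, :] + 0.05 * torch.randn(n, T)
    return contours.unsqueeze(-1), labels

model = IntonationGRU()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
for _ in range(50):                             # pre-training on synthetic pitch
    x, y = synth_batch()
    opt.zero_grad()
    F.cross_entropy(model(x), y).backward()
    opt.step()
# ...then continue training on real expert-annotated pitch contours.
```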
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manoj Kumar Ramanathi|AUTHOR Manoj Kumar Ramanathi]], [[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 924–928&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic syllable stress detection is typically performed with a supervised classifier that relies on manually annotated stress markings and on features computed within syllable segments derived from phoneme transcriptions and their time-aligned boundaries. However, manual annotation is tedious, and errors in estimating the segmental information could degrade stress detection accuracy. In order to circumvent these issues, we propose to estimate stress markings in an automatic speech recognition (ASR) framework involving a finite-state transducer (FST), without using annotated stress markings or segmental information. For this, we train the ASR system on native English data along with a pronunciation lexicon containing canonical stress markings, and decode non-native utterances as pronunciations embedded with stress markings. In the decoding, we use an FST encoding the pronunciations derived from phoneme transcriptions and the instructions involved in a typical manual annotation. Experiments are conducted on polysyllabic words taken from the ISLE corpus, containing utterances spoken by Italian and German speakers, using ASR models trained on three corpora. Among the three models, the highest stress detection accuracies with the proposed approach on Italian and German speakers are found to be 2.07% and 1.19% higher than, and comparable to, those of the two supervised classification approaches used as baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Renuka Mannem|AUTHOR Renuka Mannem]]^^1^^, [[Jhansi Mallela|AUTHOR Jhansi Mallela]]^^2^^, [[Aravind Illa|AUTHOR Aravind Illa]]^^1^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^RGUKT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 929–933&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a speech rate estimation approach using a convolutional dense neural network (CDNN). The CDNN-based approach uses acoustic and articulatory features for speech rate estimation. Mel Frequency Cepstral Coefficients (MFCCs) are used as acoustic features, and articulograms representing the time-varying vocal tract profile are used as articulatory features. The articulogram is computed from a real-time magnetic resonance imaging (rtMRI) video in the midsagittal plane of a subject while speaking. However, in practice, the articulogram features are not directly available, unlike acoustic features from a speech recording. Thus, we use an Acoustic-to-Articulatory Inversion method based on a bidirectional long short-term memory network, which estimates the articulogram features from the acoustics. The proposed CDNN-based approach using estimated articulatory features requires both acoustic and articulatory features during training, but only acoustic data during testing. Experiments are conducted using rtMRI videos from four subjects, each speaking 460 sentences. The Pearson correlation coefficient is used to evaluate the speech rate estimation. It is found that the CDNN-based approach gives a better correlation coefficient than the temporal and selected sub-band correlation (TCSSBC) based baseline scheme, by 81.58% and 73.68% (relative) in seen and unseen subject conditions respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sebastian Springenberg|AUTHOR Sebastian Springenberg]], [[Egor Lakomkin|AUTHOR Egor Lakomkin]], [[Cornelius Weber|AUTHOR Cornelius Weber]], [[Stefan Wermter|AUTHOR Stefan Wermter]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 934–938&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unsupervised learning represents an important opportunity for obtaining useful speech representations. Recently, variational autoencoders (VAEs) have been shown to extract useful representations in an unsupervised manner. These models are usually not designed to explicitly disentangle specific sources of information. When processing data of a sequential nature that involves multi-timescale information, disentanglement can, however, be beneficial. In this paper, we address this issue by developing a predictive auxiliary variational autoencoder to obtain speech representations at different timescales. We present an auxiliary lower bound which is used to develop a model that we call the Predictive Aux-VAE. The model is designed to disentangle global from local information in a dedicated auxiliary variable. Learned representations are analysed with respect to their ability to capture global speech characteristics. We observe that representations of individual speakers are separated well in the latent space and can successfully be used in a subsequent speaker identification task, where they achieve high classification accuracy, comparable to a fully supervised model. Moreover, manipulating the global variable allows global characteristics to be changed while retaining the local content during generation, which demonstrates the success of our model in disentangling global from local information.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Georgios Paraskevopoulos|AUTHOR Georgios Paraskevopoulos]]^^1^^, [[Efthymios Tzinis|AUTHOR Efthymios Tzinis]]^^2^^, [[Nikolaos Ellinas|AUTHOR Nikolaos Ellinas]]^^1^^, [[Theodoros Giannakopoulos|AUTHOR Theodoros Giannakopoulos]]^^3^^, [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTUA, Greece; ^^2^^University of Illinois at Urbana-Champaign, USA; ^^3^^Behavioral Signal Technologies, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 939–943&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We examine the use of linear and non-linear dimensionality reduction algorithms for extracting low-rank feature representations for speech emotion recognition. Two feature sets are used, one based on low-level descriptors and their aggregations (IS10) and one modeling recurrence dynamics of speech (RQA), as well as their fusion. We report speech emotion recognition (SER) results for learned representations on two databases using different classification methods. Classification with low-dimensional representations yields performance improvement in a variety of settings. This indicates that dimensionality reduction is an effective way to combat the curse of dimensionality for SER. Visualization of features in two dimensions provides insight into discriminatory abilities of reduced feature sets.</p></div>
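The evaluation pattern described above can be sketched compactly: reduce a high-dimensional utterance-level feature set to a low-rank representation and classify emotions on the reduced features. PCA and an RBF-SVM stand in for the wider range of linear and non-linear reducers and classifiers actually compared; the data below are placeholders.

```python
# Dimensionality reduction followed by emotion classification on reduced features.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(1)
X = rng.normal(size=(300, 1582))      # e.g. IS10-sized functionals per utterance
y = rng.integers(0, 4, size=300)      # four emotion classes (placeholder labels)

clf = make_pipeline(StandardScaler(), PCA(n_components=20), SVC(kernel="rbf"))
print(cross_val_score(clf, X, y, cv=5).mean())
```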
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jitendra Kumar Dhiman|AUTHOR Jitendra Kumar Dhiman]]^^1^^, [[Nagaraj Adiga|AUTHOR Nagaraj Adiga]]^^2^^, [[Chandra Sekhar Seelamantula|AUTHOR Chandra Sekhar Seelamantula]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^University of Crete, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 944–948&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We address the problem of estimating the time-varying spectral envelope of a speech signal using a spectro-temporal demodulation technique. Unlike the conventional spectrogram, we consider a pitch-adaptive spectrogram and model a spectro-temporal patch using an amplitude- and frequency-modulated two-dimensional (2-D) cosine signal. We employ a demodulation technique based on the Riesz transform that we proposed recently to estimate the amplitude and frequency modulations. The amplitude modulation (AM) corresponds to the vocal-tract filter magnitude response (or envelope) and the frequency modulation (FM) corresponds to the excitation. We consider the AM and demonstrate its effectiveness by incorporating it as an acoustic feature for local conditioning in the statistical WaveNet vocoder for the task of speech synthesis. The quality of the synthesized speech obtained with the Riesz envelope is compared with that obtained using the envelope estimated by the WORLD vocoder. Objective measures and subjective listening tests on the CMU-Arctic database show that the quality of synthesis is superior to that obtained using the WORLD envelope. This study thus establishes the Riesz envelope as an efficient alternative to the WORLD envelope.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinzhou Xu|AUTHOR Xinzhou Xu]]^^1^^, [[Jun Deng|AUTHOR Jun Deng]]^^2^^, [[Nicholas Cummins|AUTHOR Nicholas Cummins]]^^3^^, [[Zixing Zhang|AUTHOR Zixing Zhang]]^^4^^, [[Li Zhao|AUTHOR Li Zhao]]^^5^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NJUPT, China; ^^2^^Agile Robots, Germany; ^^3^^Universität Augsburg, Germany; ^^4^^Imperial College London, UK; ^^5^^Southeast University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 949–953&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Conventionally, speech emotion recognition is achieved using passive learning approaches. Differing from such approaches, we herein propose and develop a dynamic method of autonomous emotion learning based on zero-shot learning. The proposed methodology employs emotional dimensions as the attributes in the zero-shot learning paradigm, resulting in two phases of learning, namely attribute learning and label learning. Attribute learning connects the paralinguistic features and attributes utilising speech with known emotional labels, while label learning aims at defining unseen emotions through the attributes. The experimental results achieved on the CINEMO corpus indicate that zero-shot learning is a useful technique for autonomous speech-based emotion learning, achieving accuracies considerably better than chance level and an attribute-based gold-standard setup. Furthermore, different emotion recognition tasks, emotional attributes, and employed approaches strongly influence system performance.</p></div>
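A hedged sketch of the two learning phases described above: attribute learning regresses emotional dimensions from paralinguistic features of seen emotions, and label learning assigns an unseen emotion by nearest prototype in attribute space. The feature dimensionality, attribute set and prototypes below are illustrative assumptions, not the paper’s setup.

```python
# Zero-shot emotion labelling via an attribute (dimension) regressor.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(2)
X_seen = rng.normal(size=(400, 88))          # paralinguistic features (assumed dim)
A_seen = rng.normal(size=(400, 2))           # e.g. arousal/valence annotations

attr_model = Ridge().fit(X_seen, A_seen)     # phase 1: attribute learning

unseen_prototypes = {                        # phase 2: attribute definitions
    "amused":    np.array([0.6, 0.7]),
    "irritated": np.array([0.7, -0.6]),
}

def zero_shot_label(x):
    a = attr_model.predict(x[None, :])[0]
    return min(unseen_prototypes,
               key=lambda k: np.linalg.norm(a - unseen_prototypes[k]))

print(zero_shot_label(rng.normal(size=88)))
```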
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sweekar Sudhakara|AUTHOR Sweekar Sudhakara]], [[Manoj Kumar Ramanathi|AUTHOR Manoj Kumar Ramanathi]], [[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 954–958&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Goodness of pronunciation (GoP) is typically formulated with Gaussian mixture model-hidden Markov model (GMM-HMM) based acoustic models, considering HMM state transition probabilities (STPs) and GMM likelihoods of context-dependent phonemes. On the other hand, deep neural network (DNN)-HMM based acoustic models employ sub-phonemic (senone) posteriors instead of GMM likelihoods, along with STPs. However, each senone is shared across many states; thus, there is no one-to-one correspondence between them. In order to circumvent this, most existing works have proposed modifications to the GoP formulation that consider only the posteriors and neglect the STPs. In this work, we derive a GoP formulation that involves both senone posteriors and STPs. Further, we illustrate the steps to implement the proposed GoP formulation in Kaldi, a state-of-the-art automatic speech recognition toolkit. Experiments are conducted on English data collected from Indian speakers, using acoustic models trained with native English data from the LibriSpeech and Fisher-English corpora. The highest improvement in the correlation coefficient between the formulation scores and the expert ratings is found to be 14.89% (relative) with the proposed approach compared to the best of the existing formulations that do not include STPs.</p></div>
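As a schematic illustration only, the snippet below scores a phone by averaging frame-level log senone posteriors along a forced alignment and adding log state transition probabilities, i.e. one place where both quantities could enter a GoP-style measure. The authors’ actual derivation and its Kaldi implementation differ in detail; all names and values here are hypothetical.

```python
# Duration-normalized log score combining senone posteriors and STPs.
import numpy as np

def gop_like_score(post, align, stp):
    """post: (T, n_senones) DNN posteriors; align: length-T senone ids from a
    forced alignment; stp: dict mapping (prev_senone, senone) -> transition prob."""
    logp = 0.0
    prev = None
    for t, s in enumerate(align):
        logp += np.log(post[t, s])
        if prev is not None:
            logp += np.log(stp.get((prev, s), 1.0))
        prev = s
    return logp / len(align)

T, S = 12, 4
post = np.full((T, S), 0.1)
post[np.arange(T), np.arange(T) % S] = 0.7           # toy posteriors
align = list(np.arange(T) % S)                       # toy alignment
print(gop_like_score(post, align, {(i, (i + 1) % S): 0.5 for i in range(S)}))
```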
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]]^^1^^, [[T. Arias-Vergara|AUTHOR T. Arias-Vergara]]^^1^^, [[Philipp Klumpp|AUTHOR Philipp Klumpp]]^^1^^, [[M. Strauss|AUTHOR M. Strauss]]^^1^^, [[A. Küderle|AUTHOR A. Küderle]]^^1^^, [[N. Roth|AUTHOR N. Roth]]^^1^^, [[S. Bayerl|AUTHOR S. Bayerl]]^^2^^, [[N. García-Ospina|AUTHOR N. García-Ospina]]^^3^^, [[P.A. Perez-Toro|AUTHOR P.A. Perez-Toro]]^^3^^, [[L.F. Parra-Gallego|AUTHOR L.F. Parra-Gallego]]^^3^^, [[Cristian David Rios-Urrego|AUTHOR Cristian David Rios-Urrego]]^^3^^, [[D. Escobar-Grisales|AUTHOR D. Escobar-Grisales]]^^3^^, [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]]^^1^^, [[B. Eskofier|AUTHOR B. Eskofier]]^^1^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FAU Erlangen-Nürnberg, Germany; ^^2^^Technische Hochschule Rosenheim, Germany; ^^3^^Universidad de Antioquia, Colombia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 964–965&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Parkinson’s disease is a neurological disorder that produces different motor impairments in the patients. The longitudinal assessment of the neurological state of patients is important to improve their quality of life. We introduced Apkinson, a smartphone application to evaluate continuously the speech and movement deficits of Parkinson’s patients, who receive feedback about their current state after performing different exercises. The speech assessment considers phonation, articulation, and prosody capabilities of the patients. Movement exercises captured with the inertial sensors of the smartphone evaluated symptoms in the upper and lower limbs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gábor Kiss|AUTHOR Gábor Kiss]], [[Dávid Sztahó|AUTHOR Dávid Sztahó]], [[Klára Vicsi|AUTHOR Klára Vicsi]]
</p><p class="cpabstractcardaffiliationlist">BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 966–967&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an application that detects depression by speech based on a speech feature extraction engine. The input of the application is a read speech sample and the output is predicted depression severity level (Beck Depression Inventory). The application analyses the speech sample and evaluates it using support vector regression (SVR). The developed system could assist general medical staff if no specialist is present to aid the diagnosis. If there is a suspicion that the speaker is suffering from depression, it is inevitable to seek special medical assistance. The application supports five native languages: English, French, German, Hungarian and Italian.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]]^^1^^, [[Aparna Srinivasan|AUTHOR Aparna Srinivasan]]^^1^^, [[Sravani Gottimukkala|AUTHOR Sravani Gottimukkala]]^^2^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^RGUKT, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 968–969&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Second language (L2) learners often achieve oral fluency by correct pronunciation of words with appropriate pauses. It has been shown that the L2 learners improve their language skills using mobile apps in a self-learning manner. Effective learning is possible with apps that provide detailed feedback. However, apps that train oral fluency in an automatic way are not available. In this work, we present SPIRE-fluent app, which provides an automatic feedback with scores representing learner’s pronunciation quality, for each word in a sentence and for the entire sentence. The word specific scores are computed based on the correctness of pronunciation with respect to the expert’s audio. Further, the app displays the syllables uttered and a set of two types of pauses produced by the learners and the expert while speaking the sentence. Considering this as a feedback, the learner can correct their mistakes based on the mismatches between those utterances. In addition, it also estimates any pause made by the learners within a word and highlights the syllable containing the phoneme preceding the pause.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shawn Nissen|AUTHOR Shawn Nissen]], [[Rebecca Nissen|AUTHOR Rebecca Nissen]]
</p><p class="cpabstractcardaffiliationlist">Brigham Young University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 970–971&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This demonstration will illustrate how using real-time visual biofeedback, through a relatively new type of electropalatographic (EPG) sensor, might facilitate improved pronunciation for learners of a second language (L2). The manner in which the EPG sensor is created and its use to track lingua-palatal articulation patterns will be described to individuals. This presentation will also include an explanation of how a student can visualize the contact patterns of their speech using the associated instructional software. A brief tutorial on the features of the instructional software will also be explained during the “show and tell” presentation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[A. Miwardelli|AUTHOR A. Miwardelli]]^^1^^, [[I. Gallagher|AUTHOR I. Gallagher]]^^2^^, [[J. Gibson|AUTHOR J. Gibson]]^^1^^, [[N. Katsos|AUTHOR N. Katsos]]^^1^^, [[Kate M. Knill|AUTHOR Kate M. Knill]]^^1^^, [[H. Wood|AUTHOR H. Wood]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Cambridge, UK; ^^2^^ig Projects, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 972–973&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a tablet-based app for Speech and Language Assessment in Schools and Homes ( Splash) to provide a first screening for young children aged 4–6 years to assess their speech and language skills. The app aims to be easy-to-administer with an adult, such as a teacher or parent, directing the child through the tasks. Three fun games have been developed to assess receptive language, expressive language and connected speech, respectively. Currently in proof-of-concept mode, when complete Splash will use automatic spoken language processing to give an instant estimate of a child’s communication ability and provide guidance on whether to speak specialist support. While not a diagnostic tool, the aim is for Splash to be used to provide immediate reassurance or direction to concerned parents, guardians or teachers as it can be administered by anyone, anywhere.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Colin T. Annand|AUTHOR Colin T. Annand]], [[Maurice Lamb|AUTHOR Maurice Lamb]], [[Sarah Dugan|AUTHOR Sarah Dugan]], [[Sarah R. Li|AUTHOR Sarah R. Li]], [[Hannah M. Woeste|AUTHOR Hannah M. Woeste]], [[T. Douglas Mast|AUTHOR T. Douglas Mast]], [[Michael A. Riley|AUTHOR Michael A. Riley]], [[Jack A. Masterson|AUTHOR Jack A. Masterson]], [[Neeraja Mahalingam|AUTHOR Neeraja Mahalingam]], [[Kathryn J. Eary|AUTHOR Kathryn J. Eary]], [[Caroline Spencer|AUTHOR Caroline Spencer]], [[Suzanne Boyce|AUTHOR Suzanne Boyce]], [[Stephanie Jackson|AUTHOR Stephanie Jackson]], [[Anoosha Baxi|AUTHOR Anoosha Baxi]], [[Reneé Seward|AUTHOR Reneé Seward]]
</p><p class="cpabstractcardaffiliationlist">University of Cincinnati, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 974–975&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Ultrasound images of the tongue surface can be used to provide real-time visual feedback for clinical practitioners and speakers adjusting pronunciation patterns. However, rapid and complex movements of the tongue can be difficult to interpret and directly relate to desired changes. We are developing a method for simplified visual feedback controlled by efficient, real-time tracking of tongue contours in ultrasound images. Our feedback and control paradigm are briefly discussed, and video of a potential game-like biofeedback stimulus is demonstrated.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vasiliy Radostev|AUTHOR Vasiliy Radostev]], [[Serge Berger|AUTHOR Serge Berger]], [[Justin Tabrizi|AUTHOR Justin Tabrizi]], [[Pasha Kamyshev|AUTHOR Pasha Kamyshev]], [[Hisami Suzuki|AUTHOR Hisami Suzuki]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 976–977&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel approach that introduces the strengths of voice assistants into a web browser that makes the task of web navigation a lot more accessible to all users, especially under limited mobility circumstances. Voice assistants have now been widely adopted and is providing great user experience for getting simple actions done quickly or getting a quick answer to a question. On the other hand, the benefits of voice assistants have not yet penetrated to the scenarios such as web navigation, which has so far been driven by mouse, keyboard and touch-based input only. In this paper, we demonstrate our speech-based web navigation system, and show that our system improves the completion of the web navigation task on both PC and mobile phone significantly as compared with an out-of-the-box voice assistants on this task.</p></div>
\rules except wikilink

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fei Wu|AUTHOR Fei Wu]], [[Leibny Paola García-Perera|AUTHOR Leibny Paola García-Perera]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1–5&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) has shown huge advances in adult speech; however, when the models are tested on child speech, the performance does not achieve satisfactory word error rates (WER). This is mainly due to the high variance in acoustic features of child speech and the lack of clean, labeled corpora. We apply the factored time delay neural network (TDNN-F) to the child speech domain, finding that it yields better performance. To enable our models to handle the different noise conditions and extremely small corpora, we augment the original training data by adding noise and reverberation. Compared with conventional GMM-HMM and TDNN systems, TDNN-F does better on two widely accessible corpora: CMU Kids and CSLU Kids, and on the combination of these two. Our system achieves a 26% relative improvement in WER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gary Yeung|AUTHOR Gary Yeung]], [[Abeer Alwan|AUTHOR Abeer Alwan]]
</p><p class="cpabstractcardaffiliationlist">University of California at Los Angeles, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 6–10&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Accurate automatic speech recognition (ASR) of kindergarten speech is particularly important as this age group may benefit the most from voice-based educational tools. Due to the lack of young child speech data, kindergarten ASR systems often are trained using older child or adult speech. This study proposes a fundamental frequency (f,,o,,)-based normalization technique to reduce the spectral mismatch between kindergarten and older child speech. The technique is based on the tonotopic distances between formants and f,,o,, developed to model vowel perception. This proposed procedure only relies on the computation of median f,,o,, across an utterance. Tonotopic distances for vowel perception were reformulated as a linear relationship between formants and f,,o,, to provide an effective approach for frequency normalization. This reformulation was verified by examining the formants and f,,o,, of child vowel productions. A 208-word ASR experiment using older child speech for training and kindergarten speech for testing was performed to examine the effectiveness of the proposed technique against piecewise vocal tract length, F3-based, and subglottal resonance normalization techniques. Results suggest that the proposed technique either has performance advantages or requires the computation of fewer parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Robert Gale|AUTHOR Robert Gale]], [[Liu Chen|AUTHOR Liu Chen]], [[Jill Dolata|AUTHOR Jill Dolata]], [[Jan van Santen|AUTHOR Jan van Santen]], [[Meysam Asgari|AUTHOR Meysam Asgari]]
</p><p class="cpabstractcardaffiliationlist">Oregon Health & Science University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 11–15&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study explores building and improving an automatic speech recognition (ASR) system for children aged 6–9 years and diagnosed with autism spectrum disorder (ASD), language impairment (LI), or both. Working with only 1.5 hours of target data in which children perform the Clinical Evaluation of Language Fundamentals Recalling Sentences task, we apply deep neural network (DNN) weight transfer techniques to adapt a large DNN model trained on the LibriSpeech corpus of adult speech. To begin, we aim to find the best proportional training rates of the DNN layers. Our best configuration yields a 29.38% word error rate (WER). Using this configuration, we explore the effects of quantity and similarity of data augmentation in transfer learning. We augment our training with portions of the OGI Kids’ Corpus, adding 4.6 hours of typically developing speakers aged kindergarten through 3^^rd^^ grade. We find that 2^^nd^^ grade data alone — approximately the mean age of the target data — outperforms other grades and all the sets combined. Doubling the data for 1^^st^^, 2^^nd^^, and 3^^rd^^ grade, we again compare each grade as well as pairs of grades. We find the combination of 1^^st^^ and 2^^nd^^ grade performs best at a 26.21% WER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manuel Sam Ribeiro|AUTHOR Manuel Sam Ribeiro]], [[Aciel Eshky|AUTHOR Aciel Eshky]], [[Korin Richmond|AUTHOR Korin Richmond]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 16–20&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate the automatic processing of child speech therapy sessions using ultrasound visual biofeedback, with a specific focus on complementing acoustic features with ultrasound images of the tongue for the tasks of speaker diarization and time-alignment of target words. For speaker diarization, we propose an ultrasound-based time-domain signal which we call estimated tongue activity. For word-alignment, we augment an acoustic model with low-dimensional representations of ultrasound images of the tongue, learned by a convolutional neural network. We conduct our experiments using the Ultrasuite repository of ultrasound and speech recordings for child speech therapy sessions. For both tasks, we observe that systems augmented with ultrasound data outperform corresponding systems using only the audio signal.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anastassia Loukina|AUTHOR Anastassia Loukina]], [[Beata Beigman Klebanov|AUTHOR Beata Beigman Klebanov]], [[Patrick Lange|AUTHOR Patrick Lange]], [[Yao Qian|AUTHOR Yao Qian]], [[Binod Gyawali|AUTHOR Binod Gyawali]], [[Nitin Madnani|AUTHOR Nitin Madnani]], [[Abhinav Misra|AUTHOR Abhinav Misra]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Zuowei Wang|AUTHOR Zuowei Wang]], [[John Sabatini|AUTHOR John Sabatini]]
</p><p class="cpabstractcardaffiliationlist">Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 21–25&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Use of speech technologies in the classroom is often limited by the inferior acoustic conditions as well as other factors that might affect the quality of the recordings. We describe MyTurnToRead, an e-book-based app designed to support an interleaved listening and reading experience, where the child takes turns reading aloud with a virtual partner. The child’s reading turns are recorded, and processed by an automated speech analysis system in order to provide feedback or track improvement in reading skill. We describe the architecture of the speech processing back-end and evaluate system performance on the data collected in several summer camps where children used the app on consumer-grade devices as part of the camp programming. We show that while the quality of the audio recordings varies greatly, our estimates of student oral reading fluency are very good: for example, the correlation between ASR-based and transcription-based estimates of reading fluency at the speaker level is r=0.93. These are also highly correlated with an external measure of reading comprehension.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vanessa Lopes|AUTHOR Vanessa Lopes]], [[João Magalhães|AUTHOR João Magalhães]], [[Sofia Cavaco|AUTHOR Sofia Cavaco]]
</p><p class="cpabstractcardaffiliationlist">Universidade Nova de Lisboa, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 26–30&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Problems in vocal quality are common in 4 to 12-year-old children, which may affect their health as well as their social interactions and development process. The sustained vowel exercise is widely used by speech and language pathologists for the child’s voice recovery and vocal re-education. Nonetheless, despite being an important voice exercise, it can be a monotonous and tedious activity for children. Here, we propose a computer therapy game that uses the sustained vowel exercise to motivate children on doing this exercise often. In addition, the game gives visual feedback on the child’s performance, which helps the child understand how to improve the voice production. The game uses a vowel classification model learned with a support vector machine and Mel frequency cepstral coefficients. A user test with 14 children showed that when using the game, children achieve longer phonation times than without the game. Also, it shows that the visual feedback helps and motivates children on improving their sustained vowel productions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna Esposito|AUTHOR Anna Esposito]]^^1^^, [[Terry Amorese|AUTHOR Terry Amorese]]^^1^^, [[Marialucia Cuciniello|AUTHOR Marialucia Cuciniello]]^^1^^, [[Maria Teresa Riviello|AUTHOR Maria Teresa Riviello]]^^1^^, [[Antonietta M. Esposito|AUTHOR Antonietta M. Esposito]]^^2^^, [[Alda Troncone|AUTHOR Alda Troncone]]^^1^^, [[Gennaro Cordasco|AUTHOR Gennaro Cordasco]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universitá della Campania “Luigi Vanvitelli”, Italy; ^^2^^INGV, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 31–35&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The research on ambient assistive technology is concerned with features humanoid agents should show in order to gain user acceptance. However, differently aged groups may have different requirements. This paper is particularly focused on agent’s voice preferences among elders, young adults, and adolescents.

To this aim, 316 users were recruited, organized in groups of 45–46 subjects: 3 groups of elders (65+ years old), 2 of young adults (aged 22–35 years), and 2 of adolescents (aged 14–16 years). They were administered the Virtual Agent Acceptance Questionnaire (VAAQ) after watching video clips of mute and speaking agents, in order to test their preferences in terms of willingness to interact, pragmatic and hedonic qualities, and attractiveness of the proposed speaking and mute agents. In addition, the elders were also tested on listening to the agent’s voice alone. The results suggest that voice is primary for gaining elders’ acceptance of virtual humanoid agents, in contrast to young adults and adolescents, who accept mute and speaking agents equally well.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]^^1^^, [[Uffe Schjoedt|AUTHOR Uffe Schjoedt]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern Denmark, Denmark; ^^2^^Aarhus University, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 36–40&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We analyze the phonetic correlates of petitionary prayer in 22 Christian practitioners. Our aim is to examine if praying is characterized by prosodic markers of dialogue speech and expected efficacy. Three similar conditions are compared; 1) requests to God, 2) requests to a human recipient, 3) requests to an imaginary person. We find that making requests to God is clearly distinguishable from making requests to both human and imaginary interlocutors. Requests to God are, unlike requests to an imaginary person, characterized by markers of dialogue speech (as opposed to monologue speech), including, a higher f0 level, a larger f0 range, and a slower speaking rate. In addition, requests to God differ from those made to both human and imaginary persons in markers of expected efficacy on the part of the speaker. These markers are related to a more careful speech production, including almost complete lack of hesitations, more pauses, and a much longer speaking time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 41–45&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study explores whether people align to expressive speech spoken by a voice-activated artificially intelligent device (voice-AI), specifically Amazon’s Alexa. Participants shadowed words produced by the Alexa voice in two acoustically distinct conditions: “regular” and “expressive”, containing more exaggerated pitch contours and longer word durations. Another group of participants rated the shadowed items, in an AXB perceptual similarity task, as an assessment of overall degree of vocal alignment. Results show greater vocal alignment toward expressive speech produced by the Alexa voice and, furthermore, systematic variation based on speaker gender. Overall, these findings have applications to the field of affective computing in understanding human responses to synthesized emotional expressiveness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Catherine Lai|AUTHOR Catherine Lai]]^^1^^, [[Beatrice Alex|AUTHOR Beatrice Alex]]^^1^^, [[Johanna D. Moore|AUTHOR Johanna D. Moore]]^^1^^, [[Leimin Tian|AUTHOR Leimin Tian]]^^2^^, [[Tatsuro Hori|AUTHOR Tatsuro Hori]]^^3^^, [[Gianpiero Francesca|AUTHOR Gianpiero Francesca]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Edinburgh, UK; ^^2^^Monash University, Australia; ^^3^^Toyota, Japan; ^^4^^Toyota, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 46–50&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Being able to detect topics and speaker stances in conversations is a key requirement for developing spoken language understanding systems that are personalized and adaptive. In this work, we explore how topic-oriented speaker stance is expressed in conversational speech. To do this, we present a new set of topic and stance annotations of the CallHome corpus of spontaneous dialogues. Specifically, we focus on six stances — positivity, certainty, surprise, amusement, interest, and comfort — which are useful for characterizing important aspects of a conversation, such as whether a conversation is going well or not. Based on this, we investigate the use of neural network models for automatically detecting speaker stance from speech in multi-turn, multi-speaker contexts. In particular, we examine how performance changes depending on how input feature representations are constructed and how this is related to dialogue structure. Our experiments show that incorporating both lexical and acoustic features is beneficial for stance detection. However, we observe variation in whether using hierarchical models for encoding lexical and acoustic information improves performance, suggesting that some aspects of speaker stance are expressed more locally than others. Overall, our findings highlight the importance of modelling interaction dynamics and non-lexical content for stance detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jilt Sebastian|AUTHOR Jilt Sebastian]], [[Piero Pierucci|AUTHOR Piero Pierucci]]
</p><p class="cpabstractcardaffiliationlist">Telepathy Labs, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 51–55&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In human perception and understanding, a number of different and complementary cues are adopted according to different modalities. Various emotional states in communication between humans reflect this variety of cues across modalities. Recent developments in multi-modal emotion recognition utilize deep-learning techniques to achieve remarkable performances, with models based on different features suitable for text, audio and vision. This work focuses on cross-modal fusion techniques over deep learning models for emotion detection from spoken audio and corresponding transcripts.

We investigate the use of long short-term memory (LSTM) recurrent neural network (RNN) with pre-trained word embedding for text-based emotion recognition and convolutional neural network (CNN) with utterance-level descriptors for emotion recognition from speech. Various fusion strategies are adopted on these models to yield an overall score for each of the emotional categories. Intra-modality dynamics for each emotion is captured in the neural network designed for the specific modality. Fusion techniques are employed to obtain the inter-modality dynamics. Speaker and session-independent experiments on IEMOCAP multi-modal emotion detection dataset show the effectiveness of the proposed approaches. This method yields state-of-the-art results for utterance-level emotion recognition based on speech and text.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marvin Rajwadi|AUTHOR Marvin Rajwadi]]^^1^^, [[Cornelius Glackin|AUTHOR Cornelius Glackin]]^^2^^, [[Julie Wall|AUTHOR Julie Wall]]^^1^^, [[Gérard Chollet|AUTHOR Gérard Chollet]]^^2^^, [[Nigel Cannings|AUTHOR Nigel Cannings]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of East London, UK; ^^2^^Intelligent Voice, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 56–60&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a novel 1-D sentiment classifier trained on the benchmark IMDB dataset. The classifier is a 1-D convolutional neural network with repeated convolution and max pooling layers. The main contribution of this work is the demonstration of a deconvolution technique for 1-D convolutional neural networks that is agnostic to specific architecture types. This deconvolution technique enables text classification to be explained, a feature that is important for NLP-based decision support systems, as well as being an invaluable diagnostic tool.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ricardo Kleinlein|AUTHOR Ricardo Kleinlein]]^^1^^, [[Cristina Luna Jiménez|AUTHOR Cristina Luna Jiménez]]^^1^^, [[Juan Manuel Montero|AUTHOR Juan Manuel Montero]]^^1^^, [[Zoraida Callejas|AUTHOR Zoraida Callejas]]^^2^^, [[Fernando Fernández-Martínez|AUTHOR Fernando Fernández-Martínez]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad Politécnica de Madrid, Spain; ^^2^^Universidad de Granada, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 61–65&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Electrodermal activity (EDA) is a psychophysiological indicator that can be considered a somatic marker of the emotional and attentional reaction of subjects towards stimuli like audiovisual content. EDA measurements are not biased by the cognitive process of giving an opinion or a score to characterize the subjective perception, and group-level EDA recordings integrate the reaction of an audience, thus reducing the signal noise. This paper contributes to the field of audience’s attention prediction to video content, extending previous novel work on the use of EDA as ground truth for prediction algorithms. Videos are segmented into shorter clips attending to the audience’s increasing or decreasing attention, and we process videos’ audio waveform to extract meaningful aural embeddings from a VGGish model pretrained on the Audioset database. While previous similar work on attention level prediction using only audio accomplished 69.83% accuracy, we propose a Mixture of Experts approach to train a binary classifier that outperforms the main existing state-of-the-art approaches predicting increasing and decreasing attention levels with 81.76% accuracy. These results confirm the usefulness of providing acoustic features with a semantic significance, and the convenience of considering experts over partitions of the dataset in order to predict group-level attention from audio.</p></div>
\rules except wikilink

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mirella Lapata|AUTHOR Mirella Lapata]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>In Spike Jonze’s futuristic film “Her”, Theodore, a lonely writer, forms a strong emotional bond with Samantha, an operating system designed to meet his every need. Samantha can carry on seamless conversations with Theodore, exhibits a perfect command of language, and is able to take on complex tasks. She filters his emails for importance, allowing him to deal with information overload, she proactively arranges the publication of Theodore’s letters, and is able to give advice using common sense and reasoning skills.

In this talk I will present an overview of recent progress on learning natural language interfaces which might not be as clever as Samantha but nevertheless allow users to interact with various devices and services using everyday language. I will address the structured prediction problem of mapping natural language utterances onto machine-interpretable representations and outline the various challenges it poses. For example, the translation of natural language to formal language is highly non-isomorphic, data for model training is scarce, and natural language can express the same information need in many different ways. I will describe a general modeling framework based on neural networks which tackles these challenges and improves the robustness of natural language interfaces.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Odette Scharenborg|AUTHOR Odette Scharenborg]]
</p><p class="cpabstractcardaffiliationlist">Technische Universiteit Delft, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>The fields of human speech recognition (HSR) and automatic speech recognition (ASR) both investigate parts of the speech recognition process and have word recognition as their central issue. Although the research fields appear closely related, their aims and research methods are quite different. Despite these differences there is, however, in the past two decades a growing interest in possible cross-fertilization. Researchers from both ASR and HSR are realizing the potential benefit of looking at the research field on the other side of the ‘gap’. In this survey talk, I will provide an overview of past and present efforts to link human and automatic speech recognition research and present an overview of the literature describing the performance difference between machines and human listeners. The focus of the talk is on the mutual benefits to be derived from establishing closer collaborations and knowledge interchange between ASR and HSR.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Shigeki Karita|AUTHOR Shigeki Karita]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3900–3904&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We have proposed a neural network (NN) model called a deep duel model (DDM) for rescoring N-best speech recognition hypothesis lists. A DDM is composed of a long short-term memory (LSTM)-based encoder followed by a fully-connected linear layer-based binary-class classifier. Given the feature vector sequences of two hypotheses in an N-best list, the DDM encodes the features and selects the hypothesis that has the lower word error rate (WER) based on the output binary-class probabilities. By repeating this one-on-one hypothesis comparison (duel) for each hypothesis pair in the N-best list, we can find the oracle (lowest WER) hypothesis as the survivor of the duels. We showed that the DDM can exploit the score provided by a forward LSTM-based recurrent NN language model (LSTMLM) as an additional feature to accurately select the hypotheses. In this study, we further improve the selection performance by introducing two modifications, i.e. adding the score provided by a backward LSTMLM, which uses succeeding words to predict the current word, and employing ensemble encoders, which have a high feature encoding capability. By combining these two modifications, our DDM achieves an over 10% relative WER reduction from a strong baseline obtained using both the forward and backward LSTMLMs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kazuki Irie|AUTHOR Kazuki Irie]], [[Albert Zeyer|AUTHOR Albert Zeyer]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3905–3909&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We explore deep autoregressive Transformer models in language modeling for speech recognition. We focus on two aspects. First, we revisit Transformer model configurations specifically for language modeling. We show that well configured Transformer models outperform our baseline models based on the shallow stack of LSTM recurrent neural network layers. We carry out experiments on the open-source LibriSpeech 960hr task, for both 200K vocabulary word-level and 10K byte-pair encoding subword-level language modeling. We apply our word-level models to conventional hybrid speech recognition by lattice rescoring, and the subword-level models to attention based encoder-decoder models by shallow fusion. Second, we show that deep Transformer language models do not require positional encoding. The positional encoding is an essential augmentation for the self-attention mechanism which is invariant to sequence ordering. However, in autoregressive setup, as is the case for language modeling, the amount of information increases along the position dimension, which is a positional signal by its own. The analysis of attention weights shows that deep autoregressive self-attention models can automatically make use of such positional information. We find that removing the positional encoding even slightly improves the performance of these models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anirudh Raju|AUTHOR Anirudh Raju]], [[Denis Filimonov|AUTHOR Denis Filimonov]], [[Gautam Tiwari|AUTHOR Gautam Tiwari]], [[Guitang Lan|AUTHOR Guitang Lan]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3910–3914&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural language models (NLM) have been shown to outperform conventional n-gram language models by a substantial margin in Automatic Speech Recognition (ASR) and other tasks. There are, however, a number of challenges that need to be addressed for an NLM to be used in a practical large-scale ASR system. In this paper, we present solutions to some of the challenges, including training NLM from heterogenous corpora, limiting latency impact and handling personalized bias in the second-pass rescorer. Overall, we show that we can achieve a 6.2% relative WER reduction using neural LM in a second-pass n-best rescoring framework with a minimal increase in latency.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tatiana Likhomanenko|AUTHOR Tatiana Likhomanenko]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3915–3919&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Lexicon-free speech recognition naturally deals with the problem of out-of-vocabulary (OOV) words. In this paper, we show that character-based language models (LM) can perform as well as word-based LMs for speech recognition, in word error rates (WER), even without restricting the decoding to a lexicon. We study character-based LMs and show that convolutional LMs can effectively leverage large (character) contexts, which is key for good speech recognition performance downstream. We specifically show that the lexicon-free decoding performance (WER) on utterances with OOV words using character-based LMs is better than lexicon-based decoding, both with character or word-based LMs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siddique Latif|AUTHOR Siddique Latif]]^^1^^, [[Rajib Rana|AUTHOR Rajib Rana]]^^1^^, [[Sara Khalifa|AUTHOR Sara Khalifa]]^^2^^, [[Raja Jurdak|AUTHOR Raja Jurdak]]^^2^^, [[Julien Epps|AUTHOR Julien Epps]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern Queensland, Australia; ^^2^^CSIRO, Australia; ^^3^^UNSW, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3920–3924&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition is a challenging task and heavily depends on hand-engineered acoustic features, which are typically crafted to echo human perception of speech signals. However, a filter bank that is designed from perceptual evidence is not always guaranteed to be the best in a statistical modelling framework where the end goal is for example emotion classification. This has fuelled the emerging trend of learning representations from raw speech especially using deep learning neural networks. In particular, a combination of Convolution Neural Networks (CNNs) and Long Short Term Memory (LSTM) have gained great traction for the intrinsic property of LSTM in learning contextual information crucial for emotion recognition; and CNNs been used for its ability to overcome the scalability problem of regular neural networks. In this paper, we show that there are still opportunities to improve the performance of emotion recognition from the raw speech by exploiting the properties of CNN in modelling contextual information. We propose the use of parallel convolutional layers to harness multiple temporal resolutions in the feature extraction block that is jointly trained with the LSTM based classification network for the emotion recognition task. Our results suggest that the proposed model can reach the performance of CNN trained with hand-engineered features from both IEMOCAP and MSP-IMPROV datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mousmita Sarma|AUTHOR Mousmita Sarma]]^^1^^, [[Pegah Ghahremani|AUTHOR Pegah Ghahremani]]^^2^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^2^^, [[Nagendra Kumar Goel|AUTHOR Nagendra Kumar Goel]]^^3^^, [[Kandarpa Kumar Sarma|AUTHOR Kandarpa Kumar Sarma]]^^4^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Gauhati University, India; ^^2^^Johns Hopkins University, USA; ^^3^^GoVivace, USA; ^^4^^Gauhati University, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3925–3929&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose to exploit phone posteriors as an additional feature in Deep Neural Network (DNN) to recognize emotions from raw speech waveform. The proposed DNN setup uses a time domain approach of learning filters within the network. The frame-level phone posteriors are combined with the learned feature representation through the network. Appended learned time domain features and phone posteriors are used as an input to the temporal context modeling layers which interleaves TDNN-LSTM with time-restricted self-attention. We achieve 16.48% relative error rate improvement in IEMOCAP categorical problem (with a final weighted accuracy of 75.03%) using phone posteriors compared to DNN setup which uses only learned time domain features for temporal context modeling. Further, we study the effect of learning emotion categories leveraging dimensional primitives in multi-task learning DNN model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Miao Cao|AUTHOR Miao Cao]], [[Chun Yang|AUTHOR Chun Yang]], [[Fang Zhou|AUTHOR Fang Zhou]], [[Xu-cheng Yin|AUTHOR Xu-cheng Yin]]
</p><p class="cpabstractcardaffiliationlist">USTB, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3930–3934&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As a sequence model, Deep Feedforward Sequential Memory Network (DFSMN) has shown superior performance on many tasks, such as language modeling and speech recognition. Based on this work, we propose an improved speech emotion recognition (SER) end-to-end system. Our model comprises both CNN layers and pyramid FSMN layers, where CNN layers are added at the front of the network to extract more sophisticated features. A timestep attention mechanism is also integrated into our SER system, which makes the system learn how to focus on the more robust or informative segments in the input signal. Furthermore, different from traditional SER systems, the proposed model is applied directly to spectrograms which contain more raw speech information, rather than well-established hand-crafted speech features such as spectral, cepstral and pitch. Finally, we evaluate our system on the Interactive Emotional Motion Capture (IEMOCAP) database. The experimental results show that our system achieves 2.67% improvement compared to the commonly used CNN-biLSTM model which requires much more computing resource.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christopher Oates|AUTHOR Christopher Oates]], [[Andreas Triantafyllopoulos|AUTHOR Andreas Triantafyllopoulos]], [[Ingmar Steiner|AUTHOR Ingmar Steiner]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">audEERING, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3935–3939&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In an era where large speech corpora annotated for emotion are hard to come by, and especially ones where emotion is expressed freely instead of being acted, the importance of using free online sources for collecting such data cannot be overstated. Most of those sources, however, contain encoded audio due to storage and bandwidth constraints, often in very low bitrates. In addition, with the increased industry interest on voice-based applications, it is inevitable that speech emotion recognition (SER) algorithms will soon find their way into production environments, where the audio might be encoded in a different bitrate than the one available during training. Our contribution is threefold. First, we show that encoded audio still contains enough relevant information for robust SER. Next, we investigate the effects of mismatched encoding conditions in the training and test set both for traditional machine learning algorithms built on hand-crafted features and modern end-to-end methods. Finally, we investigate the robustness of those algorithms in the multi-condition scenario, where the training set is augmented with encoded audio, but still differs from the training set. Our results indicate that end-to-end methods are more robust even in the more challenging scenario of mismatched conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]
</p><p class="cpabstractcardaffiliationlist">MTA-SZTE RGAI, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3940–3944&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Bag-of-Audio-Word (or BoAW) representation is an utterance-level feature representation approach that was successfully applied in the past in various computational paralinguistic tasks. Here, we extend the BoAW feature extraction process with the use of Deep Neural Networks: first we train a DNN acoustic model on an acoustic dataset consisting of 22 hours of speech for phoneme identification, then we evaluate this DNN on a standard paralinguistic dataset. To construct utterance-level features from the frame-level posterior vectors, we calculate their BoAW representation. We found that this approach can be utilized even on its own, although the results obtained lag behind those of the standard paralinguistic approach, and the optimal size of the extracted feature vectors tends to be large. Our approach, however, can be easily and efficiently combined with the standard paralinguistic one, resulting in the highest Unweighted Average Recall (UAR) score achieved so far for a general paralinguistic dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jennifer Williams|AUTHOR Jennifer Williams]], [[Simon King|AUTHOR Simon King]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3945–3949&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Our goal is to separate out speaking style from speaker identity in utterance-level representations of speech such as i-vectors and x-vectors. We first show that both i-vectors and x-vectors contain information not only about speaker but also about speaking style (for one data set) or emotion (for another data set), even when projected into a low-dimensional space. To disentangle these factors, we use an autoencoder in which the latent space is split into two subspaces. The entangled information about speaker and style/emotion is pushed apart by the use of auxiliary classifiers that take one of the two latent subspaces as input and that are jointly learned with the autoencoder. We evaluate how well the latent subspaces separate the factors by using them as input to separate style/emotion classification tasks. In traditional speaker identification tasks, speaker-invariant characteristics are factorized from channel and then the channel information is ignored. Our results suggest that this so-called channel may contain exploitable information, which we refer to as  style factors. Finally, we propose future work to use information theory to formalize  style factors in the context of speaker identity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-Yin Hsu|AUTHOR Yu-Yin Hsu]]^^1^^, [[Anqi Xu|AUTHOR Anqi Xu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^PolyU, China; ^^2^^University College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3950–3954&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We report results of a speech production experiment about the intonation of three sentence types in Taiwan Mandarin, and discuss our results with implications for focus acoustics, and semantic-syntactic theories of sentence final particles and  wh-indeterminates.  Wh-indeterminates refer to  wh-phrases that are ambiguous between interrogative and indefinite readings. In Mandarin, different interpretations of  wh-indeterminates are not morphologically marked, but can be disambiguated in specific sentence contexts marked by sentence final particles. In this study, we systematically examined the intonation of  wh-questions and  yes/no questions by using declarative sentences as the baseline. The results show that both  wh- and  yes/no questions exhibit F0 prominence, and lengthening effects on regions containing sentence-final particles and  wh-phrases, but the effects were stronger in  wh-questions. Examining the duration and F0 range, we found that  wh-phrases and sentence final particles together formed specific acoustic patterns to distinguish questions from declarative sentences. The findings suggest that the prosodic organization interacts with other internal structural organization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fang Hu|AUTHOR Fang Hu]]^^1^^, [[Youjue He|AUTHOR Youjue He]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CASS, China; ^^2^^UCASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3955–3959&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Frication is not a common feature in characterizing vowels. However, Chinese dialects are known for having apical vowels. Additionally, there are fricative high vowels in a few dialects. This paper describes the phonetics and phonology of the vowels in the Rui’an Wu Chinese dialect, with an emphasis on vowel features distinguishing the high vowels. Rui’an has 12 monophthongs [i y ʉ e ø ε  a ɿ ɔ o u ɯ]; and half of them [i y ʉ ɿ  u ɯ] are high vowels. Formant data from 10 native speakers, 5 male and 5 female, were analyzed. And acoustic results reveal that [ɿ] is an apical vowel with significantly higher frication than other high vowels, whereas the difference in frication between [ʉ ɯ] and [y u] respectively is not confirmed. Rather, spectral difference plays a more important role in the distinction between labiodental high vowels [ʉ ɯ] and their plain rounded counterparts [y u].</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenrui Zhang|AUTHOR Zhenrui Zhang]]^^1^^, [[Fang Hu|AUTHOR Fang Hu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UCASS, China; ^^2^^CASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3960–3964&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Based on an acoustic analysis of speech data from 10 speakers, 5 male and 5 female, this paper describes the phonetics and phonology of the vowels and diphthongs in the Xupu Xiang Chinese dialect. Results suggest that monophthongs and falling diphthongs should be grouped together, since the production of them is a single articulatory event. Falling diphthongs are composed of a dynamic spectral target, while monophthongs are composed of a static spectral target. But rising diphthongs are sequences of two spectral targets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luciana Albuquerque|AUTHOR Luciana Albuquerque]], [[Catarina Oliveira|AUTHOR Catarina Oliveira]], [[António Teixeira|AUTHOR António Teixeira]], [[Pedro Sa-Couto|AUTHOR Pedro Sa-Couto]], [[Daniela Figueiredo|AUTHOR Daniela Figueiredo]]
</p><p class="cpabstractcardaffiliationlist">Universidade de Aveiro, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3965–3969&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study addresses effects of age and gender on acoustics of European Portuguese oral vowels, given to the fact of conflicting findings reported in prior research. Fundamental frequency (F0), formant frequencies (F1 and F2) and duration of vowels produced by a group of 113 adults, aged between 35 and 97 years old, were measured. Vowel space area (VSA) according to gender and age was also analysed. The results revealed that the most consistent age-related effect was an increase in vowel duration in both genders. F0 decreases above [50–64] for female and for male data suggests a slight drop over the age range [35–64] and then an increase in an older age. That is, F0 tends to be closer between genders as age increases. In general, there is no evidence that F1 and F2 frequencies were lowering as age increased. Furthermore, there were no changes to VSA with ageing. These results provide a base of information to establish vowel acoustics normal patterns of ageing among Portuguese adults.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wendy Lalhminghlui|AUTHOR Wendy Lalhminghlui]], [[Viyazonuo Terhiija|AUTHOR Viyazonuo Terhiija]], [[Priyankoo Sarmah|AUTHOR Priyankoo Sarmah]]
</p><p class="cpabstractcardaffiliationlist">IIT Guwahati, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3970–3974&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Intrinsic F0 (IF0) has been considered a phonetic phenomenon that has a physiological basis. However, considering cross linguistic variation in IF0, it is also assumed that there is an amount of speaker intended control on IF0. This work looks into the two tone languages spoken in North East India and confirms the evidence of IF0 in the languages. However, it also shows that as soon as speakers exert control over F0 for tone production, IF0 differences diminish. As previously reported, in this study too, IF0 differences were noticed to be more pronounced in the higher F0 regions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jenifer Vega Rodríguez|AUTHOR Jenifer Vega Rodríguez]]
</p><p class="cpabstractcardaffiliationlist">Université Sorbonne Nouvelle, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3975–3979&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Korebaju [kòrèβáhᵼ̀] (ISO 639-3: coe) is a Western Tukanoan language from the South-Western part of Colombia. A study conducted in 2017 and 2018 with six native speakers (3 female and 3 male) shows that Korebaju has an inventory of 17 consonants /p, t, k, p^^h^^, t^^h^^, k^^h^^, β, ɸ, s, h, tʃ͡, m, n, ɲ, ^^h^^m, ^^h^^ŋ, r/ and 6 oral vowels /i, e, a, o, u, ɨ/, 6 nasal vowels /ĩ, ẽ, ã, õ, ũ, ᵼ̃/ and 3 glottal vowels /a^^ʔ^^, e^^ʔ^^, o^^ʔ^^/. Contrary to previous studies, this paper shows that Korebaju does not include the vowel [ɯ] in its phonemic inventory. The vowel [ɯ] is an allophone of the high back vowel /u/ when it follows a palatal consonant. In the same context, the high central vowel /ɨ/ also has an allophone [ɪ]. This paper focuses on an acoustic and articulatory description. Data come from a set of words recorded with synchronized audio and EGG signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Omnia Ibrahim|AUTHOR Omnia Ibrahim]]^^1^^, [[Gabriel Skantze|AUTHOR Gabriel Skantze]]^^2^^, [[Sabine Stoll|AUTHOR Sabine Stoll]]^^1^^, [[Volker Dellwo|AUTHOR Volker Dellwo]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Zürich, Switzerland; ^^2^^KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3980–3984&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In human-human interactions, the situational context plays a large role in the degree of speakers’ accommodation. In this paper, we investigate whether the degree of accommodation in a human-robot computer game is affected by (a) the duration of the interaction and (b) the success of the players in the game. 30 teams of two players played two card games with a conversational robot in which they had to find a correct order of five cards. After game 1, the players received the result of the game on a success scale from 1 (lowest success) to 5 (highest). Speakers’ f,,o,, accommodation was measured as the Euclidean distance between the human speakers and each human and the robot. Results revealed that (a) the duration of the game had no influence on the degree of f,,o,, accommodation and (b) the result of Game 1 correlated with the degree of f,,o,, accommodation in Game 2 (higher success equals lower Euclidean distance). We argue that game success is most likely considered as a sign of the success of players’ cooperation during the discussion, which leads to a higher accommodation behavior in speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Petra Wagner|AUTHOR Petra Wagner]], [[Nataliya Bryhadyr|AUTHOR Nataliya Bryhadyr]], [[Marin Schröer|AUTHOR Marin Schröer]]
</p><p class="cpabstractcardaffiliationlist">Universität Bielefeld, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3985–3989&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous research identified a differential contribution of information structure and the visibility of facial and contextual information to the acoustic-prosodic expression of pitch accents. However, it is unclear whether pitch accent shapes are affected by these conditions as well. To investigate whether varying context cues have a differentiated impact on pitch accent trajectories produced in conversational interaction, we modified the visibility conditions in a spontaneous dyadic interaction task, i.e. a verbalized version of TicTacToe. Besides varying visibility, the game task allows for measuring the impact of information-structure on pitch accent trajectories, differentiating important and unpredictable game moves. Using GAMMs on four speaker groups (identified by a cluster analysis), we could isolate varying strategies of prosodic adaptation to contextual change. While few speaker groups showed a reaction to the availability of visible context cues (facial prosody or executed game moves), all groups differentiated the verbalization of unpredictable and predictable game moves with a group-specific trajectory adaptation. The importance of game moves resulted in differentiated adaptations in two out of four speaker groups. The detected strategic trajectory adaptations were characterized by different characteristics of boundary tones, adaptations of the global f0-level, or the shape of the corresponding pitch accent.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Simon Betz|AUTHOR Simon Betz]]^^1^^, [[Sina Zarrieß|AUTHOR Sina Zarrieß]]^^1^^, [[Éva Székely|AUTHOR Éva Székely]]^^2^^, [[Petra Wagner|AUTHOR Petra Wagner]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Bielefeld, Germany; ^^2^^KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3990–3994&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Synthetic speech can be used to express uncertainty in dialogue systems by means of hesitation. If a phrase like “Next to the green tree” is uttered in a hesitant way, that is, containing lengthening, silences, and fillers, the listener can infer that the speaker is not certain about the concepts referred to. However, we do not know anything about the referential domain of the uncertainty; if only a particular word in this sentence would be uttered hesitantly, e.g. “the greee:n tree”, the listener could infer that the uncertainty refers to the color in the statement, but not to the object. In this study, we show that the domain of the uncertainty is controllable. We conducted an experiment in which color words in sentences like “search for the green tree” were lengthened in two different positions: word onsets or final consonants, and participants were asked to rate the uncertainty regarding color and object. The results show that initial lengthening is predominantly associated with uncertainty about the word itself, whereas final lengthening is primarily associated with the following object. These findings enable dialogue system developers to finely control the attitudinal display of uncertainty, adding nuances beyond the lexical content to message delivery.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuke Si|AUTHOR Yuke Si]]^^1^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Mengfei Wu|AUTHOR Mengfei Wu]]^^1^^, [[Aijun Li|AUTHOR Aijun Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^CASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3995–3999&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Question detection from dialogs is important in human-computer interaction systems. Recent studies on question detection mostly use recurrent neural network (RNN) based methods to process low-level descriptors (LLD) of the utterance. However, there are three main problems in these studies. Firstly, traditional LLD features are defined based on human a priori knowledge, some of which are difficult to be extracted accurately. Secondly, previous studies of question detection only consider features from amplitude information and ignored phase information. Thirdly, previous studies show that the context in an utterance is helpful to detect question, while the context between utterances is not well investigated in this task. To cope with the aforementioned problems, we propose a CNN-BLSTM based framework, where amplitude information is obtained from the combination of spectrogram and LLD, and processed together with the phase information. Our framework also models the context information in the dialog. From the experiments on Mandarin dialog corpus, we revealed the effectiveness of the integrated feature with both amplitude and phase in question detection. The results indicated that the phase feature was helpful to detect the questions with a short duration, and the context between utterances was beneficial to detect questions without special interrogative forms.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Katherine Metcalf|AUTHOR Katherine Metcalf]], [[Barry-John Theobald|AUTHOR Barry-John Theobald]], [[Garrett Weinberg|AUTHOR Garrett Weinberg]], [[Robert Lee|AUTHOR Robert Lee]], [[Ing-Marie Jonsson|AUTHOR Ing-Marie Jonsson]], [[Russ Webb|AUTHOR Russ Webb]], [[Nicholas Apostoloff|AUTHOR Nicholas Apostoloff]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4000–4004&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe experiments towards building a conversational digital assistant that considers the preferred conversational style of the user. In particular, these experiments are designed to measure whether users prefer and trust an assistant whose conversational style matches their own. To this end we conducted a user study where subjects interacted with a digital assistant whose response either matched their conversational style, or did not. We found that people strongly prefer a digital assistant that mirrors their “chattiness” and that this preference can be reliably detected.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eran Raveh|AUTHOR Eran Raveh]]^^1^^, [[Ingo Siegert|AUTHOR Ingo Siegert]]^^2^^, [[Ingmar Steiner|AUTHOR Ingmar Steiner]]^^3^^, [[Iona Gessinger|AUTHOR Iona Gessinger]]^^1^^, [[Bernd Möbius|AUTHOR Bernd Möbius]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität des Saarlandes, Germany; ^^2^^Otto-von-Guericke-Universität Magdeburg, Germany; ^^3^^audEERING, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4005–4009&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study examines how the presence of other speakers affects the interaction with a spoken dialogue system. We analyze participants’ speech regarding several phonetic features, viz., fundamental frequency, intensity, and articulation rate, in two conditions: with and without additional speech input from a human confederate as a third interlocutor. The comparison was made via tasks performed by participants using a commercial voice assistant under both conditions in alternation. We compare the distributions of the features across the two conditions to investigate whether speakers behave differently when a confederate is involved. Temporal analysis exposes continuous changes in the feature productions. In particular, we measured overall accommodation between the participants and the system throughout the interactions. Results show significant differences in a majority of cases for two of the three features, which are more pronounced in cases where the user first interacted with the device alone. We also analyze factors such as the task performed, participant gender, and task order, providing additional insight into the participants’ behavior.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Victor Soto|AUTHOR Victor Soto]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]
</p><p class="cpabstractcardaffiliationlist">Columbia University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3725–3729&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We have found that  cognate words, defined as sets of words used in multiple languages that share a common etymology, can in fact elicit code-switching or language mixing between the languages. This paper focuses on how information about cognate words can improve language modeling performance of code-switched English-Spanish (EN-ES) language. We have found that the degree of semantic, phonetic or lexical overlap between a pair of cognate words is a useful feature in identifying code-switching in language. We derive a set of spelling, phonetic and semantic features from a list of of EN-ES cognates and run experiments on a corpus of conversational code-switched EN-ES. First, we show that there exists a strong statistical relationship between these cognate-based features and code-switching in the corpus. Secondly, we demonstrate that language models using these features obtain similar performance improvements as do other manually tagged features including language and part-of-speech tags. We conclude that cognate features can be a useful set of automatically-derived features that can be easily obtained for any pair of languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Grandee Lee|AUTHOR Grandee Lee]], [[Xianghu Yue|AUTHOR Xianghu Yue]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3730–3734&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switch language modeling is challenging due to data scarcity as well as expanded vocabulary that involves two languages. We present a novel computational method to generate synthetic code-switch data using the Matrix Language Frame theory to alleviate the issue of data scarcity. The proposed method makes use of augmented parallel data to supplement the real code-switch data. We use the synthetic data to pre-train the language model. We show that the pre-trained language model can match the performance of vanilla models when it is finetuned with 2.5 times less real code-switch data. We also show that the perplexity of a RNN based language model pre-trained on synthetic code-switch data and fine-tuned with real code-switch data is significantly lower than that of the model trained on real code-switch data alone and the reduction in perplexity translates into 1.45% absolute reduction in WER in a speech recognition experiment.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[SaiKrishna Rallabandi|AUTHOR SaiKrishna Rallabandi]], [[Alan W. Black|AUTHOR Alan W. Black]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3735–3739&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code Mixing — phenomenon where lexical items from one language are embedded in the utterance of another — is relatively frequent in multilingual communities and therefore speech systems should be able to process such content. However, building a voice capable of synthesizing such content typically requires bilingual recordings from the speaker which might not always be easy to obtain. In this work, we present an approach for building mixed lingual systems using only monolingual corpora. Specifically we present a way to train multi speaker text to speech system by incorporating stochastic latent variables into the attention mechanism with the objective of synthesizing code mixed content. We subject the prior distribution for such latent variables to match articulatory constraints. Subjective evaluation shows that our systems are capable of generating high quality synthesis in code mixed scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qinyi Wang|AUTHOR Qinyi Wang]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Adem Derinel|AUTHOR Adem Derinel]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3740–3744&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching (CS) detection refers to the automatic detection of language switches in code-mixed utterances. This task can be achieved by using a CS automatic speech recognition (ASR) system that can handle such language switches. In our previous work, we have investigated the code-switching detection performance of the Frisian-Dutch CS ASR system by using the time alignment of the most likely hypothesis and found that this technique suffers from over-switching due to numerous very short spurious language switches. In this paper, we propose a novel method for CS detection aiming to remedy this shortcoming by using the language posteriors which are the sum of the frame-level posteriors of phones belonging to the same language. The CS ASR-generated language posteriors contain more complete language-specific information on frame level compared to the time alignment of the ASR output. Hence, it is expected to yield more accurate and robust CS detection. The CS detection experiments demonstrate that the proposed language posterior-based approach provides higher detection accuracy than the baseline system in terms of equal error rate. Moreover, a detailed CS detection error analysis reveals that using language posteriors reduces the false alarms and results in more robust CS detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Astik Biswas|AUTHOR Astik Biswas]]^^1^^, [[Emre Yılmaz|AUTHOR Emre Yılmaz]]^^2^^, [[Febe de Wet|AUTHOR Febe de Wet]]^^1^^, [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]]^^1^^, [[Thomas Niesler|AUTHOR Thomas Niesler]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Stellenbosch University, South Africa; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3745–3749&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents recent progress in the acoustic modelling of under-resourced code-switched (CS) speech in multiple South African languages. We consider two approaches. The first constructs separate bilingual acoustic models corresponding to language pairs (English-isiZulu, English-isiXhosa, English-Setswana and English-Sesotho). The second constructs a single unified five-lingual acoustic model representing all the languages (English, isiZulu, isiXhosa, Setswana and Sesotho). For these two approaches we consider the effectiveness of semi-supervised training to increase the size of the very sparse acoustic training sets. Using approximately 11 hours of untranscribed speech, we show that both approaches benefit from semi-supervised training. The bilingual TDNN-F acoustic models also benefit from the addition of CNN layers (CNN-TDNN-F), while the five-lingual system does not show any significant improvement. Furthermore, because English is common to all language pairs in our data, it dominates when training a unified language model, leading to improved English ASR performance at the expense of the other languages. Nevertheless, the five-lingual model offers flexibility because it can process more than two languages simultaneously, and is therefore an attractive option as an automatic transcription system in a semi-supervised training pipeline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Emre Yılmaz|AUTHOR Emre Yılmaz]]^^1^^, [[Samuel Cohen|AUTHOR Samuel Cohen]]^^1^^, [[Xianghu Yue|AUTHOR Xianghu Yue]]^^1^^, [[David A. van Leeuwen|AUTHOR David A. van Leeuwen]]^^2^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3750–3754&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the FAME! Project, a code-switching (CS) automatic speech recognition (ASR) system for Frisian-Dutch speech is developed that can accurately transcribe the local broadcaster’s bilingual archives with CS speech. This archive contains recordings with monolingual Frisian and Dutch speech segments as well as Frisian-Dutch CS speech, hence the recognition performance on monolingual segments is also vital for accurate transcriptions. In this work, we propose a multi-graph decoding and rescoring strategy using bilingual and monolingual graphs together with a unified acoustic model for CS ASR. The proposed decoding scheme gives the freedom to design and employ alternative search spaces for each (monolingual or bilingual) recognition task and enables the effective use of monolingual resources of the high-resourced mixed language in low-resourced CS scenarios. In our scenario, Dutch is the high-resourced and Frisian is the low-resourced language. We therefore use additional monolingual Dutch text resources to improve the Dutch language model (LM) and compare the performance of single- and multi-graph CS ASR systems on Dutch segments using larger Dutch LMs. The ASR results show that the proposed approach outperforms baseline single-graph CS ASR systems, providing better performance on the monolingual Dutch segments without any accuracy loss on monolingual Frisian and code-mixed segments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroshi Seki|AUTHOR Hiroshi Seki]]^^1^^, [[Takaaki Hori|AUTHOR Takaaki Hori]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]^^1^^, [[John R. Hershey|AUTHOR John R. Hershey]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MERL, USA; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3755–3759&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The expressive power of end-to-end automatic speech recognition (ASR) systems enables direct estimation of a character or word label sequence from a sequence of acoustic features. Direct optimization of the whole system is advantageous because it not only eliminates the internal linkage necessary for hybrid systems, but also extends the scope of potential applications by training the model for various objectives. In this paper, we tackle the challenging task of multilingual multi-speaker ASR using such an all-in-one end-to-end system. Several multilingual ASR systems were recently proposed based on a monolithic neural network architecture without language-dependent modules, showing that modeling of multiple languages is well within the capabilities of an end-to-end framework. There has also been growing interest in multi-speaker speech recognition, which enables generation of multiple label sequences from single-channel mixed speech. In particular, a multi-speaker end-to-end ASR system that can directly model one-to-many mappings without additional auxiliary clues was recently proposed. The proposed model, which integrates the capabilities of these two systems, is evaluated using mixtures of two speakers generated by using 10 languages, including code-switching utterances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Oriol Guasch|AUTHOR Oriol Guasch]]
</p><p class="cpabstractcardaffiliationlist">Universitat Ramon Llull, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Simulating the very complex physics of voice on realistic vocal tract geometries looked daunting a few years ago but has recently experienced a very significant boom. Earlier works mainly dealt with vowel production. Solving the wave equation in a three-dimensional vocal tract suffices for that purpose. As we depart from vowels, however, things quickly get harder. Simulating a few milliseconds of sibilant /s/ demands high-performance computers to solve the sound turbulent eddies generate. Producing a diphthong implies dealing with dynamic geometries. A syllable like /sa/ seems out of reach of current computation capabilities, though some modelling techniques inspired on one-dimensional approaches may lead to more than acceptable results. The shaping of dynamic vocal tracts shall be linked to biomechanical models to gain flexibility and achieve a more complete representation on how, we humans, generate voice. Besides, including phonation in the computations implies resolving the vocal fold self-oscillations and the very demanding coupling of the mechanical, fluid and acoustic fields. Finally, including naturalness in computational voice generation is a newborn and challenging task. In this talk, a general overview on realistic physics-based computational voice production will be given. Current achievements and remaining challenges will be highlighted and discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Debasish Ray Mohapatra|AUTHOR Debasish Ray Mohapatra]]^^1^^, [[Victor Zappi|AUTHOR Victor Zappi]]^^2^^, [[Sidney Fels|AUTHOR Sidney Fels]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of British Columbia, Canada; ^^2^^Northeastern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3760–3764&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The simulation of two-dimensional (2D) wave propagation is an affordable computational task and its use can potentially improve time performance in vocal tracts’ acoustic analysis. Several models have been designed that rely on 2D wave solvers and include 2D representations of three-dimensional (3D) vocal tract-like geometries. However, until now, only the acoustics of straight 3D tubes with circular cross-sections have been successfully replicated with this approach. Furthermore, the simulation of the resulting 2D shapes requires extremely high spatiotemporal resolutions, dramatically reducing the speed boost deriving from the usage of a 2D wave solver. In this paper, we introduce an in-progress novel vocal tract model that extends the 2D Finite-Difference Time-Domain wave solver (2.5D FDTD) by adding tube depth, derived from the area functions, to the acoustic solver. The model combines the speed of a light 2D numerical scheme with the ability to natively simulate 3D tubes that are symmetric in one dimension, hence relaxing previous resolution requirements. An implementation of the 2.5D FDTD is presented, along with evaluation of its performance in the case of static vowel modeling. The paper discusses the current features and limits of the approach, and the potential impact on computational acoustics applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peter Birkholz|AUTHOR Peter Birkholz]]^^1^^, [[Susanne Drechsel|AUTHOR Susanne Drechsel]]^^2^^, [[Simon Stone|AUTHOR Simon Stone]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Dresden, Germany; ^^2^^MLU Halle-Wittenberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3765–3769&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a geometric vocal fold model that describes the glottal area between the lower and upper vocal fold edges as a function of time. It is based on a glottis model by Titze [J. Acoust. Soc. Am., 75(2), 570–580 (1984)] and has been enhanced to allow the generation of skewed (asymmetric) glottal area waveforms and diplophonic double pulsing. Embedded in the articulatory speech synthesizer VocalTractLab, the model was used for the synthesis of German words with a range of settings for the vocal fold model parameters to generate different male and female voices. A perception experiment was conducted to determine the parameter settings that generate the most natural-sounding voices. The most natural-sounding male voice was generated with a slightly divergent prephonatory glottal shape, with a phase delay of 70° between the lower and upper vocal fold edges, symmetric glottal area pulses, and a little shimmer (double pulsing). The most natural-sounding female voice was generated with a straight prephonatory glottal channel, with a phase delay of 50° between the vocal fold edges, slightly asymmetric glottal area pulses, and a little shimmer.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingming Gao|AUTHOR Yingming Gao]], [[Simon Stone|AUTHOR Simon Stone]], [[Peter Birkholz|AUTHOR Peter Birkholz]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Dresden, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3770–3774&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes a novel approach for copy synthesis of human speech with the articulatory speech synthesizer VocalTractLab (VTL). For a given natural utterance, an appropriate gestural score (an organized pattern of articulatory movements) was obtained in two steps: initialization and optimization. In the first step, we employed a rule-based method to create an initial gestural score. In the second step, this initial gestural score was optimized by a genetic algorithm such that the cosine distance of acoustic features between the synthetic and natural utterances was minimized. The optimization was regularized by limiting certain gestural score parameters to reasonable values during the analysis-by-synthesis procedure. The experiment results showed that, compared to a baseline coordinate descent algorithm, the genetic algorithm performed better in terms of acoustic distance. In addition, a perceptual experiment was conducted to rate the similarity between the optimized synthetic speech and the original human speech. Here, similarity scores of optimized utterances with regularization were significantly higher than those without regularization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abdolreza Sabzi Shahrebabaki|AUTHOR Abdolreza Sabzi Shahrebabaki]]^^1^^, [[Negar Olfati|AUTHOR Negar Olfati]]^^1^^, [[Ali Shariq Imran|AUTHOR Ali Shariq Imran]]^^1^^, [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]]^^2^^, [[Torbjørn Svendsen|AUTHOR Torbjørn Svendsen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTNU, Norway; ^^2^^Università di Enna “Kore”, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3775–3779&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The challenge of articulatory inversion is to determine the temporal movement of the articulators from the speech waveform, or from acoustic-phonetic knowledge, e.g. derived from information about the linguistic content of the utterance. The actual position of the articulators is typically obtained from measured data, in our case position measurements obtained using EMA (Electromagnetic articulography). In this paper, we investigate the impact on articulatory inversion problem by using features derived from the acoustic waveform relative to using linguistic features related to the time aligned phone sequence of the utterance. Filterbank energies (FBE) are used as acoustic features, while phoneme identities and (binary) phonetic attributes are used as linguistic features. Experiments are performed on a speech corpus with synchronously recorded EMA measurements and employing a bidirectional long short-term memory (BLSTM) that estimates the articulators’ position. Acoustic FBE features performed better for vowel sounds. Phonetic features attained better results for nasal and fricative sounds except for /h/. Further improvements were obtained by combining FBE and linguistic features, which led to an average relative RMSE reduction of 9.8%, and a 3% relative improvement of the Pearson correlation coefficient.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[George Saon|AUTHOR George Saon]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3780–3784&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The paper presents our endeavor to improve state-of-the-art speech recognition results using attention based neural network approaches. Our test focus was LibriSpeech, a well-known, publicly available, large, speech corpus, but the methodologies are clearly applicable to other tasks. After systematic application of standard techniques — sophisticated data augmentation, various dropout schemes, scheduled sampling, warm-restart —, and optimizing search configurations, our model achieves 4.0% and 11.7% word error rate (WER) on the test-clean and test-other sets,  without any external language model. A powerful recurrent language model drops the error rate further to 2.7% and 8.2%. Thus, we not only report the lowest sequence-to-sequence model based numbers on this task to date, but our single system even challenges the best result known in the literature, namely a hybrid model together with recurrent language model rescoring. A simple ROVER combination of several of our attention based systems achieved 2.5% and 7.3% WER on the clean and other test sets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Awni Hannun|AUTHOR Awni Hannun]], [[Ann Lee|AUTHOR Ann Lee]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Ronan Collobert|AUTHOR Ronan Collobert]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3785–3789&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a fully convolutional sequence-to-sequence encoder architecture with a simple and efficient decoder. Our model improves WER on LibriSpeech while being an order of magnitude more efficient than a strong RNN baseline. Key to our approach is a time-depth separable convolution block which dramatically reduces the number of parameters in the model while keeping the receptive field large. We also give a stable and efficient beam search inference procedure which allows us to effectively integrate a language model. Coupled with a convolutional language model, our time-depth separable convolution architecture improves by more than 22% relative WER over the best previously reported sequence-to-sequence results on the noisy LibriSpeech test set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Murali Karthick Baskar|AUTHOR Murali Karthick Baskar]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Ramon Astudillo|AUTHOR Ramon Astudillo]]^^3^^, [[Takaaki Hori|AUTHOR Takaaki Hori]]^^4^^, [[Lukáš Burget|AUTHOR Lukáš Burget]]^^1^^, [[Jan Černocký|AUTHOR Jan Černocký]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Brno University of Technology, Czech Republic; ^^2^^Johns Hopkins University, USA; ^^3^^IBM, USA; ^^4^^MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3790–3794&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequence-to-sequence automatic speech recognition (ASR) models require large quantities of data to attain high performance. For this reason, there has been a recent surge in interest for unsupervised and semi-supervised training in such models. This work builds upon recent results showing notable improvements in semi-supervised training using cycle-consistency and related techniques. Such techniques derive training procedures and losses able to leverage unpaired speech and/or text data by combining ASR with Text-to-Speech (TTS) models. In particular, this work proposes a new semi-supervised loss combining an end-to-end differentiable ASR→TTS loss with TTS→ASR loss. The method is able to leverage both unpaired speech and text data to outperform recently proposed related techniques in terms of %WER. We provide extensive results analyzing the impact of data quantity and speech and text modalities and show consistent gains across WSJ and Librispeech corpora. Our code is provided in ESPnet to reproduce the experiments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ye Bai|AUTHOR Ye Bai]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3795–3799&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Integrating an external language model into a sequence-to-sequence speech recognition system is non-trivial. Previous works utilize linear interpolation or a fusion network to integrate external language models. However, these approaches introduce external components, and increase decoding computation. In this paper, we instead propose a knowledge distillation based training approach to integrating external language models into a sequence-to-sequence model. A recurrent neural network language model, which is trained on large scale external text, generates soft labels to guide the sequence-to-sequence model training. Thus, the language model plays the role of the teacher. This approach does not add any external component to the sequence-to-sequence model during testing. And this approach is flexible to be combined with shallow fusion technique together for decoding. The experiments are conducted on public Chinese datasets AISHELL-1 and CLMAD. Our approach achieves a character error rate of 9.3%, which is relatively reduced by 18.42% compared with the vanilla sequence-to-sequence model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kazuki Irie|AUTHOR Kazuki Irie]]^^1^^, [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]]^^2^^, [[Anjuli Kannan|AUTHOR Anjuli Kannan]]^^2^^, [[Antoine Bruguier|AUTHOR Antoine Bruguier]]^^2^^, [[David Rybach|AUTHOR David Rybach]]^^2^^, [[Patrick Nguyen|AUTHOR Patrick Nguyen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^RWTH Aachen University, Germany; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3800–3804&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In conventional speech recognition, phoneme-based models outperform grapheme-based models for non-phonetic languages such as English. The performance gap between the two typically reduces as the amount of training data is increased. In this work, we examine the impact of the choice of modeling unit for attention-based encoder-decoder models. We conduct experiments on the LibriSpeech 100hr, 460hr, and 960hr tasks, using various target units (phoneme, grapheme, and word-piece); across all tasks, we find that grapheme or word-piece models consistently outperform phoneme-based models, even though they are evaluated without a lexicon or an external language model. We also investigate model complementarity: we find that we can improve WERs by up to 9% relative by rescoring N-best lists generated from a strong word-piece based baseline with either the phoneme or the grapheme model. Rescoring an N-best list generated by the phonemic system, however, provides limited improvements. Further analysis shows that the word-piece-based models produce more diverse N-best hypotheses, and thus lower oracle WERs, than phonemic models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felix Weninger|AUTHOR Felix Weninger]]^^1^^, [[Jesús Andrés-Ferrer|AUTHOR Jesús Andrés-Ferrer]]^^2^^, [[Xinwei Li|AUTHOR Xinwei Li]]^^1^^, [[Puming Zhan|AUTHOR Puming Zhan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nuance Communications, USA; ^^2^^Nuance Communications, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3805–3809&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequence-to-sequence (seq2seq) based ASR systems have shown state-of-the-art performances while having clear advantages in terms of simplicity. However, comparisons are mostly done on speaker independent (SI) ASR systems, though speaker adapted conventional systems are commonly used in practice for improving robustness to speaker and environment variations. In this paper, we apply speaker adaptation to seq2seq models with the goal of matching the performance of conventional ASR adaptation. Specifically, we investigate Kullback-Leibler divergence (KLD) as well as Linear Hidden Network (LHN) based adaptation for seq2seq ASR, using different amounts (up to 20 hours) of adaptation data per speaker. Our SI models are trained on large amounts of dictation data and achieve state-of-the-art results. We obtained 25% relative word error rate (WER) improvement with KLD adaptation of the seq2seq model vs. 18.7% gain from acoustic model adaptation in the conventional system. We also show that the WER of the seq2seq model decreases log-linearly with the amount of adaptation data. Finally, we analyze adaptation based on the minimum WER criterion and adapting the language model (LM) for score fusion with the speaker adapted seq2seq model, which result in further improvements of the seq2seq system performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna V. Rúnarsdóttir|AUTHOR Anna V. Rúnarsdóttir]], [[Inga R. Helgadóttir|AUTHOR Inga R. Helgadóttir]], [[Jón Guðnason|AUTHOR Jón Guðnason]]
</p><p class="cpabstractcardaffiliationlist">Reykjavik University, Iceland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3810–3814&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) systems are increasingly used to transcribe text for publication or official uses. However, even the best ASR systems make mistakes that can change the meaning of the recognition results. The results from these systems are therefore often reviewed by human editors, who fix the errors that arise. Offering automatic updates of utterances, with lattice re-scoring, could decrease the manual labor needed to fix errors from these systems. The research presented in this paper is conducted within an ASR-based transcription system with human post-editing for the Icelandic parliament,  Althingi, and aims to automatically correct down-stream errors once the first error of a sentence has been manually corrected. After manually correcting the first error of the utterances, a new path is computed through the correction, using the lattice created during the ASR decoding process. With re-scoring, the sentence error rate (SER) for utterances containing two errors (and hence with SER=100%) drops to 82.77% and for utterances containing three errors drops to 95.88%. This paper demonstrates that the trade-off between automatically fixed errors and new errors introduced in the re-scoring heavily favours adding this process to the transcription system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daisuke Fukunaga|AUTHOR Daisuke Fukunaga]], [[Yoshiki Tanaka|AUTHOR Yoshiki Tanaka]], [[Yuichi Kageyama|AUTHOR Yuichi Kageyama]]
</p><p class="cpabstractcardaffiliationlist">Sony, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3815–3819&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Weighted finite-state transducer (WFST) decoding in speech recognition can be accelerated by using graphics processing units (GPUs). To obtain a high recognition accuracy in a WFST-based speech recognition system, a very large language model (LM) represented as a WFST with more than 10 GB of data is required. Since a GPU typically has only several GB of memory, it is impossible to store such a large LM in GPU memory. In this paper, we propose a new method for WFST decoding on a GPU. The method utilizes the  on-the-fly rescoring algorithm, which performs the Viterbi search on a WFST with a small LM and rescores hypotheses using a large LM during decoding. We solve the problem of insufficient GPU memory by storing most of the large LM in a memory on the host and copying the data from the host memory to the GPU memory as needed during runtime. Our evaluation of the proposed method on the LibriSpeech test sets using an NVIDIA Tesla V100 GPU shows that it achieves a ten times faster decoding than an equivalent CPU implementation without recognition accuracy degradation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Javier Jorge|AUTHOR Javier Jorge]], [[Adrià Giménez|AUTHOR Adrià Giménez]], [[Javier Iranzo-Sánchez|AUTHOR Javier Iranzo-Sánchez]], [[Jorge Civera|AUTHOR Jorge Civera]], [[Albert Sanchis|AUTHOR Albert Sanchis]], [[Alfons Juan|AUTHOR Alfons Juan]]
</p><p class="cpabstractcardaffiliationlist">Universidad Politécnica de Valencia, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3820–3824&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recurrent Neural Networks, in particular Long-Short Term Memory (LSTM) networks, are widely used in Automatic Speech Recognition for language modelling during decoding, usually as a mechanism for rescoring hypothesis. This paper proposes a new architecture to perform real-time one-pass decoding using LSTM language models. To make decoding efficient, the estimation of look-ahead scores was accelerated by precomputing static look-ahead tables. These static tables were precomputed from a pruned n-gram model, reducing drastically the computational cost during decoding. Additionally, the LSTM language model evaluation was efficiently performed using Variance Regularization along with a strategy of lazy evaluation. The proposed one-pass decoder architecture was evaluated on the well-known LibriSpeech and TED-LIUMv3 datasets. Results showed that the proposed algorithm obtains very competitive WERs with ~0.6 RTFs. Finally, our one-pass decoder is compared with a decoupled two-pass decoder.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroshi Seki|AUTHOR Hiroshi Seki]]^^1^^, [[Takaaki Hori|AUTHOR Takaaki Hori]]^^2^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^3^^, [[Niko Moritz|AUTHOR Niko Moritz]]^^2^^, [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Toyohashi University of Technology, Japan; ^^2^^MERL, USA; ^^3^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3825–3829&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates efficient beam search techniques for end-to-end automatic speech recognition (ASR) with attention-based encoder-decoder architecture. We accelerate the decoding process by vectorizing multiple hypotheses during the beam search, where we replace the score accumulation steps for each hypothesis with vector-matrix operations for the vectorized hypotheses. This modification allows us to take advantage of the parallel computing capabilities of multi-core CPUs and GPUs, resulting in significant speedups and also enabling us to process multiple utterances in a batch simultaneously. Moreover, we extend the decoding method to incorporate a recurrent neural network language model (RNNLM) and connectionist temporal classification (CTC) scores, which typically improve ASR accuracy but have not been investigated for the use of such parallelized decoding algorithms. Experiments with LibriSpeech and Corpus of Spontaneous Japanese datasets have demonstrated that the vectorized beam search achieves 1.8× speedup on a CPU and 33× speedup on a GPU compared with the original CPU implementation. When using joint CTC/attention decoding with an RNNLM, we also achieved 11× speedup on a GPU while maintaining the benefits of CTC and RNNLM. With these benefits, we achieved almost real-time processing with a small latency of 0.1× real-time without streaming search process.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jack Serrino|AUTHOR Jack Serrino]]^^1^^, [[Leonid Velikovich|AUTHOR Leonid Velikovich]]^^2^^, [[Petar Aleksic|AUTHOR Petar Aleksic]]^^2^^, [[Cyril Allauzen|AUTHOR Cyril Allauzen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3830–3834&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As voice-driven intelligent assistants become commonplace, adaptation to user context becomes critical for Automatic Speech Recognition (ASR) systems. For example, ASR systems may be expected to recognize a user’s contact names containing improbable or out-of-vocabulary (OOV) words.

We introduce a method to identify contextual cues in a first-pass ASR system’s output and to recover out-of-lattice hypotheses that are contextually relevant. Our proposed module is agnostic to the architecture of the underlying recognizer, provided it generates a word lattice of hypotheses; it is sufficiently compact for use on device. The module identifies subgraphs in the lattice likely to contain named entities (NEs), recovers phoneme hypotheses over corresponding time spans, and inserts NEs that are phonetically close to those hypotheses. We measure a decrease in the mean word error rate (WER) of word lattices from 11.5% to 4.9% on a test set of NEs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sashi Novitasari|AUTHOR Sashi Novitasari]], [[Andros Tjandra|AUTHOR Andros Tjandra]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]
</p><p class="cpabstractcardaffiliationlist">NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3835–3839&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based sequence-to-sequence automatic speech recognition (ASR) requires a significant delay to recognize long utterances because the output is generated after receiving entire input sequences. Although several studies recently proposed sequence mechanisms for incremental speech recognition (ISR), using different frameworks and learning algorithms is more complicated than the standard ASR model. One main reason is because the model needs to decide the incremental steps and learn the transcription that aligns with the current short speech segment. In this work, we investigate whether it is possible to employ the original architecture of attention-based ASR for ISR tasks by treating a full-utterance ASR as the teacher model and the ISR as the student model. We design an alternative student network that, instead of using a thinner or a shallower model, keeps the original architecture of the teacher model but with shorter sequences (few encoder and decoder states). Using attention transfer, the student network learns to mimic the same alignment between the current input short speech segments and the transcription. Our experiments show that by delaying the starting time of recognition process with about 1.7 sec, we can achieve comparable performance to one that needs to wait until the end.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Lian|AUTHOR Zheng Lian]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jian Huang|AUTHOR Jian Huang]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3840–3844&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prior works on speech emotion recognition utilize various unsupervised learning approaches to deal with low-resource samples. However, these methods pay less attention to modeling the long-term dynamic dependency, which is important for speech emotion recognition. To deal with this problem, this paper combines the unsupervised representation learning strategy — Future Observation Prediction (FOP), with transfer learning approaches (such as Fine-tuning and Hypercolumns). To verify the effectiveness of the proposed method, we conduct experiments on the IEMOCAP database. Experimental results demonstrate that our method is superior to currently advanced unsupervised learning strategies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huy Phan|AUTHOR Huy Phan]]^^1^^, [[Oliver Y. Chén|AUTHOR Oliver Y. Chén]]^^2^^, [[Lam Pham|AUTHOR Lam Pham]]^^1^^, [[Philipp Koch|AUTHOR Philipp Koch]]^^3^^, [[Maarten De Vos|AUTHOR Maarten De Vos]]^^2^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^1^^, [[Alfred Mertins|AUTHOR Alfred Mertins]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Kent, UK; ^^2^^University of Oxford, UK; ^^3^^Universität zu Lübeck, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3845–3849&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic scenes are rich and redundant in their content. In this work, we present a spatio-temporal attention pooling layer coupled with a convolutional recurrent neural network to learn from patterns that are discriminative while suppressing those that are irrelevant for acoustic scene classification. The convolutional layers in this network learn invariant features from time-frequency input. The bidirectional recurrent layers are then able to encode the temporal dynamics of the resulting convolutional features. Afterwards, a two-dimensional attention mask is formed via the outer product of the spatial and temporal attention vectors learned from two designated attention layers to weigh and pool the recurrent output into a final feature vector for classification. The network is trained with  between-class examples generated from between-class data augmentation. Experiments demonstrate that the proposed method not only outperforms a strong convolutional neural network baseline but also sets new state-of-the-art performance on the LITIS Rouen dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiuying Shi|AUTHOR Qiuying Shi]], [[Hui Luo|AUTHOR Hui Luo]], [[Jiqing Han|AUTHOR Jiqing Han]]
</p><p class="cpabstractcardaffiliationlist">Harbin Institute of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3850–3854&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Currently, most popular methods of Audio Event Recognition (AER) firstly split audio event signals into multiple short segments, then the features of these segments are pooled for recognition. However, the temporal features between segments, which highly affect the semantic representation of signals, are usually discarded in the above pooling step. Thus, how to introduce the temporal features to the pooling step requires further investigation. Unfortunately, on the one hand, only a few studies have been conducted towards solving this problem so far. On the other hand, the effective temporal features should not only capture the temporal dynamics but also have the signal reconstruction ability, while most of the above studies mainly focus on the former but ignore the latter. In addition, the effective features of high-dimensional original signals usually inhabit a low-dimensional subspace. Therefore, we propose two novel pooling based methods which try to consider both the temporal dynamics and signal reconstruction ability of temporal features in the low-dimensional subspace. The proposed methods are evaluated on the AudioEvent database, and experimental results show that our methods can outperform most of the typical methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingyang Zhang|AUTHOR Jingyang Zhang]]^^1^^, [[Wenhao Ding|AUTHOR Wenhao Ding]]^^1^^, [[Jintao Kang|AUTHOR Jintao Kang]]^^2^^, [[Liang He|AUTHOR Liang He]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Ministry of Public Security, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3855–3859&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most attention-based methods only concentrate along the time axis, which is insufficient for Acoustic Event Detection (AED). Meanwhile, previous methods for AED rarely considered that target events possess distinct temporal and frequential scales. In this work, we propose a  Multi-Scale Time-Frequency Attention (MTFA) module for AED. MTFA gathers information at multiple resolutions to generate a time-frequency attention mask which tells the model where to focus along both time and frequency axis. With MTFA, the model could capture the characteristics of target events with different scales. We demonstrate the proposed method on Task 2 of Detection and Classification of Acoustic Scenes and Events (DCASE) 2017 Challenge. Our method achieves competitive results on both development dataset and evaluation dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongwei Song|AUTHOR Hongwei Song]]^^1^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^1^^, [[Shiwen Deng|AUTHOR Shiwen Deng]]^^2^^, [[Zhihao Du|AUTHOR Zhihao Du]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Harbin Institute of Technology, China; ^^2^^Harbin Normal University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3860–3864&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a new strategy for acoustic scene classification (ASC) , namely recognizing acoustic scenes through identifying distinct sound events. This differs from existing strategies, which focus on characterizing global acoustical distributions of audio or the temporal evolution of short-term audio features, without analysis down to the level of sound events. To identify distinct sound events for each scene, we formulate ASC in a multi-instance learning (MIL) framework, where each audio recording is mapped into a bag-of-instances representation. Here, instances can be seen as high-level representations for sound events inside a scene. We also propose a MIL neural networks model, which implicitly identifies distinct instances (i.e., sound events). Furthermore, we propose two specially designed modules that model the multi-temporal scale and multi-modal natures of the sound events respectively. The experiments were conducted on the official development set of the DCASE2018 Task1 Subtask B, and our best-performing model improves over the official baseline by 9.4% (68.3% vs 58.9%) in terms of classification accuracy. This study indicates that recognizing acoustic scenes by identifying distinct sound events is effective and paves the way for future studies that combine this strategy with previous ones.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoke Qi|AUTHOR Xiaoke Qi]]^^1^^, [[Lu Wang|AUTHOR Lu Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUPL, China; ^^2^^Shenzhen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3865–3869&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Individualized head-related transfer functions (HRTFs) play an important role in accurate localization perception. However, it is a great challenge to efficiently measure continuous HRTFs for each person in full space. In this paper, we propose a parameter-transfer learning method termed PTL to obtain individualized HRTFs based on a small set of HRTF measurements. The key idea behind PTL is to transfer a HRTF generation model from other database to a target individual. To this end, PTL first pretrains a deep neural network (DNN)-based universal model on a large database of HRTFs with the assist of domain knowledge. Domain knowledge is used to generate the input features derived from the solution to sound wave propagation equation at the physical level, and to design the loss function based on the knowledge of objective evaluation criterion. Then, the universal model is transferred to a target individual by adapting the parameters of a hidden layer of DNN with a small set of HRTF measurements. The adaptation layer is determined by experimental verification. We also conduct the objective and subjective experiments, and the results show that the proposed method outperforms the state-of-the-arts methods in terms of LSD and localization accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lei Liu|AUTHOR Lei Liu]], [[Meng Jian|AUTHOR Meng Jian]], [[Wentao Gu|AUTHOR Wentao Gu]]
</p><p class="cpabstractcardaffiliationlist">Nanjing Normal University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3870–3874&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work investigated the prosodic characteristics of declarative and interrogative utterances produced by speakers with Parkinson’s disease (PD), in comparison to healthy controls (HC). Forty native speakers of Mandarin, including 20 PDs and 20 age-matched HCs, recorded 32 utterances varying in sentence type, sentence length, and sentence-final tone. SS-ANOVA was used to show the F0 contours and the global and final-syllable F0 level, F0 slope, speech rate, and intensity ratio were statistically analyzed using linear mixed-effects models. For the HC group, interrogative utterances showed a significantly higher mean F0 than declarative utterances. The PD group exhibited no significant F0 difference between declarative and interrogative utterances, coinciding with our subjective impression on PD’s monotonous voice of tone. This suggests that PD’s ability to control fundamental frequency degraded in comparison to HC. Also, the PD group produced significantly faster speech, especially final syllable, than the HC group, suggesting that PD’s articulatory control degraded at the end of an utterance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Laureano Moro-Velazquez|AUTHOR Laureano Moro-Velazquez]]^^1^^, [[JaeJin Cho|AUTHOR JaeJin Cho]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^1^^, [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]]^^2^^, [[Odette Scharenborg|AUTHOR Odette Scharenborg]]^^3^^, [[Heejin Kim|AUTHOR Heejin Kim]]^^2^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^University of Illinois at Urbana-Champaign, USA; ^^3^^Technische Universiteit Delft, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3875–3879&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Parkinson’s Disease (PD) affects motor capabilities of patients, who in some cases need to use human-computer assistive technologies to regain independence. The objective of this work is to study in detail the differences in error patterns from state-of-the-art Automatic Speech Recognition (ASR) systems on speech from people with and without PD. Two different speech recognizers (attention-based end-to-end and Deep Neural Network - Hidden Markov Models hybrid systems) were trained on a Spanish language corpus and subsequently tested on speech from 43 speakers with PD and 46 without PD. The differences related to error rates, substitutions, insertions and deletions of characters and phonetic units between the two groups were analyzed, showing that the word error rate is 27% higher in speakers with PD than in control speakers, with a moderated correlation between that rate and the developmental stage of the disease. The errors were related to all manner classes, and were more pronounced in the vowel /u/. This study is the first to evaluate ASR systems’ responses to speech from patients at different stages of PD in Spanish. The analyses showed general trends but individual speech deficits must be studied in the future when designing new ASR systems for this population.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tianqi Wang|AUTHOR Tianqi Wang]]^^1^^, [[Chongyuan Lian|AUTHOR Chongyuan Lian]]^^1^^, [[Jingshen Pan|AUTHOR Jingshen Pan]]^^1^^, [[Quanlei Yan|AUTHOR Quanlei Yan]]^^1^^, [[Feiqi Zhu|AUTHOR Feiqi Zhu]]^^2^^, [[Manwa L. Ng|AUTHOR Manwa L. Ng]]^^3^^, [[Lan Wang|AUTHOR Lan Wang]]^^1^^, [[Nan Yan|AUTHOR Nan Yan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Shenzhen Luohu People’s Hospital, China; ^^3^^HKU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3880–3884&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Language impairment is a sensitive biomarker for the detection of cognitive decline associated with mild cognitive impairment (MCI). Recently, knowledge about distinctive linguistic features identifying language deficits in MCI has progressively been enriched and accumulated. However, the employment of a single speech task to elicit connected speech (e.g., structured vs. spontaneous conversations) might limit the generalization of salient linguistic features associated with MCI. Not to mention the scarcity of reports on analysis of extended speech of Chinese. The present study aimed to examine if connected speech production in both situational picture description and spontaneous self-introduction tasks could be used to distinguish individuals with psychometric evidence of MCI and those who were cognitively intact. Speech samples produced by 75 elderly native speakers of Mandarin Chinese, including 19 with MCI and 56 healthy controls were obtained. Macrostructural aspects of language, including lexico-semantic, syntactic, speech fluency, and acoustics were analyzed by applying the linear mixed-effect regression model. Our study revealed decreasing linear trends in semantic contents and syntactic complexity, as well as significantly greater signs of disfluency and reduced speech production in participants with MCI. The findings extended what was reported in the literature, and carry important implications to the screening and diagnosis of suspected MCI.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiarui Wang|AUTHOR Jiarui Wang]], [[Ying Qin|AUTHOR Ying Qin]], [[Zhiyuan Peng|AUTHOR Zhiyuan Peng]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3885–3889&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustics-based automatic assessment is a highly desirable approach to detecting speech sound disorder (SSD) in children. The performance of an automatic speech assessment system depends greatly on the availability of a good amount of properly annotated disordered speech, which is a critical problem particularly for child speech. This paper presents a novel design of child speech disorder detection system that requires only normal speech for model training. The system is based on a Siamese recurrent network, which is trained to learn the similarity and discrepancy of pronunciations between a pair of phones in the embedding space. For detection of speech sound disorder, the trained network measures a distance that contrasts the test phone to the desired phone and the distance is used to train a binary classifier. Speech attribute features are incorporated to measure the pronunciation quality and provide diagnostic feedback. Experimental results show that Siamese recurrent network with a combination of speech attribute features and phone posterior features could attain an optimal detection accuracy of 0.941.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel Korzekwa|AUTHOR Daniel Korzekwa]]^^1^^, [[Roberto Barra-Chicote|AUTHOR Roberto Barra-Chicote]]^^2^^, [[Bozena Kostek|AUTHOR Bozena Kostek]]^^3^^, [[Thomas Drugman|AUTHOR Thomas Drugman]]^^4^^, [[Mateusz Lajszczak|AUTHOR Mateusz Lajszczak]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, Poland; ^^2^^Amazon, UK; ^^3^^Gdańsk University of Technology, Poland; ^^4^^Amazon, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3890–3894&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel deep learning model for the detection and reconstruction of dysarthric speech. We train the model with a multi-task learning technique to jointly solve dysarthria detection and speech reconstruction tasks. The model key feature is a low-dimensional latent space that is meant to encode the properties of dysarthric speech. It is commonly believed that neural networks are black boxes that solve problems but do not provide interpretable outputs. On the contrary, we show that this latent space successfully encodes interpretable characteristics of dysarthria, is effective at detecting dysarthria, and that manipulation of the latent space allows the model to reconstruct healthy speech from dysarthric speech. This work can help patients and speech pathologists to improve their understanding of the condition, lead to more accurate diagnoses and aid in reconstructing healthy speech for afflicted patients.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Camille Noufi|AUTHOR Camille Noufi]]^^1^^, [[Adam C. Lammert|AUTHOR Adam C. Lammert]]^^1^^, [[Daryush D. Mehta|AUTHOR Daryush D. Mehta]]^^1^^, [[James R. Williamson|AUTHOR James R. Williamson]]^^1^^, [[Gregory Ciccarelli|AUTHOR Gregory Ciccarelli]]^^1^^, [[Douglas Sturim|AUTHOR Douglas Sturim]]^^1^^, [[Jordan R. Green|AUTHOR Jordan R. Green]]^^2^^, [[Thomas F. Campbell|AUTHOR Thomas F. Campbell]]^^3^^, [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT Lincoln Laboratory, USA; ^^2^^MGH Institute of Health Professions, USA; ^^3^^University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3895–3899&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recommendations following pediatric traumatic brain injury (TBI) support the integration of instrumental measurement to aid perceptual assessment in recovery and treatment plans. A comprehensive set of sensitive, robust and non-invasive measurements is therefore essential in assessing variations in speech characteristics over time following pediatric TBI. In this paper, we discuss a method for measuring changes in the speech patterns of a pediatric cohort of ten subjects diagnosed with severe TBI. We apply a diverse set of both well-known and novel feature measurements to child speech recorded throughout the year following diagnosis. We analyze these features individually and by speech subsystem for each subject as well as for the entire cohort. In children older than 72 months, we find highly significant (p < 0.01) increases in pitch variation and number of unique phonemes spoken, shortened pause length, and steadying articulation rate variability. Younger children exhibit similar steadied rate variability alongside an increase in articulation complexity. Nearly all speech features significantly change (p < 0.05) for the cohort as a whole, confirming that acoustic measures expanding upon perceptual assessment are needed to identify efficacious treatment targets for speech therapy following TBI.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yusuke Fujita|AUTHOR Yusuke Fujita]]^^1^^, [[Naoyuki Kanda|AUTHOR Naoyuki Kanda]]^^1^^, [[Shota Horiguchi|AUTHOR Shota Horiguchi]]^^1^^, [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Hitachi, Japan; ^^2^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4300–4304&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a novel end-to-end neural-network-based speaker diarization method. Unlike most existing methods, our proposed method does not have separate modules for extraction and clustering of speaker representations. Instead, our model has a single neural network that directly outputs speaker diarization results. To realize such a model, we formulate the speaker diarization problem as a multi-label classification problem, and introduces a permutation-free objective function to directly minimize diarization errors without being suffered from the speaker-label permutation problem. Besides its end-to-end simplicity, the proposed method also benefits from being able to explicitly handle overlapping speech during training and inference. Because of the benefit, our model can be easily trained/adapted with real-recorded multi-speaker conversations just by feeding the corresponding multi-speaker segment labels. We evaluated the proposed method on simulated speech mixtures. The proposed method achieved diarization error rate of 12.28%, while a conventional clustering-based system produced diarization error rate of 28.77%. Furthermore, the domain adaptation with real-recorded speech provided 25.6% relative improvement on the CALLHOME dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingke Zhu|AUTHOR Yingke Zhu]]^^1^^, [[Tom Ko|AUTHOR Tom Ko]]^^2^^, [[Brian Mak|AUTHOR Brian Mak]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^HKUST, China; ^^2^^SUSTech, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4345–4349&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Mixup is a learning strategy that constructs additional virtual training samples from existing training samples by linearly interpolating random pairs of them. It has been shown that mixup can help avoid data memorization and thus improve model generalization. This paper investigates the mixup learning strategy in training speaker-discriminative deep neural network (DNN) for better text-independent speaker verification.

In recent speaker verification systems, a DNN is usually trained to classify speakers in the training set. At the same time, the DNN learns a low-dimensional embedding of speakers so that speaker embeddings can be generated for any speaker during evaluation. We adapted the mixup strategy to the speaker-discriminative DNN training procedure and studied different mixup schemes, such as performing mixup on MFCC features or raw audio samples. The mixup learning strategy was evaluated on the NIST SRE 2010, 2016 and SITW evaluation sets. Experimental results show consistent performance improvements of up to 13% relative in terms of both EER and DCF. We further find that mixup training also consistently improves the DNN’s speaker classification accuracy without requiring any additional data sources.</p></div>
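As a rough illustration of the mixup construction described above, the sketch below interpolates two toy examples and their one-hot labels with a Beta-distributed weight; the feature shapes, label space and alpha value are assumptions, not the authors' configuration.

```python
import numpy as np

def mixup(x1, y1, x2, y2, alpha=0.2):
    # linear interpolation of a random pair of samples and their labels
    lam = np.random.beta(alpha, alpha)
    return lam * x1 + (1 - lam) * x2, lam * y1 + (1 - lam) * y2

x1, x2 = np.random.randn(40), np.random.randn(40)     # e.g. two toy MFCC frames
y1, y2 = np.eye(10)[3], np.eye(10)[7]                  # one-hot speaker labels
x_mix, y_mix = mixup(x1, y1, x2, y2)
print(x_mix.shape, y_mix)
```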
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luciana Ferrer|AUTHOR Luciana Ferrer]]^^1^^, [[Mitchell McLaren|AUTHOR Mitchell McLaren]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UBA, Argentina; ^^2^^SRI International, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4350–4354&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art speaker verification systems use deep neural networks (DNN) to extract highly discriminant representations of the samples, commonly called speaker embeddings. The networks are trained to maximize the cross-entropy between the estimated posteriors and the speaker labels. The pre-activations from one of the last layers in that network are used as embeddings. These sample-level vectors are then used as input to a backend that generates the final scores. The most successful backend for speaker verification to date is the probabilistic linear discriminant analysis (PLDA) backend. The full process consists of a linear discriminant analysis (LDA) projection of the embeddings, followed by mean and length normalization, ending with PLDA for score computation. While this procedure works very well compared to other approaches, it seems to be inherently suboptimal since the embeddings extractor is not directly trained to optimize the performance of the embeddings when using the PLDA backend for scoring. In this work, we propose one way to encourage the DNN to generate embeddings that are optimized for use in the PLDA backend, by adding a secondary objective designed to measure the performance of a such backend within the network. We show modest but consistent gains across several speaker recognition datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^1^^, [[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]]^^1^^, [[Koji Okabe|AUTHOR Koji Okabe]]^^1^^, [[Qiongqiong Wang|AUTHOR Qiongqiong Wang]]^^1^^, [[Ling Guo|AUTHOR Ling Guo]]^^1^^, [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]]^^1^^, [[Jiacen Zhang|AUTHOR Jiacen Zhang]]^^2^^, [[Koichi Shinoda|AUTHOR Koichi Shinoda]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NEC, Japan; ^^2^^Tokyo Tech, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4355–4359&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the NEC-TT speaker verification system for the 2018 NIST  speaker recognition evaluation (SRE’18). We present the details of data partitioning, x-vector speaker embedding, data augmentation, speaker diarization, and domain adaptation techniques used in NEC-TT SRE’18 speaker verification system. For the speaker embedding front-end, we found that the amount and diversity of training data are essential to improve the robustness of the x-vector extractor. This was achieved with data augmentation and mixed-bandwidth training in our submission. For the multi-speaker test scenario, we show that x-vector based speaker diarization is promising and holds potential for future research. For the scoring back-end, we used two variants of probabilistic linear discriminant analysis (PLDA), namely, the Gaussian PLDA and heavy-tailed PLDA. We show that correlation alignment (CORAL) and CORAL+ unsupervised PLDA adaptation are effective to deal with domain mismatch.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siqi Zheng|AUTHOR Siqi Zheng]], [[Gang Liu|AUTHOR Gang Liu]], [[Hongbin Suo|AUTHOR Hongbin Suo]], [[Yun Lei|AUTHOR Yun Lei]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4360–4364&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study aims to improve the performance of speaker verification system when no labeled out-of-domain data is available. An autoencoder-based semi-supervised curriculum learning scheme is proposed to automatically cluster unlabeled data and iteratively update the corpus during training. This new training scheme allows us to (1) progressively expand the size of training corpus by utilizing unlabeled data and correcting previous labels at run-time; and (2) improve robustness when generalizing to multiple conditions, such as out-of-domain and text-independent speaker verification tasks. It is also discovered that a denoising autoencoder can significantly enhance the clustering accuracy when it is trained on carefully-selected subset of speakers. Our experimental results show a relative reduction of 30%–50% in EER compared to the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Danwei Cai|AUTHOR Danwei Cai]], [[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4365–4369&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite the significant improvements in speaker recognition enabled by deep neural networks, unsatisfactory performance persists under far-field scenarios due to the effects of the long range fading, room reverberation, and environmental noises. In this study, we focus on far-field speaker recognition with a microphone array. We propose a multi-channel training framework for the deep speaker embedding neural network on noisy and reverberant data. The proposed multi-channel training framework simultaneously processes the time-, frequency- and channel-information to learn a robust deep speaker embedding. Based on the 2-dimensional or 3-dimensional convolution layer, we investigate different multi-channel training schemes. Experiments on the simulated multi-channel reverberant and noisy data show that the proposed method obtains significant improvements over the single-channel trained deep speaker embedding system with front end speech enhancement or multi-channel embedding fusion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Danwei Cai|AUTHOR Danwei Cai]], [[Weicheng Cai|AUTHOR Weicheng Cai]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4370–4374&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present the system submission for the NIST 2018 Speaker Recognition Evaluation by DKU Speech and Multi-Modal Intelligent Information Processing (SMIIP) Lab. We explore various kinds of state-of-the-art front-end extractors as well as back-end modeling for text-independent speaker verifications. Our submitted primary systems employ multiple state-of-the-art front-end extractors, including the MFCC i-vector, the DNN tandem i-vector, the TDNN x-vector, and the deep ResNet. After speaker embedding is extracted, we exploit several kinds of back-end modeling to perform variability compensation and domain adaptation for mismatch training and testing conditions. The final submitted system on the fixed condition obtains actual detection cost of 0.392 and 0.494 on CMN2 and VAST evaluation data respectively. After the official evaluation, we further extend our experiments by investigating multiple encoding layer designs and loss functions for the deep ResNet system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Miquel India|AUTHOR Miquel India]], [[Pooyan Safari|AUTHOR Pooyan Safari]], [[Javier Hernando|AUTHOR Javier Hernando]]
</p><p class="cpabstractcardaffiliationlist">Universitat Politècnica de Catalunya, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4305–4309&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most state-of-the-art Deep Learning (DL) approaches for speaker recognition work on a short utterance level. Given the speech signal, these algorithms extract a sequence of speaker embeddings from short segments and those are averaged to obtain an utterance level speaker representation. In this work we propose the use of an attention mechanism to obtain a discriminative speaker embedding given non fixed length speech utterances. Our system is based on a Convolutional Neural Network (CNN) that encodes short-term speaker features from the spectrogram and a self multi-head attention model that maps these representations into a long-term speaker embedding. The attention model that we propose produces multiple alignments from different subsegments of the CNN encoded states over the sequence. Hence this mechanism works as a pooling layer which decides the most discriminative features over the sequence to obtain an utterance level representation. We have tested this approach for the verification task for the VoxCeleb1 dataset. The results show that self multi-head attention outperforms both temporal and statistical pooling methods with a 18% of relative EER. Obtained results show a 58% relative improvement in EER compared to i-vector+PLDA.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ignacio Viñals|AUTHOR Ignacio Viñals]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Victoria Mingote|AUTHOR Victoria Mingote]], [[Jorge Llombart|AUTHOR Jorge Llombart]], [[Pablo Gimeno|AUTHOR Pablo Gimeno]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4310–4314&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Very often, speaker recognition systems do not take into account phonetic information explicitly. In order to gain insight along this line of research, we have studied the use of phonetic information in the embedding extraction process for automatic speaker verification systems in two different ways: on the one hand using the well-known i-vector paradigm and, on the other hand, using Wide Residual Networks (WRN) with Time Delay Neural Networks (TDNN) and Self-Attention Mechanisms. The phonetic information is provided by a WRN with TDNN using 1D convolutional layers specifically trained for this purpose. These two approaches along with the widely used x-vector system based on the Kaldi toolkit were submitted to the 2018 NIST speaker recognition evaluation. As back-end, these representations used a standard PLDA classifier with ad-hoc configurations for each system and in-domain adaptation. The results obtained in the NIST SRE 2018 show that our methods are very promising and it is worth continuing to work on them to improve their performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Youzhi Tu|AUTHOR Youzhi Tu]]^^1^^, [[Man-Wai Mak|AUTHOR Man-Wai Mak]]^^1^^, [[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^PolyU, China; ^^2^^National Chiao Tung University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4315–4319&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Domain mismatch refers to the problem in which the distribution of training data differs from that of the test data. This paper proposes a variational domain adversarial neural network (VDANN), which consists of a variational autoencoder (VAE) and a domain adversarial neural network (DANN), to reduce domain mismatch. The DANN part aims to retain speaker identity information and learn a feature space that is robust against domain mismatch, while the VAE part is to impose variational regularization on the learned features so that they follow a Gaussian distribution. Thus, the representation produced by VDANN is not only speaker discriminative and domain-invariant but also Gaussian distributed, which is essential for the standard PLDA backend. Experiments on both SRE16 and SRE18-CMN2 show that VDANN outperforms the Kaldi baseline and the standard DANN. The results also suggest that VAE regularization is effective for domain adaptation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tianchi Liu|AUTHOR Tianchi Liu]], [[Maulik Madhavi|AUTHOR Maulik Madhavi]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4320–4324&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker and utterance verification are two tasks that co-exist in text-dependent speaker verification (SV), where a phrase of the same lexical information is spoken during train and test sessions. The conventional approaches mostly verify the speaker and the utterance separately using two models. While there are studies on joint modeling of speaker and utterance, it is always desirable to have a common framework that performs both speaker and utterance verification to access the intended service. To this end, we propose a unified framework that deals with both objectives and the trade-off between the two. The unified framework is based on long short term memory network trained using both speaker and utterance information. We use Part I of RSR2015 database for the studies in this work. We show that the unified framework not only demonstrates competitive SV performance, but also provides a solution for a system to address different levels of security need.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]]^^1^^, [[Luciana Ferrer|AUTHOR Luciana Ferrer]]^^2^^, [[Mitchell McLaren|AUTHOR Mitchell McLaren]]^^1^^, [[Diego Castan|AUTHOR Diego Castan]]^^1^^, [[Aaron Lawson|AUTHOR Aaron Lawson]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SRI International, USA; ^^2^^UBA, Argentina</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4325–4329&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we analyze and assess the impact of critical metadata factors on the calibration performance of speaker recognition systems. In particular, we study the effect of duration, distance, language, and gender by using a variety of datasets and systematically varying the conditions in the evaluation and calibration sets. For all experiments, the system is based on i-vectors and a probabilistic linear discriminant analysis (PLDA) back-end and linear calibration. We measure system performance in terms of calibration loss. Our experiments reveal (i) a large degradation when the duration used for calibration is significantly different from that in the evaluation set; (ii) no significant degradation when a different gender is used for calibration than for evaluation; (iii) a large degradation when microphone distance is significantly different between the sets; and (iv) a small loss for closely related languages and languages with shared vocabulary. This analysis will be beneficial in the development of speaker recognition systems for use in unseen environments and for forensic speaker recognition analysts when selecting relevant population data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ondřej Novotný|AUTHOR Ondřej Novotný]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]], [[Lukáš Burget|AUTHOR Lukáš Burget]]
</p><p class="cpabstractcardaffiliationlist">Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4330–4334&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we continue in our research on i-vector extractor for speaker verification (SV) and we optimize its architecture for fast and effective discriminative training. We were motivated by computational and memory requirements caused by the large number of parameters of the original generative i-vector model. Our aim is to preserve the power of the original generative model, and at the same time focus the model towards extraction of speaker-related information. We show that it is possible to represent a standard generative i-vector extractor by a model with significantly less parameters and obtain similar performance on SV tasks. We can further refine this compact model by discriminative training and obtain i-vectors that lead to better performance on various SV benchmarks representing different acoustic domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniele Salvati|AUTHOR Daniele Salvati]], [[Carlo Drioli|AUTHOR Carlo Drioli]], [[Gian Luca Foresti|AUTHOR Gian Luca Foresti]]
</p><p class="cpabstractcardaffiliationlist">Università di Udine, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4335–4339&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolutional neural network (CNN) models are being investigated extensively in the field of speech and speaker recognition, and are rapidly gaining appreciation due to their performance robustness and effective training strategies. Recently, they are also providing interesting results in end-to-end configurations using directly raw waveforms for classification, with the drawback however of being more sensible on the amount of training data. We present a raw waveform (RW) end-to-end computational scheme for speaker identification based on CNNs with noise and reverberation data augmentation (DA). The CNN is designed for a frame-to-frame analysis to handle variable-length signals. We analyze the identification performance with simulated experiments in noisy and reverberation conditions comparing the proposed RW-CNN with the mel-frequency cepstral coefficients (MFCCs) features. The results show that the method offers robustness to adverse conditions. The RW-CNN outperforms the MFCC-CNN in noise conditions, and they have similar performance in reverberant environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abinay Reddy Naini|AUTHOR Abinay Reddy Naini]], [[Achuth Rao M.V.|AUTHOR Achuth Rao M.V.]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4340–4344&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we propose a novel feature mapping (FM) from whispered to neutral speech features using a cosine similarity based objective function for speaker verification (SV) using whispered speech. Typically the performance of an SV system enrolled with neutral speech degrades significantly when tested using whispered speech, due to the differences between spectral characteristics of neutral and whispered speech. We hypothesize that FM from whispered Mel frequency cepstral coefficients (MFCC) to neutral MFCC by maximizing cosine similarity between neutral and whisper i-vectors yields better performance than the baseline method, which typically performs a direct FM between MFCC features by minimizing mean squared error (MSE). We also explored an affine transform between MFCC features using the proposed objective function. Whisper SV experiments with 1882 speakers reveal that the equal error rate (EER) using the proposed method is lower than that using the best baseline by ~24% (relative). We show that the proposed FM system maintains the neutral SV performance, while improving the EER of whispered SV unlike baseline methods. We also show that the bias in the learned affine transform is corresponds to the glottal flow information, which is absent in the whispered speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Matthew Wiesner|AUTHOR Matthew Wiesner]], [[Adithya Renduchintala|AUTHOR Adithya Renduchintala]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Chunxi Liu|AUTHOR Chunxi Liu]], [[Najim Dehak|AUTHOR Najim Dehak]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4375–4379&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We explore training attention-based encoder-decoder ASR in low-resource settings. These models perform poorly when trained on small amounts of transcribed speech, in part because they depend on having sufficient target-side text to train the attention and decoder networks. In this paper we address this shortcoming by pretraining our network parameters using only text-based data and transcribed speech from other languages. We analyze the relative contributions of both sources of data. Across 3 test languages, our text-based approach resulted in a 20% average relative improvement over a text-based augmentation technique without pretraining. Using transcribed speech from nearby languages gives a further 20–30% relative reduction in character error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cheng Yi|AUTHOR Cheng Yi]], [[Feng Wang|AUTHOR Feng Wang]], [[Bo Xu|AUTHOR Bo Xu]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4420–4424&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Real-time streaming speech recognition is required by most applications for a nice interactive experience. To naturally support online recognition, a common strategy used in recently proposed end-to-end models is to introduce a blank label to the label set and instead output alignments. However, generating the alignment means decoding much longer than the length of the linguistic sequence. Besides, there exist several blank labels between two output units in the alignment, which hinders models from learning the adjacent dependency of units in the target sequence. In this work, we propose an innovative encoder-decoder structure, called  Ectc-Docd, for online speech recognition which directly predicts the linguistic sequence without blank labels. Apart from the encoder and decoder structures,  Ectc-Docd contains an additional shrinking layer to drop the redundant acoustic information. This layer serves as a bridge connecting acoustic representation and linguistic modelling parts. Through experiments, we confirm that  Ectc-Docd can obtain better performance than a strong CTC model in online ASR tasks. We also show that  Ectc-Docd can achieve promising results on both Mandarin and English ASR datasets with first and second pass decoding.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pavel Denisov|AUTHOR Pavel Denisov]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]
</p><p class="cpabstractcardaffiliationlist">Universität Stuttgart, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4425–4429&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents our latest investigation on end-to-end automatic speech recognition (ASR) for overlapped speech. We propose to train an end-to-end system conditioned on speaker embeddings and further improved by transfer learning from clean speech. This proposed framework does not require any parallel non-overlapped speech materials and is independent of the number of speakers. Our experimental results on overlapped speech datasets show that joint conditioning on speaker embeddings and transfer learning significantly improves the ASR performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suyoun Kim|AUTHOR Suyoun Kim]], [[Siddharth Dalmia|AUTHOR Siddharth Dalmia]], [[Florian Metze|AUTHOR Florian Metze]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4380–4384&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an end-to-end speech recognition model that learns interaction between two speakers based on the turn-changing information. Unlike conventional speech recognition models, our model exploits two speakers history of conversational-context information that spans across multiple turns within an end-to-end framework. Specifically, we propose a speaker-specific cross-attention mechanism that can look at the output of the other speaker side as well as the one of the current speaker for better at recognizing long conversations. We evaluated the models on the Switchboard conversational speech corpus and show that our model outperforms standard end-to-end speech recognition models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Chorowski|AUTHOR Jan Chorowski]], [[Adrian Łańcucki|AUTHOR Adrian Łańcucki]], [[Bartosz Kostka|AUTHOR Bartosz Kostka]], [[Michał Zapotoczny|AUTHOR Michał Zapotoczny]]
</p><p class="cpabstractcardaffiliationlist">University of Wrocław, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4385–4389&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural acoustic models benefit from context-dependent (CD) modeling of output symbols. We consider direct training of CTC networks with CD outputs, and identify two issues. The first one is frame-level normalization of probabilities in CTC, which induces strong language modeling behavior that leads to overfitting and interference with external language models. The second one is poor generalization in the presence of numerous lexical units like triphones or tri-chars. We mitigate the former with utterance-level normalization of probabilities. The latter typically requires reducing the CD symbol inventory with state-tying decision trees, which have to be transferred from classical GMM-HMM systems. We replace the trees with a CD symbol embedding network, which saves parameters and ensures generalization to unseen and undersampled CD symbols. The embedding network is trained together with the rest of the acoustic model and removes one of the last cases in which neural systems have to be bootstrapped from GMM-HMM ones.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruchao Fan|AUTHOR Ruchao Fan]]^^1^^, [[Pan Zhou|AUTHOR Pan Zhou]]^^2^^, [[Wei Chen|AUTHOR Wei Chen]]^^3^^, [[Jia Jia|AUTHOR Jia Jia]]^^2^^, [[Gang Liu|AUTHOR Gang Liu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^BUPT, China; ^^2^^Tsinghua University, China; ^^3^^Sogou, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4390–4394&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based end-to-end models such as Listen, Attend and Spell (LAS), simplify the whole pipeline of traditional automatic speech recognition (ASR) systems and become popular in the field of speech recognition. In previous work, researchers have shown that such architectures can acquire comparable results to state-of-the-art ASR systems, especially when using a bidirectional encoder and global soft attention (GSA) mechanism. However, bidirectional encoder and GSA are two obstacles for real-time speech recognition. In this work, we aim to stream LAS baseline by removing the above two obstacles. On the encoder side, we use a latency-controlled (LC) bidirectional structure to reduce the delay of forward computation. Meanwhile, an adaptive monotonic chunk-wise attention (AMoChA) mechanism is proposed to replace GSA for the calculation of attention weight distribution. Furthermore, we propose two methods to alleviate the huge performance degradation when combining LC and AMoChA. Finally, we successfully acquire an online LAS model, LC-AMoChA, which has only 3.5% relative performance reduction to LAS baseline on our internal Mandarin corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ye Bai|AUTHOR Ye Bai]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4395–4399&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recurrent neural network transducers (RNN-T) have been successfully applied in end-to-end speech recognition. However, the recurrent structure makes it difficult for parallelization. In this paper, we propose a self-attention transducer (SA-T) for speech recognition. RNNs are replaced with self-attention blocks, which are powerful to model long-term dependencies inside sequences and able to be efficiently parallelized. Furthermore, a path-aware regularization is proposed to assist SA-T to learn alignments and improve the performance. Additionally, a chunk-flow mechanism is utilized to achieve online decoding. All experiments are conducted on a Mandarin Chinese dataset AISHELL-1. The results demonstrate that our proposed approach achieves a 21.3% relative reduction in character error rate compared with the baseline RNN-T. In addition, the SA-T with chunk-flow mechanism can perform online decoding with only a little degradation of the performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sheng Li|AUTHOR Sheng Li]], [[Dabre Raj|AUTHOR Dabre Raj]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Peng Shen|AUTHOR Peng Shen]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]
</p><p class="cpabstractcardaffiliationlist">NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4400–4404&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The end-to-end (E2E) model allows for training of automatic speech recognition (ASR) systems without having to consider the acoustic model, lexicon, language model and complicated decoding algorithms, which are integral to conventional ASR systems. Recently, the transformer-based E2E ASR model (ASR-Transformer) showed promising results in many speech recognition tasks. The most common practice is to stack a number of feed-forward layers in the encoder and decoder. As a result, the addition of new layers improves speech recognition performance significantly. However, this also leads to a large increase in the number of parameters and severe decoding latency. In this paper, we propose to reduce the model complexity by simply reusing parameters across all stacked layers instead of introducing new parameters per layer. In order to address the slight reduction in recognition quality we propose to augment the speech inputs with bags-of-attributes. As a result we obtain a highly compressed, efficient and high quality ASR model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jeong-Uk Bang|AUTHOR Jeong-Uk Bang]]^^1^^, [[Mu-Yeol Choi|AUTHOR Mu-Yeol Choi]]^^2^^, [[Sang-Hun Kim|AUTHOR Sang-Hun Kim]]^^2^^, [[Oh-Wook Kwon|AUTHOR Oh-Wook Kwon]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chungbuk National University, Korea; ^^2^^ETRI, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4405–4409&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a method to extend a phone set by using a large amount of Korean broadcast data to improve the performance of spontaneous speech recognition. The proposed method first extracts variable-length phoneme-level segments from broadcast data, and then converts them into fixed-length latent vectors based on an LSTM architecture. Then, we used the k-means algorithm to cluster acoustically similar latent vectors and then build a new phone set by gathering the clustered vectors. To update the lexicon of a speech recognizer, we choose the pronunciation sequence of each word with the highest conditional probability. To verify the performance of the proposed unit, we visualize the spectral patterns and segment duration for the new phone set. In both spontaneous and read speech recognition tasks, the proposed unit is shown to produce better performance than the phoneme-based and grapheme-based units.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Jian Wang|AUTHOR Jian Wang]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Yusuke Shinohara|AUTHOR Yusuke Shinohara]], [[Yoshikazu Yamaguchi|AUTHOR Yoshikazu Yamaguchi]], [[Yushi Aono|AUTHOR Yushi Aono]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4410–4414&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel fully neural network (FNN) -based automatic speech recognition (ASR) system that addresses the out-of-vocabulary (OOV) problem. The most common approach to the OOV problem is leveraging character/sub-word level units as output symbols. Unfortunately, this approach is not suitable for Japanese and Mandarin Chinese since they have many more grapheme sets than English. Our solution is to develop FNN-based ASR that uses a pronunciation-based unit set with dictionaries, i.e., word-to-pronunciation rules. A previous study proposed, for Mandarin Chinese, a greedy cascading decoder (GCD) that uses two neural converters, acoustic-to-pronunciation (A2P) and pronunciation-to-word (P2W) conversion models. However, to generate optimal word sequences, the previous work considered just optimal pronunciation sequences. In this paper, we propose a joint maximization decoder (JMD) that considers the joint probability of pronunciation and word in beam-search decoding. Moreover, we introduce a neural network based joint source channel model for improving A2P conversion performance. Experiments on Japanese ASR tasks demonstrate that JMD achieves better performance than GCD. Furthermore, we show the effectiveness of using just language resources to retrain the P2W conversion model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Titouan Parcollet|AUTHOR Titouan Parcollet]]^^1^^, [[Mohamed Morchid|AUTHOR Mohamed Morchid]]^^1^^, [[Georges Linarès|AUTHOR Georges Linarès]]^^1^^, [[Renato De Mori|AUTHOR Renato De Mori]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIA (EA 4128), France; ^^2^^McGill University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4415–4419&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural networks (DNNs) and more precisely recurrent neural networks (RNNs) are at the core of modern automatic speech recognition systems, due to their efficiency to process input sequences. Recently, it has been shown that different input representations, based on multidimensional algebras, such as complex and quaternion numbers, are able to bring to neural networks a more natural, compressive and powerful representation of the input signal by outperforming common real-valued NNs. Indeed, quaternion-valued neural networks (QNNs) better learn both internal dependencies, such as the relation between the Mel-filter-bank value of a specific time frame and its time derivatives, and global dependencies, describing the relations that exist between time frames. Nonetheless, QNNs are limited to quaternion-valued input signals, and it is difficult to benefit from this powerful representation with real-valued input data. This paper proposes to tackle this weakness by introducing a real-to-quaternion encoder that allows QNNs to process any one dimensional input features, such as traditional Mel-filter-banks for automatic speech recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomoki Hayashi|AUTHOR Tomoki Hayashi]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^1^^, [[Kazuya Takeda|AUTHOR Kazuya Takeda]]^^1^^, [[Shubham Toshniwal|AUTHOR Shubham Toshniwal]]^^3^^, [[Karen Livescu|AUTHOR Karen Livescu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Nagoya University, Japan; ^^2^^Johns Hopkins University, USA; ^^3^^TTIC, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4430–4434&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose an end-to-end text-to-speech (TTS) synthesis model that explicitly uses information from pre-trained embeddings of the text. Recent work in natural language processing has developed self-supervised representations of text that have proven very effective as pre-training for language understanding tasks. We propose using one such pre-trained representation (BERT) to encode input phrases, as an additional input to a Tacotron2-based sequence-to-sequence TTS model. We hypothesize that the text embeddings contain information about the semantics of the phrase and the importance of each word, which should help TTS systems produce more natural prosody and pronunciation. We conduct subjective listening tests of our proposed models using the 24-hour LJSpeech corpus, finding that they improve mean opinion scores modestly but significantly over a baseline TTS model without pre-trained text embedding input.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Noé Tits|AUTHOR Noé Tits]]^^1^^, [[Fengna Wang|AUTHOR Fengna Wang]]^^2^^, [[Kevin El Haddad|AUTHOR Kevin El Haddad]]^^1^^, [[Vincent Pagel|AUTHOR Vincent Pagel]]^^2^^, [[Thierry Dutoit|AUTHOR Thierry Dutoit]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Université de Mons, Belgium; ^^2^^Acapela Group, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4475–4479&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The field of Text-to-Speech has experienced huge improvements last years benefiting from deep learning techniques. Producing realistic speech becomes possible now. As a consequence, the research on the control of the expressiveness, allowing to generate speech in different styles or manners, has attracted increasing attention lately. Systems able to control style have been developed and show impressive results. However the control parameters often consist of latent variables and remain complex to interpret.

In this paper, we analyze and compare different latent spaces and obtain an interpretation of their influence on expressive speech. This will enable the possibility to build controllable speech synthesis systems with an understandable behaviour.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bing Yang|AUTHOR Bing Yang]], [[Jiaqi Zhong|AUTHOR Jiaqi Zhong]], [[Shan Liu|AUTHOR Shan Liu]]
</p><p class="cpabstractcardaffiliationlist">Tencent, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4480–4484&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a novel method to improve the performance and robustness of the front-end text processing modules of Mandarin text-to-speech (TTS) synthesis. We use pre-trained text encoding models, such as the encoder of a transformer based NMT model and BERT, to extract the latent semantic representations of words or characters and use them as input features for tasks in the front-end of TTS systems. Our experiments on the tasks of Mandarin polyphone disambiguation and prosodic structure prediction show that the proposed method can significantly improve the performances. Specifically, we get an absolute improvement of 0.013 and 0.027 in F1 score for prosodic word prediction and prosodic phrase prediction respectively, and an absolute improvement of 2.44% in polyphone disambiguation compared to previous methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Huashan Pan|AUTHOR Huashan Pan]], [[Xiulin Li|AUTHOR Xiulin Li]], [[Zhiqiang Huang|AUTHOR Zhiqiang Huang]]
</p><p class="cpabstractcardaffiliationlist">Databaker Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4485–4488&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a mandarin prosodic boundary prediction model based on Multi-Task Learning (MTL) architecture. The prosody structure of mandarin is a three-level hierarchical structure, which contains three basic units — Prosodic Word (PW), Prosodic Phrase (PPH) and Intonational Phrase (IPH) [1]. Previous studies usually decompose mandarin prosodic boundary prediction task into three independent tasks on these three unit boundaries [1–4]. In recent years, with the development of deep learning, MTL has achieved state-of-the-art performance on many tasks in Natural Language Processing (NLP) field [5–7]. Inspired by this, this paper implements an MTL framework with Bidirectional Long-Short Term Memory and Conditional Random Field (BLSTM-CRF) as the basic model, and takes three independent tasks of mandarin prosodic boundary prediction as sub-modules for PW, PPH and IPH individually. Under the MTL architecture, the three independent tasks are unified for overall optimization. The experiment results show that our model is effective in solving the task of mandarin prosodic boundary prediction, in which the overall prediction performance is improved by 0.8%, and the model size is reduced by about 55%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ajda Gokcen|AUTHOR Ajda Gokcen]]^^1^^, [[Hao Zhang|AUTHOR Hao Zhang]]^^2^^, [[Richard Sproat|AUTHOR Richard Sproat]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Washington, USA; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4489–4493&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural text normalization systems can achieve low error rates; however, the errors they make include not only ones from which the hearer can recover (such as reading  3 as  three dollar) but also  unrecoverable errors, such as reading  3 as  three euros. FST decoding constraints have proven effective at reducing unrecoverable errors. In this paper we explore an alternative approach to error mitigation: using  dual encoder classifiers trained with both positive and negative examples to implement  soft constraints on acceptability. Since the error rates are very low, it is difficult to determine when improvement is significant, but qualitative analysis suggests that soft dual encoder constraints can help reduce the number of unrecoverable errors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jingbei Li|AUTHOR Jingbei Li]]^^1^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Runnan Li|AUTHOR Runnan Li]]^^1^^, [[Pengpeng Zhi|AUTHOR Pengpeng Zhi]]^^2^^, [[Song Yang|AUTHOR Song Yang]]^^2^^, [[Helen Meng|AUTHOR Helen Meng]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^TAL, China; ^^3^^CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4494–4498&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent researches have shown superior performance of applying end-to-end architecture in text-to-speech (TTS) synthesis. However, considering the complex linguistic structure of Chinese, using Chinese characters directly for Mandarin TTS may suffer from the poor linguistic encoding performance, resulting in improper word tokenization and pronunciation errors. To ensure the naturalness and intelligibility of synthetic speech, state-of-the-art Mandarin TTS systems employ a list of components, such as word tokenization, part-of-speech (POS) tagging and grapheme-to-phoneme (G2P) conversion, to produce knowledge-enhanced inputs to alleviate the problems caused by linguistic encoding. These components are based on linguistic expertise and well-designed, but trained individually, leading to errors compounding for the TTS system. In this paper, to reduce the complexity of Mandarin TTS system and bring further improvement, we proposed a knowledge-based linguistic encoder for the character-based end-to-end Mandarin TTS system. Developed with multi-task learning structure, the proposed encoder can learn from linguistic analysis subtasks, providing robust and discriminative linguistic encodings for the following speech generation decoder. Experimental results demonstrate the effectiveness of the proposed framework, with word tokenization error dropped from 12.81% to 1.58%, syllable pronunciation error dropped from 10.89% to 2.81% compared with state-of-the-art baseline approach, providing mean opinion score (MOS) improvement from 3.76 to 3.87.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Hsi-Wei Hsieh|AUTHOR Hsi-Wei Hsieh]], [[Nicolas Charon|AUTHOR Nicolas Charon]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4499–4503&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel approach for emotion conversion that bridges the domains of speech analysis and computer vision. Our strategy is to warp the pitch contour of a source emotional utterance using diffeomorphic curve registration. The associated dynamical process pushes the original source contour towards that of a target emotional utterance. Mathematically, this warping process is completely specified by a set of  initial momenta. Therefore, we use parallel data to train a highway neural network (HNet) to predict these initial momenta directly from the signal characteristics. The input features to the HNet include contextual pitch and spectral information. Once trained, the HNet is used to obtain the initial momenta for new utterances. From here, the diffeomorphic process takes over and warps the pitch contour accordingly. We validate our framework on the VESUS repository collected at Johns Hopkins University, which contains parallel emotional utterances from 10 actors. The proposed warping is more accurate that three state-of-the-art baselines for emotion conversion. We also evaluate the quality of our emotion manipulations via crowd sourcing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Éva Székely|AUTHOR Éva Székely]], [[Gustav Eje Henter|AUTHOR Gustav Eje Henter]], [[Jonas Beskow|AUTHOR Jonas Beskow]], [[Joakim Gustafson|AUTHOR Joakim Gustafson]]
</p><p class="cpabstractcardaffiliationlist">KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4435–4439&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Synthesising spontaneous speech is a difficult task due to disfluencies, high variability and syntactic conventions different from those of written language. Using found data, as opposed to lab-recorded conversations, for speech synthesis adds to these challenges because of overlapping speech and the lack of control over recording conditions. In this paper we address these challenges by using a speaker-dependent CNN-LSTM breath detector to separate continuous recordings into utterances, which we here apply to extract nine hours of clean single-speaker breath groups from a conversational podcast. The resulting corpus is transcribed automatically (both lexical items and filler tokens) and used to build several voices on a Tacotron 2 architecture. Listening tests show: i) pronunciation accuracy improved with phonetic input and transfer learning; ii) it is possible to create a more fluent conversational voice by training on data without filled pauses; and iii) the presence of filled pauses improved perceived speaker authenticity. Another listening test showed the found podcast voice to be more appropriate for prompts from both public speeches and casual conversations, compared to synthesis from found read speech and from a manually transcribed lab-recorded spontaneous conversation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Viacheslav Klimkov|AUTHOR Viacheslav Klimkov]]^^1^^, [[Srikanth Ronanki|AUTHOR Srikanth Ronanki]]^^2^^, [[Jonas Rohnke|AUTHOR Jonas Rohnke]]^^2^^, [[Thomas Drugman|AUTHOR Thomas Drugman]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, Germany; ^^2^^Amazon, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4440–4444&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a neural text-to-speech system for fine-grained prosody transfer from one speaker to another. Conventional approaches for end-to-end prosody transfer typically use either fixed-dimensional or variable-length prosody embedding via a secondary attention to encode the reference signal. However, when trained on a single-speaker dataset, the conventional prosody transfer systems are not robust enough to speaker variability, especially in the case of a reference signal coming from an unseen speaker. Therefore, we propose decoupling of the reference signal alignment from the overall system. For this purpose, we pre-compute phoneme-level time stamps and use them to aggregate prosodic features per phoneme, injecting them into a sequence-to-sequence text-to-speech system. We incorporate a variational auto-encoder to further enhance the latent representation of prosody embeddings. We show that our proposed approach is significantly more stable and achieves reliable prosody transplantation from an unseen speaker. We also propose a solution to the use case in which the transcription of the reference signal is absent. We evaluate all our proposed methods using both objective and subjective listening tests.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nusrah Hussain|AUTHOR Nusrah Hussain]], [[Engin Erzin|AUTHOR Engin Erzin]], [[T. Metin Sezgin|AUTHOR T. Metin Sezgin]], [[Yücel Yemez|AUTHOR Yücel Yemez]]
</p><p class="cpabstractcardaffiliationlist">Koç Üniversitesi, Turkey</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4445–4449&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a novel method for training a social robot to generate backchannels during human-robot interaction. We address the problem within an off-policy reinforcement learning framework, and show how a robot may learn to produce non-verbal backchannels like laughs, when trained to maximize the engagement and attention of the user. A major contribution of this work is the formulation of the problem as a Markov decision process (MDP) with states defined by the speech activity of the user and rewards generated by quantified engagement levels. The problem that we address falls into the class of applications where unlimited interaction with the environment is not possible (our environment being a human) because it may be time-consuming, costly, impracticable or even dangerous in case a bad policy is executed. Therefore, we introduce deep Q-network (DQN) in a batch reinforcement learning framework, where an optimal policy is learned from a batch data collected using a more controlled policy. We suggest the use of human-to-human dyadic interaction datasets as a batch of trajectories to train an agent for engaging interactions. Our experiments demonstrate the potential of our method to train a robot for engaging behaviors in an offline manner.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomoki Koriyama|AUTHOR Tomoki Koriyama]]^^1^^, [[Takao Kobayashi|AUTHOR Takao Kobayashi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tokyo, Japan; ^^2^^Tokyo Tech, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4450–4454&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a semi-supervised speech synthesis framework in which prosodic labels of training data are partially annotated. When we construct a text-to-speech (TTS) system, it is crucial to use appropriately annotated prosodic labels. For this purpose, manually annotated ones would provide a good result, but it generally costs much time and patience. Although recent studies report that end-to-end TTS framework can generate natural-sounding prosody without using prosodic labels, this does not always appear in arbitrary languages such as pitch accent ones. Alternatively, we propose an approach to utilizing a latent variable representation of prosodic information. In the latent variable representation, we employ deep Gaussian process (DGP), a deep Bayesian generative model. In the proposed semi-supervised learning framework, the posterior distributions of latent variables are inferred from linguistic and acoustic features, and the inferred latent variables are utilized to train a DGP-based regression model of acoustic features. Experimental results show that the proposed framework can give a comparable performance with the case using fully-annotated speech data in subjective evaluation even if the prosodic information of pitch accent is limited.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna Björk Nikulásdóttir|AUTHOR Anna Björk Nikulásdóttir]], [[Jón Guðnason|AUTHOR Jón Guðnason]]
</p><p class="cpabstractcardaffiliationlist">Reykjavik University, Iceland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4455–4459&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Text normalization is an important part of many natural language applications, in particular for text-to-speech systems. Text normalization poses special challenges for highly inflected languages since the correct morphological form for the normalization is not evident from the non-standard word, e.g. a digit.

In this paper we report on ongoing work on a text normalization system for Icelandic, a highly inflected North Germanic language. We describe experiments on the normalization of numbers and address the problem of choosing the correct morphological form of number names. We use language models trained on texts containing number names and inspect effects of different LMs on domain specific texts with a high ratio of digits. A partially class based LM, replacing number names with their part-of-speech tags, shows the best results in all domains. We further show that testing normalization on texts where number names have been converted to digits does not show representative results for texts originally containing digits: while a test set similar to the language model training data shows an error rate of 10.1% on inflected cardinals from 1–99, test sets originally containing digits show 45.3% and 55% error rates.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haohan Guo|AUTHOR Haohan Guo]]^^1^^, [[Frank K. Soong|AUTHOR Frank K. Soong]]^^2^^, [[Lei He|AUTHOR Lei He]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4460–4464&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The end-to-end TTS, which can predict speech directly from a given sequence of graphemes or phonemes, has shown improved performance over the conventional TTS. However, its predicting capability is still limited by the acoustic/phonetic coverage of the training data, usually constrained by the training set size. To further improve the TTS quality in pronunciation, prosody and perceived naturalness, we propose to exploit the information embedded in a syntactically parse tree where the inter-phrase/word information of a sentence is organized in a multilevel tree structure. Specifically, two key features: phrase structure and relations between adjacent words are investigated. Experimental results in subjective listening, measured on three test sets, show that the proposed approach is effective to improve the pronunciation clarity, prosody and naturalness of the synthesized speech of the baseline system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinfu Ni|AUTHOR Jinfu Ni]], [[Yoshinori Shiga|AUTHOR Yoshinori Shiga]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]
</p><p class="cpabstractcardaffiliationlist">NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4465–4469&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A duration model is a major component in every parametric speech synthesis system. Conventional methods use full contextual labels as features to predict phoneme durations that require morphological analysis of text. By contrast, advances in bidirectional recurrent neural networks (BRNN) and global space vector models make it possible to perform grapheme-to-phoneme (G2P) conversion from plain text. In this paper, we investigate duration prediction from plain phonemes instead of using their full contextual labels. We propose a new approach that relies on both BRNN and global space vector representations of phonemes (GPV) and durations (GDV). GPVs represent the statistics of phonemes used in a language, whereas GDVs capture duration variations beyond linguistic features. They are essentially learned from a large-scale text corpus in an unsupervised manner where phonemes are converted by G2P.

We conducted experiments on two speech corpora in Korean and Chinese to train BRNN-based models in a supervised manner. An objective evaluation conducted on a set of test sentences demonstrated that the proposed method leads to more accurate modeling of phoneme durations than the baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adèle Aubin|AUTHOR Adèle Aubin]]^^1^^, [[Alessandra Cervone|AUTHOR Alessandra Cervone]]^^2^^, [[Oliver Watts|AUTHOR Oliver Watts]]^^1^^, [[Simon King|AUTHOR Simon King]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Edinburgh, UK; ^^2^^Università di Trento, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4470–4474&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper explores whether adding Discourse Relation (DR) features improves the naturalness of neural statistical parametric speech synthesis (SPSS) in English. We hypothesize first — in the light of several previous studies — that DRs have a dedicated prosodic encoding. Secondly, we hypothesize that encoding DRs in a speech synthesizer’s input will improve the naturalness of its output. In order to test our hypotheses, we prepare a dataset of DR-annotated transcriptions of audiobooks in English. We then perform an acoustic analysis of the corpus which supports our first hypothesis that DRs are acoustically encoded in speech prosody. The analysis reveals significant correlation between specific DR categories and acoustic features, such as F0 and intensity. Then, we use the corpus to train a neural SPSS system in two configurations: a baseline configuration making use only of conventional linguistic features, and an experimental one where these are supplemented with DRs. Augmenting the inputs with DR features improves objective acoustic scores on a test set and leads to significant preference by listeners in a forced choice AB test for naturalness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kathryn P. Connaghan|AUTHOR Kathryn P. Connaghan]]^^1^^, [[Jordan R. Green|AUTHOR Jordan R. Green]]^^1^^, [[Sabrina Paganoni|AUTHOR Sabrina Paganoni]]^^2^^, [[James Chan|AUTHOR James Chan]]^^3^^, [[Harli Weber|AUTHOR Harli Weber]]^^3^^, [[Ella Collins|AUTHOR Ella Collins]]^^3^^, [[Brian Richburg|AUTHOR Brian Richburg]]^^1^^, [[Marziye Eshghi|AUTHOR Marziye Eshghi]]^^1^^, [[J.P. Onnela|AUTHOR J.P. Onnela]]^^2^^, [[James D. Berry|AUTHOR James D. Berry]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MGH Institute of Health Professions, USA; ^^2^^Harvard University, USA; ^^3^^Massachusetts General Hospital, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4504–4508&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The capacity for smartphones to remotely capture speech data affords significant clinical and research opportunities for degenerative neurologic diseases such as amyotrophic lateral sclerosis (ALS). Longitudinal data may inform ALS disease prognosis, facilitate timely intervention, and document response to treatment [1]. A recent study established the feasibility of the Beiwe smartphone-based digital phenotyping to track the clinical progression of ALS across multiple domains [2]. The current investigation extends this work to address the utility of Beiwe to identify and track speech decline in ALS. Twelve participants with ALS used the Beiwe app weekly to record reading passages and self-report (ALSFRS-R) ratings of bulbar (speech) function. Speaking rate and pause variables were automatically extracted from recordings offline [3]. Speech function measures at baseline were significantly different for participants with and without bulbar symptoms. In addition, the rate of decline of all measured speech functions was greater for participants with bulbar symptoms. The successful use of Beiwe for speech function analysis suggests that smartphone-based capture of speech has potential for diagnostic screening and disease progress monitoring in ALS. Further large sample investigation across a comprehensive set of speech variables is warranted.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luis Serrano|AUTHOR Luis Serrano]], [[Sneha Raman|AUTHOR Sneha Raman]], [[David Tavarez|AUTHOR David Tavarez]], [[Eva Navas|AUTHOR Eva Navas]], [[Inma Hernaez|AUTHOR Inma Hernaez]]
</p><p class="cpabstractcardaffiliationlist">Universidad del País Vasco, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4549–4553&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State of the art systems for voice conversion have been shown to generate highly natural sounding converted speech. Voice conversion techniques have also been applied to alaryngeal speech, with the aim of improving its quality or its intelligibility. In this paper, we present an attempt to apply a voice conversion strategy based on phonetic posteriorgrams (PPGs), which produces very high quality converted speech, to improve the characteristics of esophageal speech. The main advantage of this PPG based architecture lies in the fact that it is able to convert speech from any source, without the need to previously train the system with a parallel corpus. However, our results show that the PPG approach degrades the intelligibility of the converted speech considerably, especially when the input speech is already poorly intelligible. In this paper two systems are compared, an LSTM based one-to-one conversion system, which is referred to as the baseline, and the new system using phonetic posteriorgrams. Both spectral parameters and f,,0,, are converted using DNN (Deep Neural Network) based architectures. Results from both objective and subjective evaluations are presented, showing that although ASR (Automated Speech Recognition) errors are reduced, original esophageal speech is still preferred by subjects.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Akhilesh Kumar Dubey|AUTHOR Akhilesh Kumar Dubey]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]], [[S. Dandapat|AUTHOR S. Dandapat]]
</p><p class="cpabstractcardaffiliationlist">IIT Guwahati, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4554–4558&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, detection of hypernasality severity in cleft palate speech is attempted using constant Q cepstral coefficients (CQCC) feature. The coupling of nasal tract with the oral tract during the production of hypernasal speech adds nasal formants and anti-formants in low frequency region of vowel spectrum mainly around the first formant. The strength and position of nasal formants and anti-formants along with the oral formants changes as the severity of nasality changes in hypernasal speech. The CQCC feature is extracted from the constant Q transform (CQT) spectrum which employs geometrically spaced frequency bins and maintains a constant Q factor for across the entire spectrum. This results in a higher frequency resolution at lower frequencies and higher temporal resolution at higher frequencies. The CQT spectrum resolves the nasal and oral formants in low frequency and captures the spectral changes due to change in nasality severity. The CQCC feature gives the overall classification accuracy of 83.33% and 78.47% for /i/ and /u/ vowels corresponding to normal, mild and moderate-severe hypernasal speech, respectively using multiclass support vector classifier.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingyue Niu|AUTHOR Mingyue Niu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Cunhang Fan|AUTHOR Cunhang Fan]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4559–4563&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Related physiological studies have shown that Mel-frequency cepstral coefficient (MFCC) is a discriminative acoustic feature for depression detection. This fact has led to some works using MFCCs to identify individual depression degree. However, they rarely adopt neural network to capture high-level feature associated with depression detection. And the suitable feature pooling parameter for depression detection has not been optimized. For these reasons, we propose a hybrid network and ℓ,,p,,-norm pooling combined with least absolute shrinkage and selection operator (LASSO) to improve the accuracy of depression detection. Firstly, the MFCCs of the original speech are divided into many segments. Then, we extract the segment-level feature using the proposed hybrid network, which investigates the depression-related information in the spatial structure, temporal changes and discriminative representation of short-term MFCC segments. Thirdly, ℓ,,p,,-norm pooling combined with LASSO is adopted to find the optimal pooling parameter for depression detection to generate the utterance-level feature. Finally, depression level prediction is accomplished using support vector regression (SVR). Experiments are conducted on AVEC2013 and AVEC2014. The results demonstrate that our proposed method achieves better performance than the previous algorithms.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suhas B.N.|AUTHOR Suhas B.N.]]^^1^^, [[Deep Patel|AUTHOR Deep Patel]]^^1^^, [[Nithin Rao|AUTHOR Nithin Rao]]^^2^^, [[Yamini Belur|AUTHOR Yamini Belur]]^^3^^, [[Pradeep Reddy|AUTHOR Pradeep Reddy]]^^3^^, [[Nalini Atchayaram|AUTHOR Nalini Atchayaram]]^^3^^, [[Ravi Yadav|AUTHOR Ravi Yadav]]^^3^^, [[Dipanjan Gope|AUTHOR Dipanjan Gope]]^^1^^, [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^University of Southern California, USA; ^^3^^NIMHANS, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4564–4568&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We consider the task of speech based automatic classification of patients with amyotrophic lateral sclerosis (ALS) and healthy subjects. The role of different speech tasks and recording devices on classification accuracy is examined. Sustained phoneme production (PHON), diadochokinetic task (DDK) and spontaneous speech (SPON) have been used as speech tasks. The chosen five recording devices include a high quality microphone and built-in smartphone microphones at various price ranges. Experiments are performed using speech data from 25 ALS patients and 25 healthy subjects using support vector machines and deep neural networks as classifiers and suprasegmental features based on mel frequency cepstral coefficients. Results reveal that DDK consistently performs better than SPON and PHON across all devices for discriminating ALS patients and healthy subjects. Considering DDK, the best classification accuracy of 92.2% is obtained using a high quality microphone but the accuracy drops if there is a mismatch between the microphones for training and test. However, a classifier trained with recordings from all devices together performs more uniformly across all devices. The findings from this study could aid in determining the choice of the task and device in developing an assistive tool for detection and monitoring of ALS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hannah P. Rowe|AUTHOR Hannah P. Rowe]], [[Jordan R. Green|AUTHOR Jordan R. Green]]
</p><p class="cpabstractcardaffiliationlist">MGH Institute of Health Professions, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4509–4513&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The goal of this study was to profile the speech motor impairments that characterize dysarthria secondary to amyotrophic lateral sclerosis (ALS). This information is important for identifying optimal treatment strategies and guiding speech impairment subtype discovery, which may facilitate ongoing efforts to improve automatic speech recognition (ASR) of dysarthric speech. Speech motor impairments were profiled by introducing a novel framework that assesses four key components of motor control:  coordination,  consistency,  speed, and  precision. An individual acoustic feature was selected to represent each component. Specifically,  coordination was indexed by the proportion of voice onset time (VOT) to syllable duration,  consistency by the coefficient of variation of VOT between repetitions of /pataka/ within each distinct consonant,  speed by the slope of the second formant (F2), and  precision by the standard deviation of F2 slope between distinct consonants within each repetition of /pataka/. Acoustic measures were extracted from audio recordings of each participant (18 controls and 14 participants with ALS) during a sequential motion rate (SMR) task. Results revealed that the primary underlying speech motor impairments that characterize ALS are in  coordination,  speed, and  precision. Further research is needed to validate the existence of speech-impairment-based subtypes across the continuum of speech motor disorders.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alex Mayle|AUTHOR Alex Mayle]]^^1^^, [[Zhiwei Mou|AUTHOR Zhiwei Mou]]^^2^^, [[Razvan Bunescu|AUTHOR Razvan Bunescu]]^^1^^, [[Sadegh Mirshekarian|AUTHOR Sadegh Mirshekarian]]^^1^^, [[Li Xu|AUTHOR Li Xu]]^^1^^, [[Chang Liu|AUTHOR Chang Liu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ohio University, USA; ^^2^^Jinan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4514–4518&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes the use of Recurrent Neural Networks (RNNs) with Long Short-Term Memory (LSTM) units for determining whether Mandarin-speaking individuals are afflicted with a form of Dysarthria based on samples of syllable pronunciations. Several LSTM network architectures are evaluated on this binary classification task, using accuracy and Receiver Operating Characteristic (ROC) curves as metrics. The LSTM models are shown to significantly improve upon a baseline fully connected network, reaching over 90% area under the ROC curve on the task of classifying new speakers, when a sufficient number of cepstrum coefficients are used. The results show that the LSTM’s ability to leverage temporal information within its input makes for an effective step in the pursuit of accessible Dysarthria diagnoses.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Protima Nomo Sudro|AUTHOR Protima Nomo Sudro]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]
</p><p class="cpabstractcardaffiliationlist">IIT Guwahati, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4519–4523&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The cleft of the lip and palate (CLP) caused by structural and functional deformation leads to various speech-related disorders, which substantially degrades the speech intelligibility. In this work, devoiced stop consonants in CLP speech are analyzed, and an approach is proposed for its modification in order to enhance the speech intelligibility. The devoicing errors are primarily characterized by the absence of voicebar in the closure interval and relatively longer voice onset time (VOT). The proposed approach first segments the regions corresponding to the closure interval and VOT based on the knowledge of glottal activity, voice onset point, voice offset point, and burst onset point. In the next stage, specific transformations are performed for the modification of closure bar and VOT respectively. For transformation, first different transformation matrices are learned for closure bar and VOT from normal and CLP speakers. The transformation matrix is optimized using nonnegative matrix factorization method. Further, the corresponding transformation matrices are used to modify the closure bar and VOT separately. The subjective evaluation results indicate that the devoiced stop consonants tend to exhibit the characteristics of voiced stop consonants.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marziye Eshghi|AUTHOR Marziye Eshghi]]^^1^^, [[Panying Rong|AUTHOR Panying Rong]]^^2^^, [[Antje S. Mefferd|AUTHOR Antje S. Mefferd]]^^3^^, [[Kaila L. Stipancic|AUTHOR Kaila L. Stipancic]]^^1^^, [[Yana Yunusova|AUTHOR Yana Yunusova]]^^4^^, [[Jordan R. Green|AUTHOR Jordan R. Green]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MGH Institute of Health Professions, USA; ^^2^^University of Kansas, USA; ^^3^^Vanderbilt University, USA; ^^4^^University of Toronto, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4524–4528&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The identification of robust biomarkers to detect the onset of amyotrophic lateral sclerosis (ALS) has been an ongoing challenge. Recent evidence from multiple studies suggests that speech changes are a reliable early indicator of ALS particularly during physically demanding speaking tasks such as alternating motion rate (AMR). However, it has also been found that individuals make various behavioral adaptations to meet the maximum rate requirement in AMR. In this study, we explored the extent to which persons with early-stage ALS are capable of adapting to challenging speech-like tasks. Speech motor performance of 14 healthy controls was compared to that of 18 patients at the early stage of ALS during standard (unconstrained) and fixed-target (constrained) AMR tasks. Fixed-target tasks were designed to impose high demands on the speech motor system. Although habitual speaking rate was maintained within normal limits, findings revealed that task adaptation was reduced at the early stage of ALS. Furthermore, the difference between the number of cycles in the fixed-target task and standard task showed higher sensitivity than habitual speaking rate to detect early decline in bulbar function. The inability to adapt to the fixed-target task was a good early indicator of bulbar motor involvement due to ALS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tianqi Wang|AUTHOR Tianqi Wang]]^^1^^, [[Quanlei Yan|AUTHOR Quanlei Yan]]^^1^^, [[Jingshen Pan|AUTHOR Jingshen Pan]]^^1^^, [[Feiqi Zhu|AUTHOR Feiqi Zhu]]^^2^^, [[Rongfeng Su|AUTHOR Rongfeng Su]]^^1^^, [[Yi Guo|AUTHOR Yi Guo]]^^3^^, [[Lan Wang|AUTHOR Lan Wang]]^^1^^, [[Nan Yan|AUTHOR Nan Yan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Shenzhen Luohu People’s Hospital, China; ^^3^^Shenzhen People’s Hospital, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4529–4533&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech and language features have been proven to be useful for the detection of neurodegenerative diseases, such as Alzheimer’s disease (AD), and its prodromal stage, mild cognitive impairment (MCI). Unfortunately, high-quality speech database remains scarce, which limit its application in automatic screening and assessment of early dementia in clinical practice. To bridge this gap, the present study aimed to design a speech database of Chinese elderly with intact cognition and MCI, named “Mandarin Elderly Cognitive Speech Database” (MECSD). The database consists of 110 hours of speech recordings from 85 native speakers of Mandarin Chinese (age range = 55–85 years), including 20 participants with MCI and 65 healthy controls. Manually transcribed materials with temporal information were also included in this database. Nine tasks, involving conventional test batteries and connected speech productions, were used to obtain speech samples, producing a total of 8563 sentences and 49841 words. Details concerning the design of the database, together with our preliminary findings applying automatic speech recognition (ASR), were reported in this study. The MECSD will provide researchers with access to a large shared database that can facilitate hypothesis testing in the study of early-stage dementia.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenjun Chen|AUTHOR Wenjun Chen]]^^1^^, [[Jeroen van de Weijer|AUTHOR Jeroen van de Weijer]]^^2^^, [[Shuangshuang Zhu|AUTHOR Shuangshuang Zhu]]^^3^^, [[Qian Qian|AUTHOR Qian Qian]]^^3^^, [[Manna Wang|AUTHOR Manna Wang]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SISU, China; ^^2^^Shenzhen University, China; ^^3^^Shanghai Sunshine Rehabilitation Center, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4534–4538&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study identifies the acoustic characteristics of tones produced by Mandarin brain-damaged patients. We investigate the F0 characteristics of the patients’ tone productions and compare them with a control group of healthy speakers. The results show tone disruption in patients with brain damage in either the left or the right hemisphere. Even patients’ tone productions that were correctly identified by Mandarin native speakers were acoustically different from the ones produced by healthy speakers. The patterns of tone disruption in Mandarin brain-damaged patients might be caused by damage to the motor function in the brain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anne Hermes|AUTHOR Anne Hermes]]^^1^^, [[Doris Mücke|AUTHOR Doris Mücke]]^^2^^, [[Tabea Thies|AUTHOR Tabea Thies]]^^2^^, [[Michael T. Barbe|AUTHOR Michael T. Barbe]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LPP (UMR 7018), France; ^^2^^Universität zu Köln, Germany; ^^3^^Uniklinik Köln, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4539–4543&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the present study, we investigate intragestural parameters during the production of CV syllables in natural sentence production of Essential Tremor (ET) patients treated with Deep Brain Stimulation (DBS). Within the task dynamic approach, we analyzed temporal and spatial parameters of consonantal and vocalic movements of the respective target syllables. Our analysis revealed that intragestural coordination patterns are affected in the patients’ group: While patients with inactivated stimulation (DBS-OFF) already showed signs of dysarthria in terms of longer and less stiff movements, there was an additional slowing down of the speech motor system under activated stimulation (DBS-ON). When comparing CV production in natural sentence to fast syllable repetition tasks (DDK), we find similarities in that there is a slowing down of the system, but also differences in that coordination problems increase in DDK leading to an overmodulation of peak velocities and displacements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sishir Kalita|AUTHOR Sishir Kalita]], [[Protima Nomo Sudro|AUTHOR Protima Nomo Sudro]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]], [[S. Dandapat|AUTHOR S. Dandapat]]
</p><p class="cpabstractcardaffiliationlist">IIT Guwahati, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4544–4548&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Cleft lip and palate (CLP) is a congenital disorder of the orofacial region. Nasal air emission (NAE) in CLP speech occurs due to the presence of velopharyngeal dysfunction (VPD), and it mostly occurs in the production of fricative sounds. The objective of present work is to study the acoustic characteristics of voiceless sibilant fricatives in Kannada distorted by NAE and develop an SVM-based classification to distinguish normal fricatives from the NAE distorted fricatives. Static spectral measures, such as spectral moments are used to analyze the deviant spectral distribution of NAE distorted fricatives. As the aerodynamic parameters are deviated due to VPD, the temporal variation of spectral characteristics might also get deviated in NAE distorted fricatives. This variation is studied using the peak equivalent rectangular bandwidth (ERB,,N,,)-number, a psychoacoustic measure to analyze the temporal variation in the spectral properties of fricatives. The analysis of NAE distorted fricatives shows that the maximum spectral density is concentrated in the lower frequency range with steep positive skewness and more variations in the trajectories of peak ERB,,N,,-number as compared to the normal fricatives. The proposed SVM-based classification achieves good detection rates in discriminating NAE distorted fricatives from normal fricatives.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dongxiao Wang|AUTHOR Dongxiao Wang]]^^1^^, [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]]^^2^^, [[Koichi Shinoda|AUTHOR Koichi Shinoda]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tokyo Tech, Japan; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4569–4573&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a new algorithm to estimate the phase of speech signal in the mixture of audio sources under the assumption that the magnitude spectrum of each source is given. The previous method, multiple input spectrogram inversion algorithm (MISI), often performs poorly when the magnitude spectrograms estimated are not accurate. This may be because it imposes a strict constraint that the summation of source waveforms should be exactly the same as the mixture waveform. Our proposing algorithm employs a new objective function in which this constraint is relaxed. In this objective function, the difference between the summation of source waveforms and the mixture waveform is the target to be minimized. The performance of our method, modified MISI is evaluated on two different experimental settings. In both settings it improves the audio source separation performance compared to MISI.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziqiang Shi|AUTHOR Ziqiang Shi]]^^1^^, [[Huibin Lin|AUTHOR Huibin Lin]]^^1^^, [[Liu Liu|AUTHOR Liu Liu]]^^1^^, [[Rujie Liu|AUTHOR Rujie Liu]]^^1^^, [[Shoji Hayakawa|AUTHOR Shoji Hayakawa]]^^2^^, [[Shouji Harada|AUTHOR Shouji Harada]]^^2^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fujitsu, China; ^^2^^Fujitsu, Japan; ^^3^^Harbin Institute of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4614–4618&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The monaural speech separation technology is far from satisfactory and has been a challenging task due to the interference of multiple sound sources. While deep dilated temporal convolutional networks (TCN) have been proved to be very effective in sequence modeling, this work investigates how to extend TCN to result in a new state-of-the-art approach for monaural speech separation. First a novel gating mechanisms is introduced and added to result in gated TCN. The gated activation can control the flow of information. Further in order to remedy the temporal scale variation problem caused by word length and pronunciation characteristics of different people, a multi-scale dynamic weighted pyramids gated TCNs is proposed, where a “weightor” network is used to determine the weights of different gated TCNs dynamically for each utterance. Since the strengths of different branches with different temporal receipt fields appear complementary, the combination outperforms single branch system. For the objective, we propose to train the network by directly optimizing utterance level signal-to-distortion ratio (SDR) in a permutation invariant training (PIT) style. Our experiments on the the WSJ0-2mix data corpus results in 18.4dB SDR improvement, which shows our proposed networks can leads to performance improvement on the speaker separation task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Francesc Lluís|AUTHOR Francesc Lluís]], [[Jordi Pons|AUTHOR Jordi Pons]], [[Xavier Serra|AUTHOR Xavier Serra]]
</p><p class="cpabstractcardaffiliationlist">Universitat Pompeu Fabra, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4619–4623&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Most of the currently successful source separation techniques use the magnitude spectrogram as input, and are therefore by default omitting part of the signal: the phase. To avoid omitting potentially useful information, we study the viability of using end-to-end models for music source separation — which take into account all the information available in the raw audio signal, including the phase. Although during the last decades end-to-end music source separation has been considered almost unattainable, our results confirm that waveform-based models can perform similarly (if not better) than a spectrogram-based deep learning model. Namely: a Wavenet-based model we propose and Wave-U-Net can outperform DeepConvSep, a recent spectrogram-based deep learning model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fahimeh Bahmaninezhad|AUTHOR Fahimeh Bahmaninezhad]]^^1^^, [[Jian Wu|AUTHOR Jian Wu]]^^2^^, [[Rongzhi Gu|AUTHOR Rongzhi Gu]]^^3^^, [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]]^^4^^, [[Yong Xu|AUTHOR Yong Xu]]^^4^^, [[Meng Yu|AUTHOR Meng Yu]]^^4^^, [[Dong Yu|AUTHOR Dong Yu]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^Northwestern Polytechnical University, China; ^^3^^Peking University, China; ^^4^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4574–4578&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech separation has been studied widely for single-channel close-talk microphone recordings over the past few years; developed solutions are mostly in frequency-domain. Recently, a raw audio waveform separation network (TasNet) is introduced for single-channel data, with achieving high Si-SNR (scale-invariant source-to-noise ratio) and SDR (source-to-distortion ratio) comparing against the state-of-the-art solution in frequency-domain. In this study, we incorporate effective components of the TasNet into a frequency-domain separation method. We compare both for alternative scenarios. We introduce a solution for directly optimizing the separation criterion in frequency-domain networks. In addition to speech separation objective and subjective measurements, we evaluate the separation performance on a speech recognition task as well. We study the speech separation problem for far-field data (more similar to naturalistic audio streams) and develop multi-channel solutions for both frequency and time-domain separators with utilizing spectral, spatial and speaker location information. For our experiments, we simulated multi-channel spatialized reverberate WSJ0-2mix dataset. Our experimental results show that spectrogram separation can achieve competitive performance with better network design. Multi-channel framework as well is shown to improve the single-channel performance relatively up to +35.5% and +46% in terms of WER and SDR, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Berkay İnan|AUTHOR Berkay İnan]]^^1^^, [[Milos Cernak|AUTHOR Milos Cernak]]^^2^^, [[Helmut Grabner|AUTHOR Helmut Grabner]]^^2^^, [[Helena Peic Tukuljac|AUTHOR Helena Peic Tukuljac]]^^1^^, [[Rodrigo C.G. Pena|AUTHOR Rodrigo C.G. Pena]]^^1^^, [[Benjamin Ricaud|AUTHOR Benjamin Ricaud]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EPFL, Switzerland; ^^2^^Logitech, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4579–4583&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Source separation involving mono-channel audio is a challenging problem, in particular for speech separation where source contributions overlap both in time and frequency. This task is of high interest for applications such as video conferencing. Recent progress in machine learning has shown that the combination of visual cues, coming from the video, can increase the source separation performance. Starting from a recently designed deep neural network, we assess its ability and robustness to separate the visible speakers’ speech from other interfering speeches or signals. We test it for different configuration of video recordings where the speaker’s face may not be fully visible. We also asses the performance of the network with respect to different sets of visual features from the speakers’ faces.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David Ditter|AUTHOR David Ditter]], [[Timo Gerkmann|AUTHOR Timo Gerkmann]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4584–4588&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent studies have shown that Deep Learning based single-channel speech separation systems perform worse for same-gender mixtures than for different-gender mixtures. In this work, we provide for a more detailed analysis of the respective impact of the fundamental frequency and the vocal tract length on the system performance. While both parameters are correlated with gender, the vocal tract length is a fixed speaker-specific parameter, whereas the fundamental frequency can vary for different speaking styles. We show that the difference of the fundamental frequency medians of two speakers in a mixture is highly correlated with the SDR performance while the difference of the vocal tract lengths is not. Our analysis allows us to do performance predictions for given speakers based on measurements of their fundamental frequency. Furthermore we conclude that current systems separate (short-term) speaking styles rather than (long-term) speaker characteristics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jeroen Zegers|AUTHOR Jeroen Zegers]], [[Hugo Van hamme|AUTHOR Hugo Van hamme]]
</p><p class="cpabstractcardaffiliationlist">Katholieke Universiteit Leuven, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4589–4593&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years there have been many deep learning approaches towards the multi-speaker source separation problem. Most use Long Short-Term Memory - Recurrent Neural Networks (LSTM-RNN) or Convolutional Neural Networks (CNN) to model the sequential behavior of speech. In this paper we propose a novel network for source separation using an encoder-decoder CNN and LSTM in parallel. Hyper parameters have to be chosen for both parts of the network and they are potentially mutually dependent. Since hyper parameter grid search has a high computational burden, random search is often preferred. However, when sampling a new point in the hyper parameter space, it can potentially be very close to a previously evaluated point and thus give little additional information. Furthermore, random sampling is as likely to sample in a promising area as in an hyper space area dominated with poor performing models. Therefore, we use a Bayesian hyper parameter optimization technique and find that the parallel CNN-LSTM outperforms the LSTM-only and CNN-only model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Helen L. Bear|AUTHOR Helen L. Bear]], [[In^es Nolasco|AUTHOR In^es Nolasco]], [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]]
</p><p class="cpabstractcardaffiliationlist">Queen Mary University of London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4594–4598&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic Scene Classification (ASC) and Sound Event Detection (SED) are two separate tasks in the field of computational sound scene analysis. In this work, we present a new dataset with both sound scene and sound event labels and use this to demonstrate a novel method for jointly classifying sound scenes and recognizing sound events. We show that by taking a joint approach, learning is more efficient and whilst improvements are still needed for sound event detection, SED results are robust in a dataset where the sample distribution is skewed towards sound scenes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cunhang Fan|AUTHOR Cunhang Fan]], [[Bin Liu|AUTHOR Bin Liu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4599–4603&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep clustering (DC) and utterance-level permutation invariant training (uPIT) have been demonstrated promising for speaker-independent speech separation. DC is usually formulated as two-step processes: embedding learning and embedding clustering, which results in complex separation pipelines and a huge obstacle in directly optimizing the actual separation objectives. As for uPIT, it only minimizes the chosen permutation with the lowest mean square error, doesn’t discriminate it with other permutations. In this paper, we propose a discriminative learning method for speaker-independent speech separation using deep embedding features. Firstly, a DC network is trained to extract deep embedding features, which contain each source’s information and have an advantage in discriminating each target speakers. Then these features are used as the input for uPIT to directly separate the different sources. Finally, uPIT and DC are jointly trained, which directly optimizes the actual separation objectives. Moreover, in order to maximize the distance of each permutation, the discriminative learning is applied to fine tuning the whole model. Our experiments are conducted on WSJ0-2mix dataset. Experimental results show that the proposed models achieve better performances than DC and uPIT for speaker-independent speech separation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Midia Yousefi|AUTHOR Midia Yousefi]], [[Soheil Khorram|AUTHOR Soheil Khorram]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4604–4608&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Single-microphone, speaker-independent speech separation is normally performed through two steps:  (i) separating the specific speech sources, and  (ii) determining the best output-label assignment to find the separation error. The second step is the main obstacle in training neural networks for speech separation. Recently proposed  Permutation Invariant Training (PIT) addresses this problem by determining the output-label assignment which minimizes the separation error. In this study, we show that a major drawback of this technique is the overconfident choice of the output-label assignment, especially in the initial steps of training when the network generates unreliable outputs. To solve this problem, we propose  Probabilistic PIT (Prob-PIT) which considers the output-label permutation as a discrete latent random variable with a uniform prior distribution. Prob-PIT defines a log-likelihood function based on the prior distributions and the separation errors of all permutations; it trains the speech separation networks by maximizing the log-likelihood function. Prob-PIT can be easily implemented by replacing the minimum function of PIT with a soft-minimum function. We evaluate our approach for speech separation on both TIMIT and CHiME datasets. The results show that the proposed method significantly outperforms PIT in terms of Signal to Distortion Ratio and Signal to Interference Ratio.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jing Shi|AUTHOR Jing Shi]], [[Jiaming Xu|AUTHOR Jiaming Xu]], [[Bo Xu|AUTHOR Bo Xu]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4609–4613&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/1591" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent deep learning methods have gained noteworthy success in the multi-talker mixed speech separation task, which is also famous known as the Cocktail Party Problem. However, most existing models are well-designed towards some predefined conditions, which make them unable to handle the complex auditory scene automatically, such as a variable and unknown number of speakers in the mixture. In this paper, we propose a speaker-inferred model, based on the flexible and efficient Seq2Seq generation model, to accurately infer the possible speakers and the speech channel of each. Our model is totally end-to-end with several different modules to emphasize and better utilize the information from speakers. Without a priori knowledge about the number of speakers or any additional curriculum training strategy or man-made rules, our method gets comparable performance with those strong baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qing Wang|AUTHOR Qing Wang]]^^1^^, [[Pengcheng Guo|AUTHOR Pengcheng Guo]]^^1^^, [[Sining Sun|AUTHOR Sining Sun]]^^1^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^, [[John H.L. Hansen|AUTHOR John H.L. Hansen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4010–4014&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep learning has been successfully used in speaker verification (SV), especially in end-to-end SV systems which have attracted more interest recently. It has been shown in image as well as speech applications that deep neural networks are vulnerable to adversarial examples. In this study, we explore two methods to generate adversarial examples for advanced SV: (i) fast gradient-sign method (FGSM), and (ii) local distributional smoothness (LDS) method. To explore this issue, we use adversarial examples to attack an end-to-end SV system. Experiments will show that the neural network can be easily disturbed by adversarial examples. Next, we propose to train an end-to-end robust SV model using the two proposed adversarial examples for model regularization. Experimental results with the TIMIT dataset indicate that the EER is improved relatively by (i) +18.89% and (ii) +5.54% for the original test set using the regularized model. In addition, the regularized model improves EER of the adversarial example test set by a relative (i) +30.11% and (ii) +22.12%, which therefore suggests more consistent performance against adversarial example attacks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryota Kaminishi|AUTHOR Ryota Kaminishi]], [[Haruna Miyamoto|AUTHOR Haruna Miyamoto]], [[Sayaka Shiota|AUTHOR Sayaka Shiota]], [[Hitoshi Kiya|AUTHOR Hitoshi Kiya]]
</p><p class="cpabstractcardaffiliationlist">Tokyo Metropolitan University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4055–4059&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study evaluates the effects of some non-learning blind bandwidth extension (BWE) methods on automatic speaker verification (ASV) systems based on x-vector. Recently, a non-linear bandwidth extension (N-BWE) has been proposed as a blind, non-learning, and light-weight BWE approach. Other non-learning BWEs have also been developed in recent years. For ASV evaluations, most data available to train ASV systems is narrowband (NB) telephone speech. Meanwhile, wideband (WB) data have been used to train the state-of-the-art ASV systems, such as i-vector and x-vector. This can cause sampling rate mismatches when all datasets are used. In this paper, we investigate the influence of sampling rate mismatches in the x-vector-based ASV systems and how non-learning BWE methods perform against them. The results showed that the N-BWE method improved the equal error rate (EER) on ASV systems based on x-vector when the mismatches were present. We researched the relationship between objective measurements and EERs. Consequently, the N-BWE method produced the lowest EER and obtained the lower RMS-LSD value and the higher STOI score.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Umair Khan|AUTHOR Umair Khan]], [[Miquel India|AUTHOR Miquel India]], [[Javier Hernando|AUTHOR Javier Hernando]]
</p><p class="cpabstractcardaffiliationlist">Universitat Politècnica de Catalunya, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4060–4064&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the last years, i-vectors followed by cosine or PLDA scoring techniques were the state-of-the-art approach in speaker verification. PLDA requires labeled background data, and there exists a significant performance gap between the two scoring techniques. In this work, we propose to reduce this gap by using an autoencoder to transform i-vector into a new speaker vector representation, which will be referred to as ae-vector. The autoencoder will be trained to reconstruct neighbor i-vectors instead of the same training i-vectors, as usual. These neighbor i-vectors will be selected in an unsupervised manner according to the highest cosine scores to the training i-vectors. The evaluation is performed on the speaker verification trials of VoxCeleb-1 database. The experiments show that our proposed ae-vectors gain a relative improvement of 42% in terms of EER compared to the conventional i-vectors using cosine scoring, which fills the performance gap between cosine and PLDA scoring techniques by 92%, but without using speaker labels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siqi Zheng|AUTHOR Siqi Zheng]], [[Gang Liu|AUTHOR Gang Liu]], [[Hongbin Suo|AUTHOR Hongbin Suo]], [[Yun Lei|AUTHOR Yun Lei]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4065–4069&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Large-scale deployment of speech interaction devices makes it possible to harvest tremendous data quickly, which also introduces the problem of wrong labeling during data mining. Mislabeled training data has a substantial negative effect on the performance of speaker verification system. This study aims to enhance the generalization ability and robustness of the model when the training data is contaminated by wrong labels. Several regularization approaches are proposed to reduce the condition number of the speaker verification problem, making the model less sensitive to errors in the inputs. They are validated on both NIST SRE corpus and far-field smart speaker data. The results suggest that the performance deterioration caused by mislabeled training data can be significantly ameliorated by proper regularization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hassan Taherian|AUTHOR Hassan Taherian]], [[Zhong-Qiu Wang|AUTHOR Zhong-Qiu Wang]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4070–4074&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite successful applications of multi-channel signal processing in robust automatic speech recognition (ASR), relatively little research has been conducted on the effectiveness of such techniques in the robust speaker recognition domain. This paper introduces time-frequency (T-F) masking-based beamforming to address text-independent speaker recognition in conditions where strong diffuse noise and reverberation are both present. We examine various masking-based beamformers, such as parameterized multi-channel Wiener filter, generalized eigenvalue (GEV) beamformer and minimum variance distortion-less response (MVDR) beamformer, and evaluate their performance in terms of speaker recognition accuracy for i-vector and x-vector based systems. In addition, we present a different formulation for estimating steering vectors from speech covariance matrices. We show that rank-1 approximation of a speech covariance matrix based on generalized eigenvalue decomposition leads to the best results for the masking-based MVDR beamformer. Experiments on the recently introduced NIST SRE 2010 retransmitted corpus show that the MVDR beamformer with rank-1 approximation provides an absolute reduction of 5.55% in equal error rate compared to a standard masking-based MVDR beamformer.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joon-Young Yang|AUTHOR Joon-Young Yang]], [[Joon-Hyuk Chang|AUTHOR Joon-Hyuk Chang]]
</p><p class="cpabstractcardaffiliationlist">Hanyang University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4075–4079&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we investigate the deep neural network (DNN) supported acoustic beamforming and dereverberation as the front-end of the x-vector speaker verification (SV) framework in a noisy and reverberant environment. Firstly, a DNN for supporting either the classical beamforming (e. g. MVDR) or the dereverberation (e. g. WPE) algorithm is trained on multi-channel speech signals. Next, an x-vector speaker embedding network is trained on top of the enhanced speech features to classify the training speakers. Finally, after the separate training stages are over, either one or both of the DNN supported beamforming and dereverberation modules are serially connected to the x-vector network, and jointly trained to optimize the common objective of speaker classification. Experiments on the artificially generated speech dataset using simulated and real room impulse responses (RIRs) with various types of domestic noise samples show that jointly training the supportive neural network models along with the x-vector network within the classical speech enhancement framework brings significant performance gain for robust text-independent (TI) SV.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoxiao Miao|AUTHOR Xiaoxiao Miao]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^1^^, [[Yonghong Yan|AUTHOR Yonghong Yan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Kent, UK; ^^2^^Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4080–4084&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we aim to improve traditional DNN x-vector language identification (LID) performance by employing Convolutional and Long Short Term Memory-Recurrent (CLSTM) Neural Networks, as they can strengthen feature extraction and capture longer temporal dependencies. We also propose a two-dimensional attention mechanism. Compared with conventional one-dimensional time attention, our method introduces a frequency attention mechanism to give different weights to different frequency bands to generate weighted means and standard deviations. This mechanism can direct attention to either time or frequency information, and can be trained or fused singly or jointly. Experimental results show firstly that CLSTM can significantly outperform a traditional DNN x-vector implementation. Secondly, the proposed frequency attention method is more effective than time attention, particularly when the number of frequency bands matches the feature size. Furthermore, frequency-time score merging is the best, whereas frequency-time feature merge only shows improvements for small frequency dimension.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[João Monteiro|AUTHOR João Monteiro]], [[Jahangir Alam|AUTHOR Jahangir Alam]], [[Tiago H. Falk|AUTHOR Tiago H. Falk]]
</p><p class="cpabstractcardaffiliationlist">INRS-EMT, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4015–4019&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we tackle automatic speaker verification under a text-independent setting. Speaker modelling is performed by a deep convolutional neural network on top of time-frequency speech representations. Convolutions performed over the time dimension provide the means for the model to take both short-term dependencies into account, given the nature of the learned filters which operate over short-windows, as well as long-term dependencies, since depth in a convolutional stack implies dependency of outputs across large portions of input samples. Additionally, various pooling strategies across the time dimension are compared so as to effectively map varying length recordings into fixed dimensional representations while simultaneously providing the neural network with an extra mechanism to model long-term dependencies. We finally propose a training scheme under which well-known metric learning approaches, namely triplet loss minimization, is performed along with speaker recognition in a multi-class classification setting. Evaluation on well-known datasets and comparisons with state-of-the-art benchmarks show that the proposed setting is effective in yielding speaker-dependent representations, thus is well-suited for voice biometrics downstream tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yang Zhang|AUTHOR Yang Zhang]], [[Lantian Li|AUTHOR Lantian Li]], [[Dong Wang|AUTHOR Dong Wang]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4020–4024&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep speaker embedding has achieved state-of-the-art performance in speaker recognition. A potential problem of these embedded vectors (called ‘x-vectors’) are not Gaussian, causing performance degradation with the famous PLDA back-end scoring. In this paper, we propose a regularization approach based on Variational Auto-Encoder (VAE). This model transforms x-vectors to a latent space where mapped latent codes are more Gaussian, hence more suitable for PLDA scoring.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Victoria Mingote|AUTHOR Victoria Mingote]]^^1^^, [[Diego Castan|AUTHOR Diego Castan]]^^2^^, [[Mitchell McLaren|AUTHOR Mitchell McLaren]]^^2^^, [[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]]^^2^^, [[Alfonso Ortega|AUTHOR Alfonso Ortega]]^^1^^, [[Eduardo Lleida|AUTHOR Eduardo Lleida]]^^1^^, [[Antonio Miguel|AUTHOR Antonio Miguel]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad de Zaragoza, Spain; ^^2^^SRI International, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4025–4029&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a novel neural network back-end approach based on triplets for the language recognition task, due to its success application in the related field of text-dependent speaker verification. A triplet is a training example constructed of three audio samples; two from the same class and one from a different class. In presenting two pairs of samples to the network, the triplet neural network learns to discriminate between samples from the same languages and pairs of different languages. Triplet-based training optimizes the Area Under the Curve (AUC) in contrast to other triplet loss functions proposed in the literature. The optimization of the AUC as cost function is appropriate for a detection task as it directly correlates with end-use performance of the system. Moreover, we show the importance of defining an appropriate method of triplet selection and how this impacts performance of the system. When benchmarked on the LRE09 database, the new triplet backend demonstrated superior performance compared to traditional back-ends used for language recognition. In addition, we performed an evaluation on the LRE15 and LRE17 databases to check the generalization power of the proposed systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Youngmoon Jung|AUTHOR Youngmoon Jung]]^^1^^, [[Younggwan Kim|AUTHOR Younggwan Kim]]^^2^^, [[Hyungjun Lim|AUTHOR Hyungjun Lim]]^^1^^, [[Yeunju Choi|AUTHOR Yeunju Choi]]^^1^^, [[Hoirin Kim|AUTHOR Hoirin Kim]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^KAIST, Korea; ^^2^^LG Electronics, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4030–4034&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a new pooling method called spatial pyramid encoding (SPE) to generate speaker embeddings for text-independent speaker verification. We first partition the output feature maps from a deep residual network (ResNet) into increasingly fine sub-regions and extract speaker embeddings from each sub-region through a learnable dictionary encoding layer. These embeddings are concatenated to obtain the final speaker representation. The SPE layer not only generates a fixed-dimensional speaker embedding for a variable-length speech segment, but also aggregates the information of feature distribution from multi-level temporal bins. Furthermore, we apply deep length normalization by augmenting the loss function with ring loss. By applying ring loss, the network gradually learns to normalize the speaker embeddings using model weights themselves while preserving convexity, leading to more robust speaker embeddings. Experiments on the VoxCeleb1 dataset show that the proposed system using the SPE layer and ring loss-based deep length normalization outperforms both i-vector and d-vector baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[IL-Ho Yang|AUTHOR IL-Ho Yang]], [[Sung-Hyun Yoon|AUTHOR Sung-Hyun Yoon]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4035–4039&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, speaker verification has primarily performed using deep neural networks that are trained to output embeddings from input features such as spectrograms or Mel-filterbank energies. Studies that design various loss functions, including metric learning have been widely explored. In this study, we propose two end-to-end loss functions for speaker verification using the concept of speaker bases, which are trainable parameters. One loss function is designed to further increase the inter-speaker variation, and the other is designed to conduct the identical concept with hard negative mining. Each speaker basis is designed to represent the corresponding speaker in the process of training deep neural networks. In contrast to the conventional loss functions that can consider only a limited number of speakers included in a mini-batch, the proposed loss functions can consider all the speakers in the training set regardless of the mini-batch composition. In particular, the proposed loss functions enable hard negative mining and calculations of between-speaker variations with consideration of all speakers. Through experiments on VoxCeleb1 and VoxCeleb2 datasets, we confirmed that the proposed loss functions could supplement conventional softmax and center loss functions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yiheng Jiang|AUTHOR Yiheng Jiang]]^^1^^, [[Yan Song|AUTHOR Yan Song]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^2^^, [[Zhifu Gao|AUTHOR Zhifu Gao]]^^1^^, [[Li-Rong Dai|AUTHOR Li-Rong Dai]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^University of Kent, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4040–4044&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present an effective deep embedding learning architecture, which combines a dense connection of dilated convolutional layers with a gating mechanism, for speaker verification (SV) tasks. Compared with the widely used time-delay neural network (TDNN) based architecture, two main improvements are proposed: (1) The dilated filters are designed to effectively capture time-frequency context information, then the convolutional layer outputs are utilized for effective embedding learning. Specifically, we employ the idea of the successful DenseNet to collect the context information by dense connections from each layer to every other layer in a feed-forward fashion. (2) A gating mechanism is further introduced to provide channel-wise attention by exploiting inter-dependencies across channels. Motivated by squeeze-and-excitation networks (SENet), the global time-frequency information is utilized for this feature calibration. To evaluate the proposed network architecture, we conduct extensive experiments on noisy and unconstrained SV tasks, i.e., Speaker in the Wild (SITW) and Voxceleb1. The results demonstrate state-of-the-art SV performance. Specifically, our proposed method reduces equal error rate (EER) from TDNN based method by 25% and 27% for SITW and Voxceleb1, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Danwei Cai|AUTHOR Danwei Cai]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4045–4049&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we focus on the far-field end-to-end text-dependent speaker verification task with a small-scale far-field text dependent dataset and a large scale close-talking text independent database for training. First, we show that simulating far-field text independent data from the existing large-scale clean database for data augmentation can reduce the mismatch. Second, using a small far-field text dependent data set to finetune the deep speaker embedding model pre-trained from the simulated far-field as well as original clean text independent data can significantly improve the system performance. Third, in special applications when using the close-talking clean utterances for enrollment and employing the real far-field noisy utterances for testing, adding reverberant noises on the clean enrollment data can further enhance the system performance. We evaluate our methods on AISHELL ASR0009 and AISHELL 2019B-eval databases and achieve an equal error rate (EER) of 5.75% for far-field text-dependent speaker verification under noisy environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zongze Ren|AUTHOR Zongze Ren]], [[Guofu Yang|AUTHOR Guofu Yang]], [[Shugong Xu|AUTHOR Shugong Xu]]
</p><p class="cpabstractcardaffiliationlist">Shanghai University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4050–4054&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a two-stage language identification (LID) system based on a shallow ResNet14 followed by a simple 2-layer recurrent neural network (RNN) architecture, which was used for Xunfei (iFlyTek) Chinese Dialect Recognition Challenge and won the first place among 110 teams. The system trains an acoustic model (AM) firstly with connectionist temporal classification (CTC) to recognize the given phonetic sequence annotation and then train another RNN to classify dialect category by utilizing the intermediate features as inputs from the AM. Compared with a three-stage system we further explore, our results show that the two-stage system can achieve high accuracy for Chinese dialects recognition under both short utterance and long utterance conditions with less training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jun Chen|AUTHOR Jun Chen]], [[Ji Zhu|AUTHOR Ji Zhu]], [[Jieping Ye|AUTHOR Jieping Ye]]
</p><p class="cpabstractcardaffiliationlist">University of Michigan, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4085–4089&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Alzheimer’s disease (AD) is one of the leading causes of death in the world and affects at least 50 million individuals. Currently, there is no cure for the disease. So a convenient and reliable early detection approach before irreversible brain damage and cognitive decline have occurred is of great importance. One prominent sign of AD is language dysfunction. Some aspects of language are affected at the same time or even before the memory problems emerge. Therefore, we propose an automatic speech analysis framework to identify AD subjects from short narrative speech transcript elicited with a picture description task. The proposed network is based on attention mechanism and is composed of a CNN and a GRU module. We obtained state-of-the-art cross-validation accuracy of 97 in distinguishing individuals with AD from elderly normal controls. The performance of our model makes it reasonable to conclude that our approach reveals a considerable part of the language deficits of AD patients and can help with the diagnosis of the disease from spontaneous speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shansong Liu|AUTHOR Shansong Liu]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4130–4134&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Pitch features have long been known to be useful for recognition of normal speech. However, for disordered speech, the significant degradation of voice quality renders the prosodic features, such as pitch, not always useful, particularly when the underlying conditions, for example, damages to the cerebellum, introduce a large effect on prosody control. Hence, both acoustic and prosodic information can be distorted. To the best of our knowledge, there has been very limited research on using pitch features for disordered speech recognition. In this paper, a comparative study of multiple approaches designed to incorporate pitch features is conducted to improve the performance of two disordered speech recognition tasks: English UASpeech, and Cantonese CUDYS. A novel gated neural network (GNN) based approach is used to improve acoustic and pitch feature integration over a conventional concatenation between the two. Bayesian estimation of GNNs is also investigated to further improve their robustness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Brendan Shillingford|AUTHOR Brendan Shillingford]]^^1^^, [[Yannis Assael|AUTHOR Yannis Assael]]^^1^^, [[Matthew W. Hoffman|AUTHOR Matthew W. Hoffman]]^^1^^, [[Thomas Paine|AUTHOR Thomas Paine]]^^1^^, [[Cían Hughes|AUTHOR Cían Hughes]]^^1^^, [[Utsav Prabhu|AUTHOR Utsav Prabhu]]^^2^^, [[Hank Liao|AUTHOR Hank Liao]]^^2^^, [[Hasim Sak|AUTHOR Hasim Sak]]^^2^^, [[Kanishka Rao|AUTHOR Kanishka Rao]]^^2^^, [[Lorrayne Bennett|AUTHOR Lorrayne Bennett]]^^1^^, [[Marie Mulville|AUTHOR Marie Mulville]]^^1^^, [[Misha Denil|AUTHOR Misha Denil]]^^1^^, [[Ben Coppin|AUTHOR Ben Coppin]]^^1^^, [[Ben Laurie|AUTHOR Ben Laurie]]^^1^^, [[Andrew Senior|AUTHOR Andrew Senior]]^^1^^, [[Nando de Freitas|AUTHOR Nando de Freitas]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^DeepMind, UK; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4135–4139&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work presents a scalable solution to continuous visual speech recognition. To achieve this, we constructed the largest existing visual speech recognition dataset, consisting of pairs of transcriptions and video clips of faces speaking (3,886 hours of video). In tandem, we designed and trained an integrated lipreading system, consisting of a video processing pipeline that maps raw video to stable videos of lips and sequences of phonemes, a scalable deep neural network that maps the lip videos to sequences of phoneme distributions, and a phoneme-to-word speech decoder that outputs sequences of words. The proposed system achieves a word error rate (WER) of 40.9% as measured on a held-out set. In comparison, professional lipreaders achieve either 86.4% or 92.9% WER on the same dataset when having access to additional types of contextual information. Our approach significantly improves on previous lipreading approaches, including variants of  LipNet and of  Watch, Attend, and Spell (WAS), which are only capable of 89.8% and 76.8% WER respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pingchuan Ma|AUTHOR Pingchuan Ma]], [[Stavros Petridis|AUTHOR Stavros Petridis]], [[Maja Pantic|AUTHOR Maja Pantic]]
</p><p class="cpabstractcardaffiliationlist">Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4090–4094&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Several audio-visual speech recognition models have been recently proposed which aim to improve the robustness over audio-only models in the presence of noise. However, almost all of them ignore the impact of the Lombard effect, i.e., the change in speaking style in noisy environments which aims to make speech more intelligible and affects both the acoustic characteristics of speech and the lip movements. In this paper, we investigate the impact of the Lombard effect in audio-visual speech recognition. To the best of our knowledge, this is the first work which does so using end-to-end deep architectures and presents results on unseen speakers. Our results show that properly modelling Lombard speech is always beneficial. Even if a relatively small amount of Lombard speech is added to the training set then the performance in a real scenario, where noisy Lombard speech is present, can be significantly improved. We also show that the standard approach followed in the literature, where a model is trained and tested on noisy plain speech, provides a correct estimate of the video-only performance and slightly underestimates the audio-visual performance. In case of audio-only approaches, performance is overestimated for SNRs higher than -3dB and underestimated for lower SNRs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jasper Ooster|AUTHOR Jasper Ooster]]^^1^^, [[Pia Nancy Porysek Moreta|AUTHOR Pia Nancy Porysek Moreta]]^^1^^, [[Jörg-Hendrik Bach|AUTHOR Jörg-Hendrik Bach]]^^2^^, [[Inga Holube|AUTHOR Inga Holube]]^^3^^, [[Bernd T. Meyer|AUTHOR Bernd T. Meyer]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Carl von Ossietzky Universität Oldenburg, Germany; ^^2^^HörTech, Germany; ^^3^^Jade Hochschule, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4095–4099&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech audiometry based on matrix sentence tests is an important diagnostic tool for hearing impairment and fitting of hearing aids. This paper introduces a self-conducted measurement for estimating the speech reception threshold (SRT) of a subject, i.e., the signal-to-noise ratio corresponding to 50% intelligibility, based on a smart speaker. While the original measurement procedure is well-evaluated and provides a very high measurement accuracy (<1 dB test-retest standard deviation), the measurement using a smart speaker differs in several aspects from the commercially available implementation, such as missing control over the absolute presentation level, mode of presentation (headphones vs. loudspeaker), potential errors from the automated response logging, and influence from room acoustics. The SRT measurement accuracy is evaluated with six normal-hearing subjects conducted with an Amazon Alexa application on an Echo Plus loudspeaker in a quiet office environment. We found a significant difference of 0.6 dB in SRT between the proposed and the commercially available testing procedure. However, this bias is smaller than the inter-subject standard deviation, and the measurement accuracy is similar to the original test for normal-hearing listeners, which indicates that smart speakers may become a helpful addition for the screening of hearing deficits.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aciel Eshky|AUTHOR Aciel Eshky]], [[Manuel Sam Ribeiro|AUTHOR Manuel Sam Ribeiro]], [[Korin Richmond|AUTHOR Korin Richmond]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4100–4104&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audiovisual synchronisation is the task of determining the time offset between speech audio and a video recording of the articulators. In child speech therapy, audio and ultrasound videos of the tongue are captured using instruments which rely on hardware to synchronise the two modalities at recording time. Hardware synchronisation can fail in practice, and no mechanism exists to synchronise the signals post hoc. To address this problem, we employ a two-stream neural network which exploits the correlation between the two modalities to find the offset. We train our model on recordings from 69 speakers, and show that it correctly synchronises 82.9% of test utterances from unseen therapy sessions and unseen speakers, thus considerably reducing the number of utterances to be manually synchronised. An analysis of model performance on the test utterances shows that directed phone articulations are more difficult to automatically synchronise compared to utterances containing natural variation in speech such as words, sentences, or conversations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yilin Pan|AUTHOR Yilin Pan]], [[Bahman Mirheidari|AUTHOR Bahman Mirheidari]], [[Markus Reuber|AUTHOR Markus Reuber]], [[Annalena Venneri|AUTHOR Annalena Venneri]], [[Daniel Blackburn|AUTHOR Daniel Blackburn]], [[Heidi Christensen|AUTHOR Heidi Christensen]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4105–4109&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Picture description tasks are used for the detection of cognitive decline associated with Alzheimer’s disease (AD). Recent years have seen work on automatic AD detection in picture descriptions based on acoustic and word-based analysis of the speech. These methods have shown some success but lack an ability to capture any higher level effects of cognitive decline on the patient’s language. In this paper, we propose a novel model that encompasses both the hierarchical and sequential structure of the description and detect its informative units by attention mechanism. Automatic speech recognition (ASR) and punctuation restoration are used to transcribe and segment the data. Using the DementiaBank database of people with AD as well as healthy controls (HC), we obtain an F-score of 84.43% and 74.37% when using manual and automatic transcripts respectively. We further explore the effect of adding additional data (a total of 33 descriptions collected using a ‘ digital doctor’ ) during model training, and increase the F-score when using ASR transcripts to 76.09%. This outperforms baseline models, including bidirectional LSTM and bidirectional hierarchical neural network without an attention mechanism, and demonstrate that the use of hierarchical models with attention mechanism improves the AD/HC discrimination performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Venkata Srikanth Nallanthighal|AUTHOR Venkata Srikanth Nallanthighal]]^^1^^, [[Aki Härmä|AUTHOR Aki Härmä]]^^1^^, [[Helmer Strik|AUTHOR Helmer Strik]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Philips, The Netherlands; ^^2^^Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4110–4114&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we show the first results on the estimation of breathing signal from conversational speech using deep learning algorithms. Respiratory diseases such as COPD, asthma, and respiratory infections are common in the elderly population and patients in health care monitoring and medical alert services in general. In this work, we compare algorithms for the estimation of a known respiratory target signal, measured by respiratory belt transducers positioned across the rib cage and abdomen, from conversational speech. We demonstrate the estimation of the respiratory signal from speech using convolutional and recurrent neural networks. The estimated breathing pattern gives respiratory rate, breathing capacity and thus might provide indications of the pathological condition of the speaker. Evaluation of our model on our database of breathing signal and speech yielded a sensitivity of 91.2% for breath event detection and a mean absolute error of 1.01 breaths per minute for breathing rate estimation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fadi Biadsy|AUTHOR Fadi Biadsy]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]], [[Dimitri Kanvesky|AUTHOR Dimitri Kanvesky]], [[Ye Jia|AUTHOR Ye Jia]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4115–4119&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe Parrotron, an end-to-end-trained speech-to-speech conversion model that maps an input spectrogram directly to another spectrogram, without utilizing any intermediate discrete representation. The network is composed of an encoder, spectrogram and phoneme decoders, followed by a vocoder to synthesize a time-domain waveform. We demonstrate that this model can be trained to normalize speech from any speaker regardless of accent, prosody, and background noise, into the voice of a  single canonical target speaker with a fixed accent and consistent articulation and prosody. We further show that this normalization model can be adapted to normalize highly atypical speech from a deaf speaker, resulting in significant improvements in intelligibility and naturalness, measured via a speech recognizer and listening tests. Finally, demonstrating the utility of this model on other speech tasks, we show that the same model architecture can be trained to perform a speech separation task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shansong Liu|AUTHOR Shansong Liu]]^^1^^, [[Shoukang Hu|AUTHOR Shoukang Hu]]^^1^^, [[Yi Wang|AUTHOR Yi Wang]]^^2^^, [[Jianwei Yu|AUTHOR Jianwei Yu]]^^1^^, [[Rongfeng Su|AUTHOR Rongfeng Su]]^^3^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^University of Cambridge, UK; ^^3^^Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4120–4124&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) for disordered speech is a challenging task. People with speech disorders such as dysarthria often have physical disabilities, leading to severe degradation of speech quality, highly variable voice characteristics and large mismatch against normal speech. It is also difficult to record large amounts of high quality audio-visual data for developing audio-visual speech recognition (AVSR) systems. To address these issues, a novel Bayesian gated neural network (BGNN) based AVSR approach is proposed. Speaker level Bayesian gated control of contributions from visual features allows a more robust fusion of audio and video modality. A posterior distribution over the gating parameters is used to model their uncertainty given limited and variable disordered speech data. Experiments conducted on the UASpeech dysarthric speech corpus suggest the proposed BGNN AVSR system consistently outperforms state-of-the-art deep neural network (DNN) baseline ASR and AVSR systems by 4.5% and 4.7% absolute (14.9% and 15.5% relative) in word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Konstantinos Vougioukas|AUTHOR Konstantinos Vougioukas]], [[Pingchuan Ma|AUTHOR Pingchuan Ma]], [[Stavros Petridis|AUTHOR Stavros Petridis]], [[Maja Pantic|AUTHOR Maja Pantic]]
</p><p class="cpabstractcardaffiliationlist">Imperial College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4125–4129&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech is a means of communication which relies on both audio and visual information. The absence of one modality can often lead to confusion or misinterpretation of information. In this paper we present an end-to-end temporal model capable of directly synthesising audio from silent video, without needing to transform to-and-from intermediate features. Our proposed approach, based on GANs is capable of producing natural sounding, intelligible speech which is synchronised with the video. The performance of our model is evaluated on the GRID dataset for both speaker dependent and speaker independent scenarios. To the best of our knowledge this is the first method that maps video directly to raw audio and the first to produce intelligible speech when tested on previously unseen speakers. We evaluate the synthesised audio not only based on the sound quality but also on the accuracy of the spoken words.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[S. Zahra Razavi|AUTHOR S. Zahra Razavi]], [[Benjamin Kane|AUTHOR Benjamin Kane]], [[Lenhart K. Schubert|AUTHOR Lenhart K. Schubert]]
</p><p class="cpabstractcardaffiliationlist">University of Rochester, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4140–4144&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we address the problem of turn-taking prediction in open-ended communication between humans and dialogue agents. In a non-task-oriented interaction with dialogue agents, user inputs are apt to be grammatically and lexically diverse, and at times quite lengthy, with many pauses; all of this makes it harder for the system to decide when to jump in. As a result recent turn-taking predictors designed for specific tasks or for human-human interactions will scarcely be applicable. In this paper we focus primarily on the predictive potential of linguistic features, including lexical, syntactic and semantic features, as well as timing features, whereas past work has typically placed more emphasis on prosodic features, sometimes supplemented with non-verbal behaviors such as gaze and head movements. The basis for our study is a corpus of 15 “friendly” dialogues between humans and a (Wizard-of-Oz enabled) virtual dialogue agent, annotated for pause times and types. The model of turn-taking obtained by supervised learning predicts turn-taking points with increasing accuracy using only prosodic features, only timing and speech rate features, only lexical and syntactic features, and achieves state-of-the art performance with a mixture-of-experts model combining these features along with a semantic criterion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ming-Hsiang Su|AUTHOR Ming-Hsiang Su]], [[Chung-Hsien Wu|AUTHOR Chung-Hsien Wu]], [[Yi Chang|AUTHOR Yi Chang]]
</p><p class="cpabstractcardaffiliationlist">National Cheng Kung University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4185–4189&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study proposes an approach to follow-up question generation based on a populated domain ontology in a conversational interview coaching system. The purpose of this study is to generate the follow-up questions which are more related to the meaning beyond the literal content in the user’s answer based on the background knowledge in a populated domain ontology. Firstly, a convolutional neural tensor network (CNTN) was applied for selecting a key sentence from the user answer. Secondly, the neural tensor network (NTN) was used to model the relationship between the subjects and objects in the resource description framework (RDF) triple, defined as (subject, predicate, object), in each predicate from the ConceptNet for domain ontology population. The words in the key sentence were then used to retrieve relevant triples from the domain ontology for filling into the slots in the question templates to generate potential follow-up questions. Finally, the CNTN-based sentence matching model was employed to choose the one most related to the answer sentence as the final follow-up question. This study used 5-fold cross-validation for performance evaluation. The experimental results showed the generation performance in the proposed model was higher than the traditional method. The performance of key sentence selection model achieved 81.94%, and the sentence matching model achieved 92.28%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Frédéric Béchet|AUTHOR Frédéric Béchet]]^^1^^, [[Christian Raymond|AUTHOR Christian Raymond]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIS (UMR 7020), France; ^^2^^IRISA (UMR 6074), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4145–4149&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Empirical evaluation is nowadays the main evaluation paradigm in Natural Language Processing for assessing the relevance of a new machine-learning based model. If large corpora are available for tasks such as Automatic Speech Recognition, this is not the case for other tasks such as Spoken Language Understanding (SLU), consisting in translating spoken transcriptions into a formal representation often based on semantic frames. Corpora such as ATIS or SNIPS are widely used to compare systems, however differences in performance among systems are often very small, not statistically significant, and can be produced by biases in the data collection or the annotation scheme, as we presented on the ATIS corpus (“Is ATIS too shallow?, IS2018”). We propose in this study a new methodology for assessing the relevance of an SLU corpus. We claim that only taking into account systems performance does not provide enough insight about what is covered by current state-of-the-art models and what is left to be done. We apply our methodology on a set of 4 SLU systems and 5 benchmark corpora (ATIS, SNIPS, M2M, MEDIA) and automatically produce several indicators assessing the relevance (or not) of each corpus for benchmarking SLU models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chaoran Liu|AUTHOR Chaoran Liu]], [[Carlos Ishi|AUTHOR Carlos Ishi]], [[Hiroshi Ishiguro|AUTHOR Hiroshi Ishiguro]]
</p><p class="cpabstractcardaffiliationlist">ATR HIL, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4150–4154&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequential data such as speech and dialogs are usually modeled by Recurrent Neural Networks (RNN) and derivatives since the information can travel through time with such architecture. However, disadvantages exist with the use of RNNs, including the limited depth of neural networks and the GPU’s unfriendly training process.

Estimating the timing of turn-taking is a critical feature of dialog systems. Such tasks require knowledge about past dialog contexts and have been modeled using RNNs in several studies. In this paper, we propose a non-RNN model for the timing estimation of turn-taking in dialogs. The proposed model takes lexical and acoustic features as its input to predict a turn’s end. We conducted experiments on four types of Japanese conversation datasets and show that with proper neural network designs, the long-term information in a dialog could propagate without a recurrent structure. The proposed model outperformed canonical RNN-based architectures on a turn-taking estimation task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andrei C. Coman|AUTHOR Andrei C. Coman]]^^1^^, [[Koichiro Yoshino|AUTHOR Koichiro Yoshino]]^^2^^, [[Yukitoshi Murase|AUTHOR Yukitoshi Murase]]^^2^^, [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]^^2^^, [[Giuseppe Riccardi|AUTHOR Giuseppe Riccardi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Trento, Italy; ^^2^^NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4155–4159&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a human-machine dialog scenario, deciding the appropriate time for the machine to take the turn is an open research problem. In contrast, humans engaged in conversations are able to timely decide when to interrupt the speaker for competitive or non-competitive reasons. In state-of-the-art  turn-by-turn dialog systems the decision on the next dialog action is taken at the end of the utterance. In this paper, we propose a  token-by-token prediction of the dialog state from incremental transcriptions of the user utterance. To identify the point of maximal understanding in an ongoing utterance, we a) implement an incremental Dialog State Tracker which is updated on a token basis (iDST) b) re-label the Dialog State Tracking Challenge 2 (DSTC2) dataset and c) adapt it to the incremental turn-taking experimental scenario. The re-labeling consists of assigning a binary value to each token in the user utterance that allows to identify the appropriate point for taking the turn. Finally, we implement an incremental Turn Taking Decider (iTTD) that is trained on these new labels for the turn-taking decision. We show that the proposed model can achieve a better performance compared to a deterministic handcrafted turn-taking algorithm.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Feng-Guang Su|AUTHOR Feng-Guang Su]], [[Aliyah R. Hsu|AUTHOR Aliyah R. Hsu]], [[Yi-Lin Tuan|AUTHOR Yi-Lin Tuan]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4160–4164&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Personalized responses are essential for having an informative and human-like conversation. Because it is difficult to collect a large amount of dialogues involved with specific speakers, it is desirable that chatbot can learn to generate personalized responses simply from monologues of individuals. In this paper, we propose a novel personalized dialogue generation method which reduces the training data requirement to dialogues without speaker information and monologues of every target speaker. In the proposed approach, a generative adversarial network ensures the responses containing recognizable personal characteristics of the target speaker, and a backward SEQ2SEQ model reconstructs the input message for keeping the coherence of the generated responses. The proposed model demonstrates its flexibility to respond to open-domain conversations, and the experimental results show that the proposed method performs favorably against prior work in coherence, personality classification, and human evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mattias Heldner|AUTHOR Mattias Heldner]]^^1^^, [[Marcin Włodarczak|AUTHOR Marcin Włodarczak]]^^1^^, [[Štefan Beňuš|AUTHOR Štefan Beňuš]]^^2^^, [[Agustín Gravano|AUTHOR Agustín Gravano]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Stockholm University, Sweden; ^^2^^UKF, Slovak Republic; ^^3^^UBA, Argentina</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4165–4169&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work revisits the idea that voice quality dynamics (VQ) contributes to conveying pragmatic distinctions, with two case studies to further test this idea. First, we explore VQ as a turn-taking cue, and then as a cue for distinguishing between different functions of affirmative cue words. We employ acoustic VQ measures claimed to be better suited for continuous speech than those in own previous work. Both cases indicate that the degree of periodicity (as measured by CPPS) is indeed relevant in the production of the different pragmatic functions. In particular, turn-yielding is characterized by lower periodicity, sometimes accompanied by presence of creaky voice. Periodicity also distinguishes between backchannels, agreements and acknowledgements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kohei Hara|AUTHOR Kohei Hara]]^^1^^, [[Koji Inoue|AUTHOR Koji Inoue]]^^1^^, [[Katsuya Takanashi|AUTHOR Katsuya Takanashi]]^^1^^, [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Kyoto University, Japan; ^^2^^Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4170–4174&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We address turn-taking prediction in which spoken dialogue systems predict when to take the conversational floor. In natural conversations, many turn-taking decisions are arbitrary and subjective. In this study, we propose taking into account the concept of the transition relevance place (TRP) for turn-taking prediction. TRP is defined as a timing when the current speaking turn can be completed and other participants are able to take the turn. We conducted annotation of TRP on a human-robot dialogue corpus, ensuring the objectivity of this annotation among annotators. The proposed turn-taking prediction model adopts a two-step approach that detects TRP at first and then predicts a turn-taking event if TRP is detected. Experimental evaluations demonstrate that the proposed model improves the accuracy of turn-taking prediction by incorporating TRP detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Divesh Lala|AUTHOR Divesh Lala]], [[Shizuka Nakamura|AUTHOR Shizuka Nakamura]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]
</p><p class="cpabstractcardaffiliationlist">Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4175–4179&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Turn-taking for spoken dialogue systems is still below the speed of real human conversation due to latency in speech and natural language processing, but fillers can be used by the system to take the turn more quickly without sacrificing naturalness. In this work we analyze fillers which are used at the start of turns in conversation and determine a window of appropriate times to use them. We analyze a human-robot conversation corpus to obtain an average response time of the fillers, and find that this differs according to the filler’s form. We then conduct a subjective experiment in which participants dynamically change the timing of responses with and without fillers to designate a window of acceptable response timings. Our results show that the most suitable response time is around 200–500ms after the previous speaker has finished their turn. We also find differences in timing windows depending on existence of a filler used to begin the turn and its particular form. The implications of these results on the design of conversational systems are also discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]
</p><p class="cpabstractcardaffiliationlist">Hitachi, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4180–4184&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Response obligation detection, which determines whether a dialogue robot has to respond to a detected utterance, is an important function for intelligent dialogue robots. Some studies have tackled this problem; however, they narrow their applicability by impractical assumptions or use of scenario-specific features. Some attempts have been made to widen the applicability by avoiding the use of text modality, which is said to be highly domain dependent, but it decreases the detection accuracy. In this paper, we propose a novel multimodal response obligation detector, which uses visual, audio, and text information for highly-accurate detection, with its unsupervised online domain adaptation to solve the domain dependency problem. Our domain adaptation consists of the weights adaptation of the logistic regression for every modality and an embedding assignment for new words to cope with the high domain dependency of text modality. Experimental results on the dataset collected at a station and commercial building showed that our method achieved high response obligation detection accuracy and was able to handle domain change automatically.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Trang Tran|AUTHOR Trang Tran]]^^1^^, [[Jiahong Yuan|AUTHOR Jiahong Yuan]]^^2^^, [[Yang Liu|AUTHOR Yang Liu]]^^2^^, [[Mari Ostendorf|AUTHOR Mari Ostendorf]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Washington, USA; ^^2^^LAIX, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4190–4194&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The differences in written text and conversational speech are substantial; previous parsers trained on treebanked text have given very poor results on spontaneous speech. For spoken language, the mismatch in style also extends to prosodic cues, though it is less well understood. This paper re-examines the use of written text in parsing speech in the context of recent advances in neural language processing. We show that neural approaches facilitate using written text to improve parsing of spontaneous speech, and that prosody further improves over this state-of-the-art result. Further, we find an asymmetric degradation from read vs. spontaneous mismatch, with spontaneous speech more generally useful for training parsers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mittul Singh|AUTHOR Mittul Singh]]^^1^^, [[Sami Virpioja|AUTHOR Sami Virpioja]]^^2^^, [[Peter Smit|AUTHOR Peter Smit]]^^1^^, [[Mikko Kurimo|AUTHOR Mikko Kurimo]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^University of Helsinki, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4235–4239&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In spoken Keyword Search, the query may contain out-of-vocabulary (OOV) words not observed when training the speech recognition system. Using subword language models (LMs) in the first-pass recognition makes it possible to recognize the OOV words, but even the subword n-gram LMs suffer from data sparsity. Recurrent Neural Network (RNN) LMs alleviate the sparsity problems but are not suitable for first-pass recognition as such. One way to solve this is to approximate the RNNLMs by back-off n-gram models. In this paper, we propose to interpolate the conventional n-gram models and the RNNLM approximation for better OOV recognition. Furthermore, we develop a new RNNLM approximation method suitable for subword units: It produces variable-order n-grams to include long-span approximations and considers also n-grams that were not originally observed in the training corpus. To evaluate these models on OOVs, we setup Arabic and Finnish Keyword Search tasks concentrating only on OOV words. On these tasks, interpolating the baseline RNNLM approximation and a conventional LM outperforms the conventional LM in terms of the Maximum TermWeighted Value for single-character subwords. Moreover, replacing the baseline approximation with the proposed method achieves the best performance on both multi- and single-character subwords.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takashi Maekaku|AUTHOR Takashi Maekaku]], [[Yusuke Kida|AUTHOR Yusuke Kida]], [[Akihiko Sugiyama|AUTHOR Akihiko Sugiyama]]
</p><p class="cpabstractcardaffiliationlist">Yahoo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4240–4244&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a novel method for simultaneous detection and localization of a wake-up word using multi-task learning of the duration and endpoint. An onset of the wake-up word is estimated by going back in time by an estimated duration of the wake-up word from an estimated endpoint. Accurate endpoint estimation is achieved by training the network to fire only at the endpoint in contrast to the entire wake-up word. The accurate endpoint naturally leads to an accurate onset, when it is used as a basis to calculate an onset with an estimated duration that reflects the whole acoustic information over the entire wake-up word. Experimental results with real-environment data show that a relative improvement in accuracy of 41% for onset estimation and 38% for endpoint estimation are achieved compared to a baseline method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ankita Pasad|AUTHOR Ankita Pasad]]^^1^^, [[Bowen Shi|AUTHOR Bowen Shi]]^^1^^, [[Herman Kamper|AUTHOR Herman Kamper]]^^2^^, [[Karen Livescu|AUTHOR Karen Livescu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^TTIC, USA; ^^2^^Stellenbosch University, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4195–4199&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent work has shown that speech paired with images can be used to learn semantically meaningful speech representations even without any textual supervision. In real-world low-resource settings, however, we often have access to some transcribed speech. We study whether and how visual grounding is useful in the presence of varying amounts of textual supervision. In particular, we consider the task of semantic speech retrieval in a low-resource setting. We use a previously studied data set and task, where models are trained on images with spoken captions and evaluated on human judgments of semantic relevance. We propose a multitask learning approach to leverage both visual and textual modalities, with visual supervision in the form of keyword probabilities from an external tagger. We find that visual grounding is helpful even in the presence of textual supervision, and we analyze this effect over a range of sizes of transcribed data sets. With ~5 hours of transcribed speech, we obtain 23% higher average precision when also using visual supervision.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinhao Wang|AUTHOR Xinhao Wang]], [[Su-Youn Yoon|AUTHOR Su-Youn Yoon]], [[Keelan Evanini|AUTHOR Keelan Evanini]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Yao Qian|AUTHOR Yao Qian]]
</p><p class="cpabstractcardaffiliationlist">Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4200–4204&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Test takers in high-stakes speaking assessments may try to inflate their scores by providing a response to a question that they are more familiar with instead of the question presented in the test; such a response is referred to as an off-topic spoken response. The presence of these responses can make it difficult to accurately evaluate a test taker’s speaking proficiency, and thus may reduce the validity of assessment scores. This study aims to address this problem by building an automatic system to detect off-topic spoken responses which can inform the downstream automated scoring pipeline. We propose an innovative method to interpret the comparison between a test response and the question used to elicit it as a similarity grid, and then apply very deep convolutional neural networks to determine different degrees of topic relevance. In this study, Inception networks were applied to this task, and the experimental results demonstrate the effectiveness of the proposed method. Our system achieves an F1-score of 92.8% on the class of off-topic responses, which significantly outperforms a baseline system using a range of word embedding-based similarity metrics (F1-score = 85.5%).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anna Piunova|AUTHOR Anna Piunova]], [[Eugen Beck|AUTHOR Eugen Beck]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4205–4209&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Postprocessing of confidence scores in keyword search (KWS) task is known to be an efficient way of improving retrieval performance. In this paper, we extend the existing graph-based re-ranking algorithm proposed for KWS score calibration. We replace the originally used Dynamic TimeWarping (DTW) distance measure between prospective hits with distances between their Acoustic Word Embeddings (AWEs) learned from Neural Networks. We argue that AWEs trained to discriminate between the same and different words should improve the graph-based re-ranking performance. Experimental results on two languages from IARPA Babel program show that our approach outperforms the DTW and improves the baseline KWS result between 3.0–7.5% relative on the Maximum Term Weighted Value (MTWV) measure. It was previously shown, that enhancing detection lists with keyword exemplars given high confidence, improved the algorithm performance. We additionally expanded the detection lists with negative query exemplars and observed further improvements in MTWV.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yael Segal|AUTHOR Yael Segal]], [[Tzeviya Sylvia Fuchs|AUTHOR Tzeviya Sylvia Fuchs]], [[Joseph Keshet|AUTHOR Joseph Keshet]]
</p><p class="cpabstractcardaffiliationlist">Bar-Ilan University, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4210–4214&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose to apply object detection methods from the vision domain on the speech recognition domain, by treating audio fragments as objects. More specifically, we present SpeechYOLO, which is inspired by the YOLO algorithm [1] for object detection in images. The goal of SpeechYOLO is to localize boundaries of utterances within the input signal, and to correctly classify them. Our system is composed of a convolutional neural network, with a simple least-mean-squares loss function. We evaluated the system on several keyword spotting tasks, that include corpora of read speech and spontaneous speech. Our system compares favorably with other algorithms trained for both localization and classification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alp Öktem|AUTHOR Alp Öktem]]^^1^^, [[Mireia Farrús|AUTHOR Mireia Farrús]]^^2^^, [[Antonio Bonafonte|AUTHOR Antonio Bonafonte]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Col·lectivaT, Spain; ^^2^^Universitat Pompeu Fabra, Spain; ^^3^^Universitat Politècnica de Catalunya, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4215–4219&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Dubbing is a type of audiovisual translation where dialogues are translated and enacted so that they give the impression that the media is in the target language. It requires a careful alignment of dubbed recordings with the lip movements of performers in order to achieve visual coherence. In this paper, we deal with the specific problem of prosodic phrase synchronization within the framework of machine dubbing. Our methodology exploits the attention mechanism output in neural machine translation to find plausible phrasing for the translated dialogue lines and then uses them to condition their synthesis. Our initial work in this field records comparable speech rate ratio to professional dubbing translation, and improvement in terms of lip-syncing of long dialogue lines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christina Tånnander|AUTHOR Christina Tånnander]], [[Per Fallgren|AUTHOR Per Fallgren]], [[Jens Edlund|AUTHOR Jens Edlund]], [[Joakim Gusafsson|AUTHOR Joakim Gusafsson]]
</p><p class="cpabstractcardaffiliationlist">KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4220–4224&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an experimental platform for making voice likability assessments that are decoupled from individual voices, and instead capture voice characteristics over groups of speakers. We employ methods that we have previously used for other purposes to create the Cocktail platform, where respondents navigate in a voice buzz made up of about 400 voices on a touch screen. They then choose the location where they find the voice buzz most pleasant. Since there is no image or message on the screen, the platform can be used by visually impaired people, who often need to rely on spoken text, on the same premises as seeing people. In this paper, we describe the platform and its motivation along with our analysis method. We conclude by presenting two experiments in which we verify that the platform behaves as expected: one simple sanity test, and one experiment with voices grouped according to their mean pitch variance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhi Chen|AUTHOR Zhi Chen]], [[Wu Guo|AUTHOR Wu Guo]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4225–4229&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, the deep learning framework is applied in text clustering, an unsupervised task in natural language processing (NLP). Since there are no predefined labels available for text clustering, the deep neural network is trained in a pseudo-supervised fashion with labels generated from pre-clustering step. To address the wrong labelling problem from pre-clustering step, we adopt soft pseudo-labels instead of hard one-hot ones, and these labels are dynamically updated during training. Besides, we build a document-level attention over multiple documents based on dynamic soft pseudo-labels to further reduce the impact of the wrong labels. Experimental results on three public databases show that our model outperforms the state-of-the-art systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nguyen Bach|AUTHOR Nguyen Bach]], [[Fei Huang|AUTHOR Fei Huang]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4230–4234&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes BiLSTM-based models to disfluency detection in speech transcripts using residual BiLSTM blocks, self-attention, and noisy training approach. Our best model not only surpasses BERT in 4 non-Switchboard test sets, but also is 20 times smaller than the BERT-based model [1]. Thus, we demonstrate that strong performance can be achieved without extensively use of very large training data. In addition, we show that it is possible to be robust across data sets with noisy training approach in which we found insertion is the most useful noise for augmenting training data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ching-Hua Lee|AUTHOR Ching-Hua Lee]], [[Kuan-Lin Chen|AUTHOR Kuan-Lin Chen]], [[fred harris|AUTHOR fred harris]], [[Bhaskar D. Rao|AUTHOR Bhaskar D. Rao]], [[Harinath Garudadri|AUTHOR Harinath Garudadri]]
</p><p class="cpabstractcardaffiliationlist">University of California at San Diego, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4245–4249&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic feedback control continues to be a challenging problem due to the emerging form factors in advanced hearing aids (HAs) and hearables. In this paper, we present a novel use of well-known all-pass filters in a network to perform frequency warping that we call “freping.” Freping helps in breaking the Nyquist stability criterion and improves adaptive feedback cancellation (AFC). Based on informal subjective assessments, distortions due to freping are fairly benign. While common objective metrics like the perceptual evaluation of speech quality (PESQ) and the hearing-aid speech quality index (HASQI) may not adequately capture distortions due to freping and acoustic feedback artifacts from a perceptual perspective, they are still instructive in assessing the proposed method. We demonstrate quality improvements with freping for a basic AFC (PESQ: 2.56 to 3.52 and HASQI: 0.65 to 0.78) at a gain setting of 20; and an advanced AFC (PESQ: 2.75 to 3.17 and HASQI: 0.66 to 0.73) for a gain of 30. From our investigations, freping provides larger improvement for basic AFC, but still improves overall system performance for many AFC approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rongzhi Gu|AUTHOR Rongzhi Gu]]^^1^^, [[Lianwu Chen|AUTHOR Lianwu Chen]]^^2^^, [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]]^^3^^, [[Jimeng Zheng|AUTHOR Jimeng Zheng]]^^2^^, [[Yong Xu|AUTHOR Yong Xu]]^^3^^, [[Meng Yu|AUTHOR Meng Yu]]^^3^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Yuexian Zou|AUTHOR Yuexian Zou]]^^1^^, [[Dong Yu|AUTHOR Dong Yu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^Tencent, China; ^^3^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4290–4294&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The recent exploration of deep learning for supervised speech separation has significantly accelerated the progress on the multi-talker speech separation problem. The multi-channel approaches have attracted much research attention due to the benefit of spatial information. In this paper, integrated with the power spectra and inter-channel spatial features at the input level, we explore to leverage directional features, which imply the speaker source from the desired target direction, for target speaker separation. In addition, we incorporate an attention mechanism to dynamically tune the model’s attention to the reliable input features to alleviate spatial ambiguity problem when multiple speakers are closely located. We demonstrate, on the far-field WSJ0 2-mix dataset, that our proposed approach significantly improves the performance of speech separation against the baseline single-channel and multi-channel speech separation methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Triantafyllos Afouras|AUTHOR Triantafyllos Afouras]], [[Joon Son Chung|AUTHOR Joon Son Chung]], [[Andrew Zisserman|AUTHOR Andrew Zisserman]]
</p><p class="cpabstractcardaffiliationlist">University of Oxford, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4295–4299&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/3114" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Our objective is an audio-visual model for separating a single speaker from a mixture of sounds such as other speakers and background noise. Moreover, we wish to hear the speaker even when the visual cues are temporarily absent due to occlusion.

To this end we introduce a deep audio-visual speech enhancement network that is able to separate a speaker’s voice by conditioning on both the speaker’s lip movements and/or a representation of their voice. The voice representation can be obtained by either (i) enrollment, or (ii) by self-enrollment — learning the representation on-the-fly given sufficient unobstructed visual input. The model is trained by blending audios, and by introducing artificial occlusions around the mouth region that prevent the visual modality from dominating.

The method is speaker-independent, and we demonstrate it on real examples of speakers unheard (and unseen) during training. The method also improves over previous models in particular for cases of occlusion in the visual modality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amin Fazel|AUTHOR Amin Fazel]], [[Mostafa El-Khamy|AUTHOR Mostafa El-Khamy]], [[Jungwon Lee|AUTHOR Jungwon Lee]]
</p><p class="cpabstractcardaffiliationlist">Samsung, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4250–4254&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic echo cancellation or suppression methods aim to suppress the echo originated from acoustic coupling between loudspeakers and microphones. Conventional approaches estimate echo using adaptive filtering. Due to the nonlinearities in the acoustic path of far-end signal, further post-processing is needed to attenuate these nonlinear components. In this paper, we propose a novel architecture based on deep gated recurrent neural networks to estimate the near-end signal from the microphone signal. The proposed architecture is trained using multitask learning to learn the auxiliary task of estimating the echo in order to improve the main task of estimating the clean near-end speech signal. Experimental results show that our proposed deep learning based method outperforms the existing methods for unseen speakers in terms of the echo return loss enhancement (ERLE) for single-talk periods and the perceptual evaluation of speech quality (PESQ) score for double-talk periods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hao Zhang|AUTHOR Hao Zhang]], [[Ke Tan|AUTHOR Ke Tan]], [[DeLiang Wang|AUTHOR DeLiang Wang]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4255–4259&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We formulate acoustic echo and noise cancellation jointly as deep learning based speech separation, where near-end speech is separated from a single microphone recording and sent to the far end. We propose a causal system to address this problem, which incorporates a convolutional recurrent network (CRN) and a recurrent network with long short-term memory (LSTM). The system is trained to estimate the real and imaginary spectrograms of near-end speech and detect the activity of near-end speech from the microphone signal and far-end signal. Subsequently, the estimated real and imaginary spectrograms are used to separate the near-end signal, hence removing echo and noise. The trained near-end speech detector is employed to further suppress residual echo and noise. Evaluation results show that the proposed method effectively removes acoustic echo and background noise in the presence of nonlinear distortions for both simulated and measured room impulse responses (RIRs). Additionally, the proposed method generalizes well to untrained noises, RIRs and speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Charlotte Sørensen|AUTHOR Charlotte Sørensen]]^^1^^, [[Jesper B. Boldt|AUTHOR Jesper B. Boldt]]^^2^^, [[Mads G. Christensen|AUTHOR Mads G. Christensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalborg University, Denmark; ^^2^^GN Hearing, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4260–4264&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, research into objective speech intelligibility measures has gained increased interest as a tool to optimize speech enhancement algorithms. While most intelligibility measures are intrusive, i.e., they require a clean reference signal, this is rarely available in real-time applications. This paper proposes two non-intrusive intelligibility measures, which allow using the intrusive short-time objective intelligibility (STOI) measure without requiring access to the clean signal. Instead, a reference signal is obtained from the degraded signal using either a fixed or an adaptive harmonic spatial filter. This reference signal is then used as input to STOI. The experimental results show a high correlation between both proposed non-intrusive speech intelligibility measures and the original intrusively computed STOI scores.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nursadul Mamun|AUTHOR Nursadul Mamun]], [[Soheil Khorram|AUTHOR Soheil Khorram]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4265–4269&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attempts to develop speech enhancement algorithms with improved speech intelligibility for cochlear implant (CI) users have met with limited success. To improve speech enhancement methods for CI users, we propose to perform speech enhancement in a cochlear filter-bank feature space, a feature-set specifically designed for CI users based on CI auditory stimuli. We leverage a convolutional neural network (CNN) to extract both stationary and non-stationary components of environmental acoustics and speech. We propose three CNN architectures: (1) vanilla CNN that directly generates the enhanced signal; (2) spectral-subtraction-style CNN (SS-CNN) that first predicts noise and then generates the enhanced signal by subtracting noise from the noisy signal; (3) Wiener-style CNN (Wiener-CNN) that generates an optimal mask for suppressing noise. An important problem of the proposed networks is that they introduce considerable delays, which limits their real-time application for CI users. To address this, this study also considers causal variations of these networks. Our experiments show that the proposed networks (both causal and non-causal forms) achieve significant improvement over existing baseline systems. We also found that causal Wiener-CNN outperforms other networks, and leads to the best overall envelope coefficient measure (ECM). The proposed algorithms represent a viable option for implementation on the CCi-MOBILE research platform as a pre-processor for CI users in naturalistic environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Charlotte Sørensen|AUTHOR Charlotte Sørensen]]^^1^^, [[Jesper B. Boldt|AUTHOR Jesper B. Boldt]]^^2^^, [[Mads G. Christensen|AUTHOR Mads G. Christensen]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalborg University, Denmark; ^^2^^GN Hearing, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4270–4274&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, objective measures of speech intelligibility have gained increasing interest. However, most speech intelligibility metrics require a clean reference signal, which is often not available in real-life applications. In a recent publication, we proposed a method, the Non-Intrusive Codebook-based Short-Time Objective Intelligibility (NIC-STOI) metric, which allows using an intrusive method without requiring access to the clean signal. The statistics of the reference signal is estimated as a combination of predefined codebooks that best fit the degraded signal by modeling the speech and noisy spectra. In this paper, we perform additional validation of the NIC-STOI in more diverse noise condition as well as for speech processed non-linearly with binary masks, where it is shown to outperform existing non-intrusive metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kenichi Arai|AUTHOR Kenichi Arai]]^^1^^, [[Shoko Araki|AUTHOR Shoko Araki]]^^1^^, [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]]^^1^^, [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]^^1^^, [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]^^1^^, [[Katsuhiko Yamamoto|AUTHOR Katsuhiko Yamamoto]]^^2^^, [[Toshio Irino|AUTHOR Toshio Irino]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^Wakayama University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4275–4279&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ability of state-of-the-art automatic speech recognition (ASR) systems, which use deep neural networks (DNN), has recently been approaching that of human auditory systems. On the other hand, although measuring the intelligibility of enhanced speech signals is important for developing auditory algorithms and devices, the current measurement methods mainly rely on subjective experiments. Therefore, it would be preferable to employ an ASR system to predict the subjective speech intelligibility (SI) of enhanced speech. In this study, we evaluate the SI prediction performance of DNN-based ASR systems using phone accuracies. We found that an ASR system with multi-condition training achieves the best SI prediction accuracy for enhanced speech when compared with conventional methods (STOI, HASPI) and a recently proposed technique (GEDI). In addition, since our ASR system uses only a phone language model, it offers the advantage of being able to predict intelligibility independently of prior knowledge of words.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suliang Bu|AUTHOR Suliang Bu]]^^1^^, [[Yunxin Zhao|AUTHOR Yunxin Zhao]]^^1^^, [[Mei-Yuh Hwang|AUTHOR Mei-Yuh Hwang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Missouri, USA; ^^2^^Mobvoi, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4280–4284&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Accurate steering vectors (SV) are key to many beamformers. However, reliable SV is not easy to obtain. In this work, we investigate a novel method to identify and correct phase errors in SV for MVDR beamforming. Our idea stems from the linear relationship in the phase of a microphone component in narrowband SVs across frequency, as modeled by acoustic transfer function. We utilize this property and feedforward neural nets to make phase prediction for the microphone components in SVs, and use the predicted phase selectively for phase error correction and MVDR beamforming. Our method is robust to large fluctuations in phase spectrum wrapped within [-π, π]. We have evaluated our approach on CHiME-3 and obtained improved performances on both word error rate and short-time objective intelligibility in low reverberant acoustic environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hyeonseung Lee|AUTHOR Hyeonseung Lee]], [[Hyung Yong Kim|AUTHOR Hyung Yong Kim]], [[Woo Hyun Kang|AUTHOR Woo Hyun Kang]], [[Jeunghun Kim|AUTHOR Jeunghun Kim]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4285–4289&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes a novel waveform-level end-to-end model for multi-channel speech enhancement. The model first extracts sample-level speech embedding using channel-wise convolutional neural network (CNN) and compensates time-delays between the channels based on the embedding, resulting in time-aligned multi-channel signals. Then the signals are given as input of multi-channel enhancement extension of WaveUNet which directly outputs estimated clean speech waveform. The whole model is trained to minimize modified mean squared error (MSE), signal-to-distortion ratio (SDR) cost, and senone cross-entropy of back-end acoustic model at the same time. Evaluated on the CHiME-4 simulated set, the proposed system outperformed state-of-the-art generalized eigenvalue (GEV) beamformer in terms of perceptual evaluation of speech quality (PESQ) and SDR, and showed competitive results in short time objective intelligibility (STOI). Word-error-rates (WERs) of the system’s output on simulated sets were comparable to that of bidirectional long short-term memory (BLSTM) GEV beamformer. However, the system showed relatively high WERs on real sets, achieving relative error rate reduction (RERR) of 14.3% over noisy signal on real evaluation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ben Foley|AUTHOR Ben Foley]], [[Alina Rakhi|AUTHOR Alina Rakhi]], [[Nicholas Lambourne|AUTHOR Nicholas Lambourne]], [[Nicholas Buckeridge|AUTHOR Nicholas Buckeridge]], [[Janet Wiles|AUTHOR Janet Wiles]]
</p><p class="cpabstractcardaffiliationlist">University of Queensland, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4624–4625&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Elpis is a speech-to-text tool which has been designed to give language workers, including linguists and speech scientists, access to cutting-edge automatic speech recognition software, without the specialist training typically required to run these systems. Our presentation would demonstrate local and server-based versions of Elpis using sample data from the Abui (ISO 639: abz) language, about 17,000 speakers in Indonesia. Attendees would gain a sense of the benefits that a first-pass ASR transcription can bring to transcription workflows.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Martin Grůber|AUTHOR Martin Grůber]], [[Adam Chýlek|AUTHOR Adam Chýlek]], [[Jindřich Matoušek|AUTHOR Jindřich Matoušek]]
</p><p class="cpabstractcardaffiliationlist">University of West Bohemia, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4626–4627&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a web-based framework that improves and simplifies the design and the deployment of tasks that require human input. These tasks may include speech, text or image transcription, annotation and evaluation. The focus is on listening tests for the purpose of a speech synthesis quality assessment. The framework is quite flexible, i.e. many different types of tasks can be prepared and presented to participants. The participants can then work on the tasks via a user-friendly GUI and their responses are recorded in a database. The framework is ready to be integrated as an external task for Amazon Mechanical Turk but it can also be used as a stand-alone platform.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shen Huang|AUTHOR Shen Huang]]^^1^^, [[Bojie Hu|AUTHOR Bojie Hu]]^^1^^, [[Shan Huang|AUTHOR Shan Huang]]^^1^^, [[Pengfei Hu|AUTHOR Pengfei Hu]]^^1^^, [[Jian Kang|AUTHOR Jian Kang]]^^1^^, [[Zhiqiang Lv|AUTHOR Zhiqiang Lv]]^^1^^, [[Jinghao Yan|AUTHOR Jinghao Yan]]^^1^^, [[Qi Ju|AUTHOR Qi Ju]]^^1^^, [[Shiyin Kang|AUTHOR Shiyin Kang]]^^1^^, [[Deyi Tuo|AUTHOR Deyi Tuo]]^^1^^, [[Guangzhi Li|AUTHOR Guangzhi Li]]^^1^^, [[Nurmemet Yolwas|AUTHOR Nurmemet Yolwas]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, China; ^^2^^Xin Jiang University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4628–4629&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition for minority language is always behind main stream due to lack of resources. This work presents a system for simultaneous translation between Mandarin and major minority languages such as Uyghur, Tibetan in shape of speech, text and images. The general acoustic model is trained via factorized TDNN with lattice free MMI criteria using mixed-units based lexicon model. For each specific language, acoustic model is trained by multi-task mix-lingual modeling with shared bottleneck layers followed by transfer learning. Besides, the system also supports state-of-the-art OCR, TTS, and machine translation, by which language information will be real-time translated, punctuated and pronounced. The machine translation behind the system gets a high rank in WMT 18 Mandarin-English and CWMT 18 minority language translation task. The system has integrated into a micro-app at WeChat and can facilitate communication between Mandarin and Minority languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Erinc Dikici|AUTHOR Erinc Dikici]], [[Gerhard Backfried|AUTHOR Gerhard Backfried]], [[Jürgen Riedler|AUTHOR Jürgen Riedler]]
</p><p class="cpabstractcardaffiliationlist">SAIL LABS Technology, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4630–4631&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In today’s attention-driven news economy, rapid changes of topics and events go hand in hand with rapid changes of vocabulary and of language use. ASR systems aimed at transcribing contents pertaining to this fluid media landscape need to keep up-to-date in a continuous and dynamic manner. Static models, potentially created a long time ago, are hopelessly outdated within a short period of time. The frequent changes in vocabulary and wording need to be reflected in the models employed for optimal performance of transcription if one does not want to risk falling behind. In this demonstration paper we present the audio processing capabilities of the SAIL LABS Media Mining Indexer, and the CAVA Framework allowing semi-automatic and periodic updates of the ASR vocabulary and language model from relevant and new data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nagendra Kumar Goel|AUTHOR Nagendra Kumar Goel]], [[Mousmita Sarma|AUTHOR Mousmita Sarma]], [[Saikiran Valluri|AUTHOR Saikiran Valluri]], [[Dharmeshkumar Agrawal|AUTHOR Dharmeshkumar Agrawal]], [[Steve Braich|AUTHOR Steve Braich]], [[Tejendra Singh Kuswah|AUTHOR Tejendra Singh Kuswah]], [[Zikra Iqbal|AUTHOR Zikra Iqbal]], [[Surbhi Chauhan|AUTHOR Surbhi Chauhan]], [[Raj Karbar|AUTHOR Raj Karbar]]
</p><p class="cpabstractcardaffiliationlist">GoVivace, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>

</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 4632–4633&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We demonstrate CaptionAI, the system that can be used for speech to text transcription, multilingual translation, and real-time closed captioning. It can also broadcast the audio and translated text to personal devices. There are three components of the application, namely, speech to text conversion, machine translation, and real time broadcast of audio and its multilingual text transcription. CaptionAI makes meetings, conference, and events accessible to global audience members with its real-time multilingual captioning and broadcast capabilities, improving comprehension and retention. In this application, we support English and Spanish real-time speech transcription. It also supports seventeen popular languages for real-time Machine Translation of transcribed speech. The front-end is coded on c# and in back-end we use combination of python and c++ based software and packages such as Janus, Gstreamer, and libwebsockets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andreas Nautsch|AUTHOR Andreas Nautsch]]^^1^^, [[Catherine Jasserand|AUTHOR Catherine Jasserand]]^^2^^, [[Els Kindt|AUTHOR Els Kindt]]^^3^^, [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]^^1^^, [[Isabel Trancoso|AUTHOR Isabel Trancoso]]^^4^^, [[Nicholas Evans|AUTHOR Nicholas Evans]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EURECOM, France; ^^2^^Rijksuniversiteit Groningen, The Netherlands; ^^3^^Katholieke Universiteit Leuven, Belgium; ^^4^^INESC-ID, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3695–3699&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Privacy preservation and the protection of speech data is in high demand, not least as a result of recent regulation, e.g. the General Data Protection Regulation (GDPR) in the EU. While there has been a period with which to prepare for its implementation, its implications for speech data is poorly understood. This assertion applies to both the legal and technology communities, and is hardly surprising since there is no universal definition of ‘privacy’, let alone a clear understanding of when or how the GDPR applies to the capture, storage and processing of speech data. In aiming to initiate the discussion that is needed to establish a level of harmonisation that is thus far lacking, this contribution presents some reflections of both legal and technology communities on the implications of the GDPR as regards speech data. The article outlines the need for taxonomies at the intersection of speech technology and data privacy — a discussion that is still very much in its infancy — and describes the ways to safeguards and priorities for future research. In being agnostic to any specific application, the treatment should be of interest to the speech communication community at large.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]]^^1^^, [[Aurélien Bellet|AUTHOR Aurélien Bellet]]^^1^^, [[Marc Tommasi|AUTHOR Marc Tommasi]]^^1^^, [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CRIStAL (UMR 9189), France; ^^2^^Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3700–3704&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) is a key technology in many services and applications. This typically requires user devices to send their speech data to the cloud for ASR decoding. As the speech signal carries a lot of information about the speaker, this raises serious privacy concerns. As a solution, an encoder may reside on each user device which performs local computations to anonymize the representation. In this paper, we focus on the protection of speaker identity and study the extent to which users can be recognized based on the encoded representation of their speech as obtained by a deep encoder-decoder architecture trained for ASR. Through speaker identification and verification experiments on the Librispeech corpus with open and closed sets of speakers, we show that the representations obtained from a standard architecture still carry a lot of information about speaker identity. We then propose to use adversarial training to learn representations that perform well in ASR while hiding speaker identity. Our results demonstrate that adversarial training dramatically reduces the closed-set classification accuracy, but this does not translate into increased open-set verification error hence into increased protection of the speaker identity in practice. We suggest several possible reasons behind this negative result.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandru Nelus|AUTHOR Alexandru Nelus]], [[Silas Rech|AUTHOR Silas Rech]], [[Timm Koppelmann|AUTHOR Timm Koppelmann]], [[Henrik Biermann|AUTHOR Henrik Biermann]], [[Rainer Martin|AUTHOR Rainer Martin]]
</p><p class="cpabstractcardaffiliationlist">Ruhr-Universität Bochum, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3705–3709&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we propose a deep neural-network-based feature extraction scheme with the purpose of reducing the privacy risks encountered in speaker classification tasks. For this we choose a challenging scenario where we wish to perform gender recognition but at the same time prevent an attacker who has intercepted the features to perform speaker identification. Our approach is to employ Siamese training in order to obtain a feature representation that minimizes the Euclidean distance between same gender speakers while maximizing it for different gender speakers. It is experimentally shown that the obtained effect is of anonymizing speakers from the same gender class and thus drastically reducing privacy risks while still permitting class discrimination with a higher accuracy than other previously investigated methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandru Nelus|AUTHOR Alexandru Nelus]]^^1^^, [[Janek Ebbers|AUTHOR Janek Ebbers]]^^2^^, [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]^^2^^, [[Rainer Martin|AUTHOR Rainer Martin]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ruhr-Universität Bochum, Germany; ^^2^^Universität Paderborn, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3710–3714&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we highlight the privacy risks entailed in deep neural network feature extraction for domestic activity monitoring. We employ the baseline system proposed in the Task 5 of the DCASE 2018 challenge and simulate a feature interception attack by an eavesdropper who wants to perform speaker identification. We then propose to reduce the aforementioned privacy risks by introducing a variational information feature extraction scheme that allows for good activity monitoring performance while at the same time minimizing the information of the feature representation, thus restricting speaker identification attempts. We analyze the resulting model’s composite loss function and the budget scaling factor used to control the balance between the performance of the trusted and attacker tasks. It is empirically demonstrated that the proposed method reduces speaker identification privacy risks without significantly deprecating the performance of domestic activity monitoring tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Patricia Thaine|AUTHOR Patricia Thaine]], [[Gerald Penn|AUTHOR Gerald Penn]]
</p><p class="cpabstractcardaffiliationlist">University of Toronto, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3715–3719&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe a method for extracting Mel-Frequency and Bark-Frequency Cepstral Coefficient from an encrypted signal without having to decrypt any intermediate values. To do so, we introduce a novel approach for approximating the value of logarithms given encrypted input data. This method works over any interval for which logarithms are defined and bounded.

Extracting spectral features from encrypted signals is the first step towards achieving secure end-to-end automatic speech recognition over encrypted data. We experimentally determine the appropriate precision thresholds to support accurate WER for ASR over the TIMIT dataset.</p></div>
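
The enabling idea is that the logarithm can be replaced, on a bounded interval, by a low-degree polynomial that uses only additions and multiplications and is therefore evaluable on homomorphically encrypted values. The numpy sketch below fits and tests such an approximation in the clear; the interval, degree and fitting method are illustrative and not the authors' exact scheme.

```python
# Approximate log(x) on a bounded interval by a low-degree polynomial, so it can
# be evaluated with additions and multiplications only (as needed when the input
# is homomorphically encrypted). Interval and degree are illustrative.
import numpy as np

lo, hi, degree = 0.05, 1.0, 8
xs = np.linspace(lo, hi, 2000)
coeffs = np.polynomial.chebyshev.chebfit(xs, np.log(xs), degree)

def approx_log(x):
    # A Chebyshev series is still a polynomial in x (convertible via cheb2poly),
    # so evaluating it needs only additions and multiplications.
    return np.polynomial.chebyshev.chebval(x, coeffs)

test = np.array([0.06, 0.1, 0.5, 0.9])
print(np.max(np.abs(approx_log(test) - np.log(test))))   # worst-case error on test points
```
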
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pablo Pérez Zarazaga|AUTHOR Pablo Pérez Zarazaga]]^^1^^, [[Sneha Das|AUTHOR Sneha Das]]^^1^^, [[Tom Bäckström|AUTHOR Tom Bäckström]]^^1^^, [[V. Vidyadhara Raju V.|AUTHOR V. Vidyadhara Raju V.]]^^2^^, [[Anil Kumar Vuppala|AUTHOR Anil Kumar Vuppala]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^IIIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3720–3724&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the growing popularity of social networks, cloud services and online applications, people are becoming concerned about the way companies store their data and the ways in which the data can be applied. Privacy with devices and services operated by the voice are of particular interest. To enable studies in privacy, this paper presents a database which quantifies the experience of privacy users have in spoken communication. We focus on the effect of the acoustic environment on that perception of privacy. Speech signals are recorded in scenarios simulating real-life situations, where the acoustic environment has an effect on the experience of privacy. The acoustic data is complemented with measures of the speakers’ experience of privacy, recorded using a questionnaire. The presented corpus enables studies in how acoustic environments affect peoples’ experience of privacy, which in turn, can be used to develop speech operated applications which are respectful of their right to privacy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tanja Schultz|AUTHOR Tanja Schultz]]
</p><p class="cpabstractcardaffiliationlist">Universität Bremen, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Human interaction is a complex process involving modalities such as speech, gestures, motion, and brain activities emitting a wide range of biosignals, which can be captured by a broad panoply of sensors. The processing and interpretation of these biosignals offer an inside perspective on human physical and mental activities and thus complement the traditional way of observing human interaction from the outside. As recent years have seen major advances in sensor technologies integrated into ubiquitous devices, and in machine learning methods to process and learn from the resulting data, the time is right to use of the full range of biosignals to gain further insights into the process of human-machine interaction.

In my talk I will present ongoing research at the Cognitive Systems Lab (CSL), where we explore interaction-related biosignals with the goal of advancing machine-mediated human communication and human-machine interaction. Several applications will be described such as Silent Speech Interfaces that rely on articulatory muscle movement captured by electromyography to recognize and synthesize silently produced speech, as well as Brain Computer Interfaces that use brain activity captured by electrocorticography to recognize speech (brain-to-text) and directly convert electrocortical signals into audible speech (brain-to-speech). I will also describe the recording, processing and automatic structuring of human everyday activities based on multimodal high-dimensional biosignals within the framework of EASE, a collaborative research center on cognition-enabled robotics. This work aims to establish an open-source biosignals corpus for investigations on how humans plan and execute interactions with the aim of facilitating robotic mastery of everyday activities.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Niehues|AUTHOR Jan Niehues]]
</p><p class="cpabstractcardaffiliationlist">Universiteit Maastricht, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>We will start with an overview on the different use cases and difficulties of speech translation. Due to the wide range of possible application these systems differ in data, difficulty of the language and spontaneous effects. Furthermore, the interaction with human has an important influence. In the main part of the talk, we will review state-of-the-art methods to build speech translation system. We will start with reviewing the translation approach of spoken language translation, a cascade of an automatic speech recognition system and a machine translation system. We will highlight the challenges when combining both systems. Especially, techniques to adapt the system to scenario will be reviewed. With the success of neural models in both areas, we see a rising research interest in end-to-end speech translation. While we see promising results on this approach, international evaluation campaigns like the Shared Task of the International Workshop on Spoken Language Translation (IWSLT) have shown that currently often cascaded systems still achieve a better translation performance. We will highlight the main challenges of end-to-end speech translation. In the final part of the talk, we will review techniques that address key challenges of speech translation, e.g. Latency, spontaneous effects, sentence segmentation and stream decoding.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ye Jia|AUTHOR Ye Jia]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Fadi Biadsy|AUTHOR Fadi Biadsy]], [[Wolfgang Macherey|AUTHOR Wolfgang Macherey]], [[Melvin Johnson|AUTHOR Melvin Johnson]], [[Zhifeng Chen|AUTHOR Zhifeng Chen]], [[Yonghui Wu|AUTHOR Yonghui Wu]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1123–1127&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an attention-based sequence-to-sequence neural network which can directly translate speech from one language into speech in another language, without relying on an intermediate text representation. The network is trained end-to-end, learning to map speech spectrograms into target spectrograms in another language, corresponding to the translated content (in a different canonical voice). We further demonstrate the ability to synthesize translated speech using the voice of the source speaker. We conduct experiments on two Spanish-to-English speech translation datasets, and find that the proposed model slightly underperforms a baseline cascade of a direct speech-to-text translation model and a text-to-speech synthesis model, demonstrating the feasibility of the approach on this very challenging task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuchen Liu|AUTHOR Yuchen Liu]]^^1^^, [[Hao Xiong|AUTHOR Hao Xiong]]^^2^^, [[Jiajun Zhang|AUTHOR Jiajun Zhang]]^^1^^, [[Zhongjun He|AUTHOR Zhongjun He]]^^2^^, [[Hua Wu|AUTHOR Hua Wu]]^^2^^, [[Haifeng Wang|AUTHOR Haifeng Wang]]^^2^^, [[Chengqing Zong|AUTHOR Chengqing Zong]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Baidu, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1128–1132&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end speech translation (ST), which directly translates from source language speech into target language text, has attracted intensive attentions in recent years. Compared to conventional pipeline systems, end-to-end ST model has potential benefits of lower latency, smaller model size and less error propagation. However, it is notoriously difficult to implement such model which combines automatic speech recognition (ASR) and machine translation (MT) together. In this paper, we propose a  knowledge distillation approach to improve ST by transferring the knowledge from text translation. Specifically, we first train a text translation model, regarded as the teacher model, and then ST model is trained to learn the output probabilities of teacher model through knowledge distillation. Experiments on English-French Augmented LibriSpeech and English-Chinese TED corpus show that end-to-end ST is possible to implement on both similar and dissimilar language pairs. In addition, with the instruction of the teacher model, end-to-end ST model can gain significant improvements by over 3.5 BLEU points.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mattia A. Di Gangi|AUTHOR Mattia A. Di Gangi]]^^1^^, [[Matteo Negri|AUTHOR Matteo Negri]]^^2^^, [[Marco Turchi|AUTHOR Marco Turchi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Trento, Italy; ^^2^^FBK, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1133–1137&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural end-to-end architectures for sequence-to-sequence learning represent the state of the art in machine translation (MT) and speech recognition (ASR). Their use is also promising for end-to-end spoken language translation (SLT), which combines the main challenges of ASR and MT. Exploiting existing neural architectures, however, requires task-specific adaptations. A network that has obtained state-of-the-art results in MT with reduced training time is Transformer. However, its direct application to speech input is hindered by two limitations of the self-attention network on which it is based: quadratic memory complexity and no explicit modeling of short-range dependencies between input features. High memory complexity poses constraints to the size of models trainable with a GPU, while the inadequate modeling of local dependencies harms final translation quality. This paper presents an adaptation of Transformer to end-to-end SLT that consists in:  i) downsampling the input with convolutional neural networks to make the training process feasible on GPUs,  ii) modeling the bidimensional nature of a spectrogram, and  iii) adding a distance penalty to the attention, so to bias it towards local context. SLT experiments on 8 language directions show that, with our adaptation, Transformer outperforms a strong RNN-based baseline with a significant reduction in training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Steven Hillis|AUTHOR Steven Hillis]], [[Anushree Prasanna Kumar|AUTHOR Anushree Prasanna Kumar]], [[Alan W. Black|AUTHOR Alan W. Black]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1138–1142&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We experiment with unsupervised methods for deriving and clustering symbolic representations of speech, working towards speech-to-speech translation for languages without regular (or any) written representations. We consider five low-resource African languages, and we produce three different segmental representations of text data for comparisons against four different segmental representations derived solely from acoustic data for each language. The text and speech data for each language comes from the CMU Wilderness dataset introduced in [1], where speakers read a version of the New Testament in their language. Our goal is to evaluate the translation performance not only of acoustically derived units but also of discovered sequences or “words” made from these units, with the intuition that such representations will encode more meaning than phones alone. We train statistical machine translation models for each representation and evaluate their outputs on the basis of BLEU-1 scores to determine their efficacy. Our experiments produce encouraging results: as we cluster our atomic phonetic representations into more word-like units, the amount information retained generally approaches that of the actual words themselves.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gautam Bhattacharya|AUTHOR Gautam Bhattacharya]]^^1^^, [[Jahangir Alam|AUTHOR Jahangir Alam]]^^2^^, [[Patrick Kenny|AUTHOR Patrick Kenny]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^McGill University, Canada; ^^2^^CRIM, Canada; ^^3^^CRIM, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1143–1147&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition has made extraordinary progress with the advent of deep neural networks. In this work, we analyze the performance of end-to-end deep speaker recognizers on two popular text-independent tasks - NIST-SRE 2016 and VoxCeleb. Through a combination of a deep convolutional feature extractor, self-attentive pooling and large-margin loss functions, we achieve state-of-the-art performance on VoxCeleb. Our best individual and ensemble models show a relative improvement of 70% an 82% respectively over the best reported results on this task.

On the challenging NIST-SRE 2016 task, our proposed end-to-end models show good performance but are unable to match a strong i-vector baseline. State-of-the-art systems for this task use a modular framework that combines neural network embeddings with a probabilistic linear discriminant analysis (PLDA) classifier. Drawing inspiration from this approach we propose to replace the PLDA classifier with a neural network. Our modular neural network approach is able to outperform the i-vector baseline using cosine distance to score verification trials.</p></div>
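
Of the components listed above, self-attentive pooling is easy to sketch: a small scoring network produces per-frame weights, and the utterance embedding is the weighted mean of the frame features. The PyTorch sketch below uses placeholder dimensions; the convolutional extractor and large-margin loss are omitted.

```python
# Sketch of self-attentive pooling: per-frame scores -> softmax weights ->
# weighted mean as the utterance-level embedding. Sizes are illustrative.
import torch
import torch.nn as nn

class SelfAttentivePooling(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.score = nn.Sequential(nn.Linear(dim, 128), nn.Tanh(), nn.Linear(128, 1))

    def forward(self, frames):                          # frames: (batch, time, dim)
        w = torch.softmax(self.score(frames), dim=1)    # (batch, time, 1)
        return (w * frames).sum(dim=1)                  # (batch, dim)

pool = SelfAttentivePooling(256)
print(pool(torch.randn(8, 300, 256)).shape)             # torch.Size([8, 256])
```
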
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuai Wang|AUTHOR Shuai Wang]]^^1^^, [[Johan Rohdin|AUTHOR Johan Rohdin]]^^2^^, [[Lukáš Burget|AUTHOR Lukáš Burget]]^^2^^, [[Oldřich Plchot|AUTHOR Oldřich Plchot]]^^2^^, [[Yanmin Qian|AUTHOR Yanmin Qian]]^^1^^, [[Kai Yu|AUTHOR Kai Yu]]^^1^^, [[Jan Černocký|AUTHOR Jan Černocký]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shanghai Jiao Tong University, China; ^^2^^Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1148–1152&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Embeddings extracted by deep neural networks have become the state-of-the-art utterance representation in speaker recognition systems. It has recently been shown that incorporating frame-level phonetic information in the embedding extractor can improve the speaker recognition performance. On the other hand, in the final embedding, phonetic information is just an additional source of session variability which may be harmful to the text-independent speaker recognition task. This suggests that at the embedding level phonetic information should be suppressed rather than encouraged. To verify this hypothesis, we perform several experiments that encourage or/and suppress phonetic information at various stages in the network. Our experiments confirm that multitask learning is beneficial if it is applied at the frame-level stage of the network, whereas adversarial training is beneficial if it is used at the segment-level stage of the network. Additionally, the combination of these two approaches improves the performance further, resulting in an equal error rate of 3.17% on the VoxCeleb dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mirco Ravanelli|AUTHOR Mirco Ravanelli]], [[Yoshua Bengio|AUTHOR Yoshua Bengio]]
</p><p class="cpabstractcardaffiliationlist">Université de Montréal, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1153–1157&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Learning good representations is of crucial importance in deep learning. Mutual Information (MI) or similar measures of statistical dependence are promising tools for learning these representations in an unsupervised way. Even though the mutual information between two random variables is hard to measure directly in high dimensional spaces, some recent studies have shown that an implicit optimization of MI can be achieved with an encoder-discriminator architecture similar to that of Generative Adversarial Networks (GANs).

In this work, we learn representations that capture speaker identities by maximizing the mutual information between the encoded representations of chunks of speech randomly sampled from the same sentence. The proposed encoder relies on the SincNet architecture and transforms raw speech waveform into a compact feature vector. The discriminator is fed by either positive samples (of the joint distribution of encoded chunks) or negative samples (from the product of the marginals) and is trained to separate them.

We report experiments showing that this approach effectively learns useful speaker representations, leading to promising results on speaker identification and verification tasks. Our experiments consider both unsupervised and semi-supervised settings and compare the performance achieved with different objective functions.</p></div>
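
The encoder-discriminator objective can be sketched compactly: chunks from the same sentence form positive (joint) pairs, chunks from different sentences form negative (marginal) pairs, and a binary cross-entropy loss trains the discriminator to separate them while its gradients also shape the encoder. In the PyTorch sketch below the SincNet encoder is replaced by a toy MLP and the chunking is simulated with random tensors.

```python
# Sketch of MI-style encoder-discriminator training: positives pair chunks from
# the same utterance, negatives pair chunks from different utterances.
# The SincNet encoder is replaced by a toy MLP for brevity.
import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Linear(3200, 256), nn.ReLU(), nn.Linear(256, 128))
disc = nn.Sequential(nn.Linear(256, 128), nn.ReLU(), nn.Linear(128, 1))

chunk_a = torch.randn(16, 3200)        # chunk 1 of each utterance (raw samples)
chunk_b = torch.randn(16, 3200)        # chunk 2 of the same utterance
z_a, z_b = encoder(chunk_a), encoder(chunk_b)

pos = torch.cat([z_a, z_b], dim=-1)                       # samples of the joint
neg = torch.cat([z_a, z_b[torch.randperm(16)]], dim=-1)   # shuffled = product of marginals

logits = disc(torch.cat([pos, neg])).squeeze(-1)
labels = torch.cat([torch.ones(16), torch.zeros(16)])
loss = nn.functional.binary_cross_entropy_with_logits(logits, labels)
loss.backward()   # gradients flow into both the discriminator and the encoder
```
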
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lanhua You|AUTHOR Lanhua You]], [[Wu Guo|AUTHOR Wu Guo]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Jun Du|AUTHOR Jun Du]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1158–1162&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The x-vector based deep neural network (DNN) embedding systems have demonstrated effectiveness for text-independent speaker verification. This paper presents a multi-task learning architecture for training the speaker embedding DNN with the primary task of classifying the target speakers, and the auxiliary task of reconstructing the first- and higher-order statistics of the original input utterance. The proposed training strategy aggregates both the supervised and unsupervised learning into one framework to make the speaker embeddings more discriminative and robust. Experiments are carried out using the NIST SRE16 evaluation dataset and the VOiCES dataset. The results demonstrate that our proposed method outperforms the original x-vector approach with very low additional complexity added.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhanghao Wu|AUTHOR Zhanghao Wu]]^^1^^, [[Shuai Wang|AUTHOR Shuai Wang]]^^2^^, [[Yanmin Qian|AUTHOR Yanmin Qian]]^^1^^, [[Kai Yu|AUTHOR Kai Yu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shanghai Jiao Tong University, China; ^^2^^Shanghai Jiao Tong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1163–1167&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Domain or environment mismatch between training and testing, such as various noises and channels, is a major challenge for speaker verification. In this paper, a variational autoencoder (VAE) is designed to learn the patterns of speaker embeddings extracted from noisy speech segments, including i-vector and x-vector, and generate embeddings with more diversity to improve the robustness of speaker verification systems with probabilistic linear discriminant analysis (PLDA) back-end. The approach is evaluated on the standard NIST SRE 2016 dataset. Compared to manual and generative adversarial network (GAN) based augmentation approaches, the proposed VAE based augmentation achieves a slightly better performance for i-vector on Tagalog and Cantonese with EERs of 15.54% and 7.84%, and a more significant improvement for x-vector on those two languages with EERs of 11.86% and 4.20%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lanhua You|AUTHOR Lanhua You]], [[Wu Guo|AUTHOR Wu Guo]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Jun Du|AUTHOR Jun Du]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1168–1172&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, gating mechanisms are applied in deep neural network (DNN) training for x-vector-based text-independent speaker verification. First, a gated convolution neural network (GCNN) is employed for modeling the frame-level embedding layers. Compared with the time-delay DNN (TDNN), the GCNN can obtain more expressive frame-level representations through carefully designed memory cell and gating mechanisms. Moreover, we propose a novel gated-attention statistics pooling strategy in which the attention scores are shared with the output gate. The gated-attention statistics pooling combines both gating and attention mechanisms into one framework; therefore, we can capture more useful information in the temporal pooling layer. Experiments are carried out using the NIST SRE16 and SRE18 evaluation datasets. The results demonstrate the effectiveness of the GCNN and show that the proposed gated-attention statistics pooling can further improve the performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Riyaz Bhat|AUTHOR Riyaz Bhat]], [[John Chen|AUTHOR John Chen]], [[Rashmi Prasad|AUTHOR Rashmi Prasad]], [[Srinivas Bangalore|AUTHOR Srinivas Bangalore]]
</p><p class="cpabstractcardaffiliationlist">Interactions, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1173–1177&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While virtual agents are becoming ubiquitous in our daily life, their functionality is limited to simple commands which involve a single intent and an unstructured set of entities. Typically, in such systems, the natural language understanding (NLU) component uses a sequence tagging model to extract a flat meaning representation. However, in order to support complex user requests with multiple intents with their associated entities, such as those in a product ordering domain, a structured semantic representation is necessary. In this paper, we present hierarchical semantic representations for product ordering in the food services domain and two NLU models that produce such representations efficiently using deep neural networks. The models are based on transition-based algorithms which have been proven to be effective and scalable for multiple NLP tasks such as syntactic parsing and slot filling. The first model uses a multitasking architecture containing multiple transition systems with tree constraints to model the hierarchical annotations, while the second model treats the task as a constituency parsing problem by mapping the target domain annotations to a constituency tree. We demonstrate that both multi-task and constituency-based transition systems achieve competitive results and even show improvements over sequential models, showing their effectiveness in modeling hierarchical structure.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vedran Vukotić|AUTHOR Vedran Vukotić]]^^1^^, [[Christian Raymond|AUTHOR Christian Raymond]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LAMARK, France; ^^2^^IRISA (UMR 6074), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1178–1182&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The typical RNN (Recurrent Neural Network) pipeline in SLU (Spoken Language Understanding), and specifically in the slot-filling task, consists of three stages: word embedding, context window representation, and label prediction. Label prediction, as a classification task, is the one that creates a sensible context window representation during learning through back-propagation. However, due to natural variations of the data, differences in two same-labeled samples can lead to dissimilar representations, whereas similarities in two differently-labeled samples can lead to them having close representations. In computer vision applications, specifically in face recognition and person re-identification, this problem has recently been successfully tackled by introducing data triplets and a triplet loss function.

In SLU, each word can be mapped to one or multiple labels depending on small variations of its context. We exploit this fact to construct data triplets consisting of the same words with different contexts that form a pair of datapoints with matching target labels and another pair with non-matching labels. By using these triplets and an additional loss function, we update the context window representation in order to improve it, making dissimilar samples more distant and similar samples closer, leading to better classification results and an improved rate of convergence.</p></div>
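As a rough illustration of the triplet idea described above, the sketch below adds a standard triplet margin loss on top of hypothetical context-window representations; the encoder, classifier, dimensions, and margin are assumptions for illustration, not the authors' implementation.

```python
# Minimal sketch: triplet loss on context-window representations alongside the
# usual slot-label classification loss (hypothetical encoder and dimensions).
import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Linear(300, 128), nn.Tanh())  # context-window encoder (assumed)
classifier = nn.Linear(128, 20)                           # slot-label classifier (assumed)

ce_loss = nn.CrossEntropyLoss()
triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)

def training_step(anchor_x, positive_x, negative_x, anchor_y):
    # anchor and positive share the same slot label; negative has a different one
    a, p, n = encoder(anchor_x), encoder(positive_x), encoder(negative_x)
    return ce_loss(classifier(a), anchor_y) + triplet_loss(a, p, n)
```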
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Avik Ray|AUTHOR Avik Ray]], [[Yilin Shen|AUTHOR Yilin Shen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]
</p><p class="cpabstractcardaffiliationlist">Samsung, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1183–1187&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recurrent neural network (RNN) based joint intent classification and slot tagging models have achieved tremendous success in recent years for building spoken language understanding and dialog systems. However, these models suffer from poor performance for slots which often encounter large semantic variability in slot values after deployment (e.g. message texts, partial movie/artist names). While greedy delexicalization of slots in the input utterance via substring matching can partly improve performance, it often produces incorrect input. Moreover, such techniques cannot delexicalize slots with out-of-vocabulary slot values not seen at training. In this paper, we propose a novel iterative delexicalization algorithm, which can accurately delexicalize the input, even with out-of-vocabulary slot values. Based on model confidence of the current delexicalized input, our algorithm improves delexicalization in every iteration to converge to the best input having the highest confidence. We show on benchmark and in-house datasets that our algorithm can greatly improve parsing performance for RNN based models, especially for out-of-distribution slot values.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Swapnil Bhosale|AUTHOR Swapnil Bhosale]], [[Imran Sheikh|AUTHOR Imran Sheikh]], [[Sri Harsha Dumpala|AUTHOR Sri Harsha Dumpala]], [[Sunil Kumar Kopparapu|AUTHOR Sunil Kumar Kopparapu]]
</p><p class="cpabstractcardaffiliationlist">TCS Innovation Labs Mumbai, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1188–1192&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end Spoken Language Understanding (SLU) systems, without speech-to-text conversion, are more promising in low resource scenarios. They can be more effective when there is not enough labeled data to train reliable speech recognition and language understanding systems, or where running SLU on edge is preferred over cloud based services. In this paper, we present an approach for bootstrapping end-to-end SLU in low resource scenarios. We show that incorporating layers extracted from pre-trained acoustic models, instead of using the typical Mel filter bank features, lead to better performing SLU models. Moreover, the layers extracted from a model pre-trained on one language perform well even for (a) SLU tasks on a different language and also (b) on utterances from speakers with speech disorder.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroaki Takatsu|AUTHOR Hiroaki Takatsu]]^^1^^, [[Katsuya Yokoyama|AUTHOR Katsuya Yokoyama]]^^1^^, [[Yoichi Matsuyama|AUTHOR Yoichi Matsuyama]]^^1^^, [[Hiroshi Honda|AUTHOR Hiroshi Honda]]^^2^^, [[Shinya Fujie|AUTHOR Shinya Fujie]]^^1^^, [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Waseda University, Japan; ^^2^^Honda, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1193–1197&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In human-human conversations, listeners often convey intentions to their speakers through feedbacks comprising reflexive short responses. The speakers then recognize these intentions and dynamically change the conversational plans to transmit information more efficiently. For the design of spoken dialogue systems that deliver a massive amount of information, such as news, it is essential to accurately capture users’ intentions from reflexive short responses to efficiently select or eliminate the information to be transmitted depending on the user’s needs. However, such short responses from users are normally too short to recognize their actual intentions only from the prosodic and linguistic features of their short responses. In this paper, we propose a user’s short-response intention-recognition model that accounts for the previous system’s utterances as the context of the conversation in addition to prosodic and linguistic features of user’s utterances. To achieve this, we define types of short response intentions in terms of effective information transmission and created new dataset by annotating over the interaction data collected using our spoken dialogue system. Our experimental results demonstrate that the classification accuracy can be improved using the linguistic features of the system’s previous utterances encoded by Bidirectional Encoder Representations from Transformers (BERT) as the conversational context.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Antoine Caubrière|AUTHOR Antoine Caubrière]]^^1^^, [[Natalia Tomashenko|AUTHOR Natalia Tomashenko]]^^2^^, [[Antoine Laurent|AUTHOR Antoine Laurent]]^^1^^, [[Emmanuel Morin|AUTHOR Emmanuel Morin]]^^3^^, [[Nathalie Camelin|AUTHOR Nathalie Camelin]]^^1^^, [[Yannick Estève|AUTHOR Yannick Estève]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIUM (EA 4023), France; ^^2^^LIA (EA 4128), France; ^^3^^LS2N (UMR 6004), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1198–1202&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an end-to-end approach to extract semantic concepts directly from the speech audio signal. To overcome the lack of data available for this spoken language understanding approach, we investigate the use of a transfer learning strategy based on the principles of curriculum learning. This approach allows us to exploit out-of-domain data that can help to prepare a fully neural architecture. Experiments are carried out on the French MEDIA and PORTMEDIA corpora and show that this end-to-end SLU approach reaches the best results ever published on this task. We compare our approach to a classical pipeline approach that uses ASR, POS tagging, lemmatizer, chunker … and other NLP tools that aim to enrich ASR outputs that feed an SLU text to concepts system. Last, we explore the promising capacity of our end-to-end SLU approach to address the problem of domain portability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Debadatta Dash|AUTHOR Debadatta Dash]]^^1^^, [[Paul Ferrari|AUTHOR Paul Ferrari]]^^2^^, [[Jun Wang|AUTHOR Jun Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^University of Texas at Austin, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1203–1207&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Brain activity signals are unique subject-specific biological features that can not be forged or stolen. Recognizing this inherent trait, brain waves are recently being acknowledged as a far more secure, sensitive, and confidential biometric approach for user identification. Yet, current electroencephalography (EEG) based biometric systems are still in infancy considering their requirement of a large number of sensors and lower recognition performance compared to present biometric modalities. In this study, we investigated the spatial and spectral fingerprints in the brain with magnetoencephalography (MEG) for speaker identification during rest (pre-stimuli) and speech production. Experimental results suggested that the frontal and the temporal regions of the brain and higher frequency (gamma and high gamma) neural oscillations are more dominating for speaker identification. Moreover, we also found that two optimally located MEG sensors are sufficient to obtain a high speaker classification accuracy during speech tasks whereas at least eight optimally located sensors are needed to accurately identify these subjects during rest-state (pre-stimuli). These results indicated the unique neural traits of speech production across speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Annika Nijveld|AUTHOR Annika Nijveld]], [[L. ten Bosch|AUTHOR L. ten Bosch]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1208–1212&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In order to study the cognitive processes underlying speech comprehension, neuro-physiological measures (e.g., EEG and MEG), or behavioural measures (e.g., reaction times and response accuracy) can be applied. Compared to behavioural measures, EEG signals can provide a more fine-grained and complementary view of the processes that take place during the unfolding of an auditory stimulus.

EEG signals are often analysed after having chosen specific time windows, which are usually based on the temporal structure of ERP components expected to be sensitive to the experimental manipulation. However, as the timing of ERP components may vary between experiments, trials, and participants, such a-priori defined analysis time windows may significantly hamper the exploratory power of the analysis of components of interest. In this paper, we explore a wide-window analysis method applied to EEG signals collected in an auditory repetition priming experiment.

This approach is based on a bank of temporal filters arranged along the time axis in combination with linear mixed effects modelling. Crucially, it permits a temporal decomposition of effects in a single comprehensive statistical model which captures the entire EEG trace.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[L. ten Bosch|AUTHOR L. ten Bosch]], [[K. Mulder|AUTHOR K. Mulder]], [[L. Boves|AUTHOR L. Boves]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1213–1217&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The neural processing of speech leads to specific patterns in the brain which can be measured as, e.g., EEG signals. When properly aligned with the speech input and averaged over many tokens, the Event Related Potential (ERP) signal is able to differentiate specific contrasts between speech signals. Well-known effects relate to the difference between expected and unexpected words, in particular in the N400, while effects in N100 and P200 are related to attention and acoustic onset effects. Most EEG studies deal with the amplitude of EEG signals over time, sidestepping the effect of phase and phase synchronization. This paper investigates the relation between phase in the EEG signals measured in an auditory lexical decision task by Dutch participants listening to full and reduced English word forms. We show that phase synchronization takes place across stimulus conditions, and that the so-called circular variance is narrowly related to the type of contrast between stimuli.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mariya Kharaman|AUTHOR Mariya Kharaman]], [[Manluolan Xu|AUTHOR Manluolan Xu]], [[Carsten Eulitz|AUTHOR Carsten Eulitz]], [[Bettina Braun|AUTHOR Bettina Braun]]
</p><p class="cpabstractcardaffiliationlist">Universität Konstanz, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1218–1222&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In many languages, rhetorical questions (RQs) are produced with different prosodic realizations than string-identical information-seeking questions (ISQs). RQs typically have longer constituent durations and breathier voice quality than ISQs and differ in nuclear accent type. This paper reports on an identification experiment (Experiment 1) and an EEG experiment (Experiment 2) on German  wh-questions. In the identification experiment, we manipulated nuclear pitch accent type, voice quality and constituent duration and participants indicated whether they judged the realization as ISQ or RQ. The results showed additive effects of the three factors, with pitch accent as strongest predictor. In the EEG experiment, participants heard the stimuli in two contexts, triggering an ISQ or RQ (blocked). We manipulated pitch accent type and voice quality, resulting in RQ-coherent and ISQ-coherent stimuli, based on the outcome of Experiment 1. Results showed a prosodic expectancy positivity (PEP) for prosodic realizations that were incoherent with ISQ-contexts with an onset of ~120ms after the onset of the word with nuclear accent. This effect might reflect the emotional prosodic aspect of RQs. Taken together, participants use prosody to resolve the ambiguity and event-related potentials (ERPs) react to prosodic realizations that do not match contextually triggered expectations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Odette Scharenborg|AUTHOR Odette Scharenborg]]^^1^^, [[Jiska Koemans|AUTHOR Jiska Koemans]]^^2^^, [[Cybelle Smith|AUTHOR Cybelle Smith]]^^3^^, [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]]^^3^^, [[Kara D. Federmeier|AUTHOR Kara D. Federmeier]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universiteit Delft, The Netherlands; ^^2^^Radboud Universiteit Nijmegen, The Netherlands; ^^3^^University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1223–1227&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>There is ample evidence showing that listeners are able to quickly adapt their phoneme classes to ambiguous sounds using a process called lexically-guided perceptual learning. This paper presents the first attempt to examine the neural correlates underlying this process. Specifically, we compared the brain’s responses to ambiguous [f/s] sounds in Dutch non-native listeners of English (N=36) before and after exposure to the ambiguous sound to induce learning, using Event-Related Potentials (ERPs). We identified a group of participants who showed lexically-guided perceptual learning in their phonetic categorization behavior as observed by a significant difference in /s/ responses between pretest and posttest and a group who did not. Moreover, we observed differences in mean ERP amplitude to ambiguous phonemes at pretest and posttest, shown by a reliable reduction in amplitude of a positivity over medial central channels from 250 to 550 ms. However, we observed no significant correlation between the size of behavioral and neural pre/posttest effects. Possibly, the observed behavioral and ERP differences between pretest and posttest link to different aspects of the sound classification task. In follow-up research, these differences will be further investigated by assessing their relationship to neural responses to the ambiguous sounds in the exposure phase.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ivan Halim Parmonangan|AUTHOR Ivan Halim Parmonangan]]^^1^^, [[Hiroki Tanaka|AUTHOR Hiroki Tanaka]]^^1^^, [[Sakriani Sakti|AUTHOR Sakriani Sakti]]^^1^^, [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]]^^2^^, [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NAIST, Japan; ^^2^^University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1228–1232&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>As synthesized speech technology becomes more widely used, the synthesized speech quality must be assessed to ensure that it is acceptable. Subjective evaluation metrics, such as mean opinion score (MOS), can only provide an overall impression without any further detailed information about the speech. Therefore, this study proposes predicting speech quality using electroencephalographs (EEG), which are more objective and have high temporal resolution. In this paper, we use one natural speech and four types of synthesized speech lasting two to six seconds. First, to obtain ground truth of MOS, we gathered ten subjects to give opinion score on a scale of one to five for each recording. Second, another nine subjects were asked to measure how close to natural speech each synthesized speech sounded. The subjects’ EEGs were recorded while they were listening to and evaluating the listened speech. The best accuracy achieved for classification was 96.61% using support vector machine, 80.36% using linear discriminant analysis, and 59.9% using logistic regression. For regression, we achieved root mean squared error as low as 1.133 using SVR and 1.353 using linear regression. This study demonstrates that EEG could be used to evaluate the perceived speech quality objectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yiteng Huang|AUTHOR Yiteng Huang]], [[Turaj Z. Shabestary|AUTHOR Turaj Z. Shabestary]], [[Alexander Gruenstein|AUTHOR Alexander Gruenstein]], [[Li Wan|AUTHOR Li Wan]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1233–1237&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently we proposed a dual-microphone adaptive noise cancellation (ANC) algorithm with deferred filter coefficients for robust hotword detection in [1]. It exploits two unique hotword-related features: hotwords are the leading phrase of valid voice queries and they are short. These features allow us  not to compute a speech-noise mask that is a common prerequisite for many multichannel speech enhancement approaches. This novel idea was found effective against strong and ambiguous speech-like TV noise. In this paper, we show that it can be generalized to support more than two microphones. The development is validated using re-recorded data with background TV noise from a 3-mic array. By adding one more microphone, the false reject (FR) rate can be further reduced relatively by 33.5%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shengkui Zhao|AUTHOR Shengkui Zhao]], [[Chongjia Ni|AUTHOR Chongjia Ni]], [[Rong Tong|AUTHOR Rong Tong]], [[Bin Ma|AUTHOR Bin Ma]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1238–1242&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Robustness of automatic speech recognition (ASR) systems is a critical issue due to noise and reverberations. Speech enhancement and model adaptation have been studied for long time to address this issue. Recently, the developments of multi-task joint-learning scheme that addresses noise reduction and ASR criteria in a unified modeling framework show promising improvements, but the model training highly relies on paired clean-noisy data. To overcome this limit, the generative adversarial networks (GANs) and the adversarial training method are deployed, which have greatly simplified the model training process without the requirements of complex front-end design and paired training data. Despite the fast developments of GANs for computer visions, only regular GANs have been adopted for robust ASR. In this work, we adopt a more advanced cycle-consistency GAN (CycleGAN) to address the training failure problem due to mode collapse of regular GANs. Using deep residual networks (ResNets), we further expand the multi-task scheme to a multi-task multi-network joint-learning scheme for more robust noise reduction and model adaptation. Experiment results on CHiME-4 show that our proposed approach significantly improves the noise robustness of the ASR system by achieving much lower word error rates (WERs) than the state-of-the-art joint-learning approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuri Khokhlov|AUTHOR Yuri Khokhlov]]^^1^^, [[Alexander Zatvornitskiy|AUTHOR Alexander Zatvornitskiy]]^^2^^, [[Ivan Medennikov|AUTHOR Ivan Medennikov]]^^1^^, [[Ivan Sorokin|AUTHOR Ivan Sorokin]]^^1^^, [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]]^^1^^, [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]]^^1^^, [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]]^^1^^, [[Vladimir Bataev|AUTHOR Vladimir Bataev]]^^1^^, [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]]^^1^^, [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]]^^1^^, [[Oleg Petrov|AUTHOR Oleg Petrov]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^Speech Technology Center, Russia; ^^3^^ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1243–1247&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Distant speech recognition is an important problem which is far from being solved. Reverberation and noise are in the list of main problems in this area. The most popular methods of dealing with them are data augmentation and speech enhancement. In this paper, we propose a novel approach, inspired by modern methods of speaker adaptation.

First of all, a feed-forward network is trained to classify room impulse responses (RIRs) from speech recordings. Then this network is used for extracting embeddings, which we call R-vectors. These R-vectors are appended to the input features of the acoustic model. Due to the lack of labeled data for the RIR classification task, we propose a self-supervised method of training the network, which consists of using artificial audio generated by a room simulator.

Experimental evaluation was conducted on the VOiCES19 and AMI single-channel tasks as well as the CHiME5 multi-channel task. It is shown that the R-vector-adapted ASR systems achieve up to 14% relative WER reduction. Furthermore, this gain is additive with those from state-of-the-art dereverberation (WPE) and speaker adaptation (x-vector) techniques.</p></div>
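As a rough sketch of the adaptation scheme described above (an utterance-level embedding from a RIR classifier appended to the frame-level acoustic features), the code below shows only the feature-level concatenation step with hypothetical dimensions; the R-vector extractor itself and the ASR model are assumed and not shown.

```python
# Sketch: append an utterance-level "R-vector" (embedding from a RIR classifier)
# to every frame of the acoustic features (placeholder dimensions).
import numpy as np

def append_r_vector(features, r_vector):
    # features: (frames, feat_dim); r_vector: (r_dim,)
    tiled = np.tile(r_vector, (features.shape[0], 1))   # repeat the R-vector per frame
    return np.concatenate([features, tiled], axis=1)    # (frames, feat_dim + r_dim)

feats = np.random.randn(500, 40)    # e.g. 500 frames of 40-dim filterbanks (placeholder)
r_vec = np.random.randn(100)        # placeholder R-vector from the RIR classifier
print(append_r_vector(feats, r_vec).shape)   # (500, 140)
```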
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]]^^1^^, [[Christoph Boeddeker|AUTHOR Christoph Boeddeker]]^^2^^, [[Jens Heitkaemper|AUTHOR Jens Heitkaemper]]^^2^^, [[Yusuke Fujita|AUTHOR Yusuke Fujita]]^^1^^, [[Shota Horiguchi|AUTHOR Shota Horiguchi]]^^1^^, [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]^^1^^, [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Hitachi, Japan; ^^2^^Universität Paderborn, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1248–1252&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present Hitachi and Paderborn University’s joint effort for automatic speech recognition (ASR) in a dinner party scenario. The main challenges of ASR systems for dinner party recordings obtained by multiple microphone arrays are (1) heavy speech overlaps, (2) severe noise and reverberation, (3) very natural conversational content, and possibly (4) insufficient training data. As an example of a dinner party scenario, we have chosen the data presented during the CHiME-5 speech recognition challenge, where the baseline ASR had a 73.3% word error rate (WER), and even the best performing system at the CHiME-5 challenge had a 46.1% WER. We extensively investigated a combination of the guided source separation-based speech enhancement technique and an already proposed strong ASR backend and found that a tight combination of these techniques provided substantial accuracy improvements. Our final system achieved WERs of 39.94% and 41.64% for the development and evaluation data, respectively, both of which are the best published results for the dataset. We also investigated with additional training data on the official small data in the CHiME-5 corpus to assess the intrinsic difficulty of this ASR task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lukas Drude|AUTHOR Lukas Drude]], [[Jahn Heymann|AUTHOR Jahn Heymann]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]
</p><p class="cpabstractcardaffiliationlist">Universität Paderborn, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1253–1257&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an unsupervised training approach for a neural network-based mask estimator in an acoustic beamforming application. The network is trained to maximize a likelihood criterion derived from a spatial mixture model of the observations. It is trained from scratch without requiring any parallel data consisting of degraded input and clean training targets. Thus, training can be carried out on real recordings of noisy speech rather than simulated ones. In contrast to previous work on unsupervised training of neural mask estimators, our approach avoids the need for a possibly pre-trained teacher model entirely. We demonstrate the effectiveness of our approach by speech recognition experiments on two different datasets: one mainly deteriorated by noise (CHiME 4) and one by reverberation (REVERB). The results show that the performance of the proposed system is on par with a supervised system using oracle target masks for training and with a system trained using a model-based teacher.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Feng Ma|AUTHOR Feng Ma]]^^1^^, [[Li Chai|AUTHOR Li Chai]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Diyuan Liu|AUTHOR Diyuan Liu]]^^2^^, [[Zhongfu Ye|AUTHOR Zhongfu Ye]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^iFlytek, China; ^^3^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1258–1262&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>CHiME-5 is a research community challenge targeting the problem of far-field and multi-talker conversational speech recognition in dinner party scenarios involving background noises, reverberations and overlapping speech. In this study, we present five different kinds of robust acoustic models which take advantages from both effective data augmentation and ensemble methods to improve the recognition performance for the CHiME-5 challenge. First, we detail the effective data augmentation for far-field scenarios, especially the far-field data simulation. Different from the conventional data simulation methods, we use a signal processing method originally developed for channel identification to estimate the room impulse responses and then simulate the far-field data. Second, we introduce the five different kinds of robust acoustic models. Finally, the effectiveness of our acoustic model ensembling strategies at the lattice level and the state posterior level are evaluated and demonstrated. Our system achieves the best performance of all four tasks among submitted systems in the CHiME-5 challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ming Li|AUTHOR Ming Li]], [[Weicheng Cai|AUTHOR Weicheng Cai]], [[Danwei Cai|AUTHOR Danwei Cai]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Speech signal not only contains lexicon information, but also delivers various kinds of paralinguistic speech attribute information, such as speaker, language, gender, age, emotion, etc. The core technique question behind it is utterance level supervised learning based on text independent or text dependent speech signal with flexible duration. In section 1, we will first formulate the problem of speaker and language recognition. In section 2, we introduce the traditional framework with different modules in a pipeline, namely, feature extraction, representation, variability compensation and backend classification. Then we naturally introduce the end-to-end idea and compare with the traditional framework. We will show the correspondence between feature extraction and CNN layers, representation and encoding layer, backend modeling and fully connected layers. Specifically, we will introduce the modules in the end-to-end frameworks with more details here, e.g. variable length data loader, frontend convolutional network structure design, encoding (or pooling) layer design, loss function design, data augmentation design, transfer learning and multitask learning, etc. In section 4, we will introduce some robust methods using the end-to-end framework for far-field and noisy conditions. Finally, we will connect the introduced end-to-end frameworks to other related tasks, e.g. speaker diarization, paralinguistic speech attribute recognition, anti-spoofing countermeasures, etc.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bharat Padi|AUTHOR Bharat Padi]]^^1^^, [[Anand Mohan|AUTHOR Anand Mohan]]^^2^^, [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^minds.ai, India; ^^2^^Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1263–1267&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a hybrid i-vector neural network framework (i-BLSTM) which models the sequence information present in a series of short segment i-vectors for the task of spoken language recognition (LRE) is proposed. A sequence of short segment i-vectors are extracted for every speech utterance and are then modeled using a bidirectional long short-term memory (BLSTM) recurrent neural network (RNN). Attention mechanism inside the neural network relevantly weights segments of the speech utterance and the model learns to give higher weights to parts of speech data which are more helpful to the classification task. The proposed framework performs better in short duration and noisy environments when compared with the conventional i-vector system. Experiments are performed on clean, noisy and multi-speaker speech data from NIST LRE 2017 and RATS language recognition corpus. In these experiments, the proposed approach yields significant improvements (relative improvements of 7.6–13% in terms of accuracy for noisy conditions) over the conventional i-vector based language recognition approach and also over an end-to-end LSTM-RNN based approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1268–1272&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, direct modeling of raw waveforms using deep neural networks has been widely studied for a number of tasks in audio domains. In speaker verification, however, utilization of raw waveforms is in its preliminary phase, requiring further investigation. In this study, we explore end-to-end deep neural networks that input raw waveforms to improve various aspects: front-end speaker embedding extraction including model architecture, pre-training scheme, additional objective functions, and back-end classification. Adjustment of model architecture using a pre-training scheme can extract speaker embeddings, giving a significant improvement in performance. Additional objective functions simplify the process of extracting speaker embeddings by merging conventional two-phase processes: extracting utterance-level features such as i-vectors or x-vectors and the feature enhancement phase, e.g., linear discriminant analysis. Effective back-end classification models that suit the proposed speaker embedding are also explored. We propose an end-to-end system that comprises two deep neural networks, one frontend for utterance-level speaker embedding extraction and the other for back-end classification. Experiments conducted on the VoxCeleb1 dataset demonstrate that the proposed model achieves state-of-the-art performance among systems without data augmentation. The proposed system is also comparable to the state-of-the-art x-vector system that adopts heavy data augmentation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Rao|AUTHOR Wei Rao]]^^1^^, [[Chenglin Xu|AUTHOR Chenglin Xu]]^^2^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^3^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^NTU, Singapore; ^^3^^NTU, Singapore; ^^4^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1273–1277&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The performance of speaker verification degrades significantly when the test speech is corrupted by interference from non-target speakers. Speaker diarization separates speakers well only if the speakers are not overlapped. However, if multiple talkers speak at the same time, we need a technique to separate the speech in the spectral domain. In this paper, we study a way to extract the target speaker’s speech from an overlapped multi-talker speech. Specifically, given some reference speech samples from the target speaker, the target speaker’s speech is firstly extracted from the overlapped multi-talker speech, then the extracted speech is processed in the speaker verification system. Experimental results show that the proposed approach significantly improves the performance of overlapped multi-talker speaker verification and achieves 64.4% relative EER reduction over the zero-effort baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hanna Mazzawi|AUTHOR Hanna Mazzawi]], [[Xavi Gonzalvo|AUTHOR Xavi Gonzalvo]], [[Aleks Kracun|AUTHOR Aleks Kracun]], [[Prashant Sridhar|AUTHOR Prashant Sridhar]], [[Niranjan Subrahmanya|AUTHOR Niranjan Subrahmanya]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]], [[Hyun Jin Park|AUTHOR Hyun Jin Park]], [[Patrick Violette|AUTHOR Patrick Violette]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1278–1282&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present a novel Neural Architecture Search (NAS) framework to improve keyword spotting and spoken language identification models. Even with the huge success of deep neural networks (DNNs) in many different domains, finding the best network architecture is still a laborious task and very computationally expensive at best with existing searching approaches. Our search approach efficiently and robustly finds better model sequences with respect to hand-designed systems. We do this by constructing architectures incrementally, using a custom mutation algorithm and leveraging the power of parameter transfer between layers. We demonstrate that our approach can automatically design DNNs with an order of magnitude fewer parameters that achieves better performance than the current best models. It leads to significant performance improvements: up to 4.09% accuracy increase for language identification (6.1% if we allow an increase in the number of parameters) and 0.3% for phoneme classification in keyword spotting with half the size of the model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yibin Zheng|AUTHOR Yibin Zheng]]^^1^^, [[Xi Wang|AUTHOR Xi Wang]]^^2^^, [[Lei He|AUTHOR Lei He]]^^2^^, [[Shifeng Pan|AUTHOR Shifeng Pan]]^^2^^, [[Frank K. Soong|AUTHOR Frank K. Soong]]^^2^^, [[Zhengqi Wen|AUTHOR Zhengqi Wen]]^^1^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1283–1287&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural end-to-end TTS can generate very high-quality synthesized speech, and even close to human recording within similar domain text. However, it performs unsatisfactory when scaling it to challenging test sets. One concern is that the encoder-decoder with attention-based network adopts autoregressive generative sequence model with the limitation of “exposure bias”. To address this issue, we propose two novel methods, which learn to predict future by improving agreement between forward and backward decoding sequence. The first one is achieved by introducing divergence regularization terms into model training objective to reduce the mismatch between two directional models, namely L2R and R2L (which generates targets from left-to-right and right-to-left, respectively). While the second one operates on decoder-level and exploits the future information during decoding. In addition, we employ a joint training strategy to allow forward and backward decoding to improve each other in an interactive process. Experimental results show our proposed methods especially the second one (bidirectional decoder regularization), leads a significantly improvement on both robustness and overall naturalness, as outperforming baseline (the revised version of Tacotron2) with a MOS gap of 0.14 in a challenging test, and achieving close to human quality (4.42 vs. 4.49 in MOS) on general test.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haohan Guo|AUTHOR Haohan Guo]]^^1^^, [[Frank K. Soong|AUTHOR Frank K. Soong]]^^2^^, [[Lei He|AUTHOR Lei He]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1288–1292&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end, autoregressive model-based TTS has shown significant performance improvements over the conventional ones. However, the autoregressive module training is affected by the exposure bias, or the mismatch between different distributions of real and predicted data. While real data is provided in training, in testing, predicted data is available only. By introducing both real and generated data sequences in training, we can alleviate the effects of the exposure bias. We propose to use Generative Adversarial Network (GAN) along with the idea of “Professor Forcing” in training. A discriminator in GAN is jointly trained to equalize the difference between real and the predicted data. In AB subjective listening test, the results show that the new approach is preferred over the standard transfer learning with a CMOS improvement of 0.1. Sentence level intelligibility tests also show significant improvement in a pathological test set. The GAN-trained new model is shown more stable than the baseline to produce better alignments for the Tacotron output.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mutian He|AUTHOR Mutian He]]^^1^^, [[Yan Deng|AUTHOR Yan Deng]]^^2^^, [[Lei He|AUTHOR Lei He]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Beihang University, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1293–1297&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural TTS has demonstrated strong capabilities to generate human-like speech with high quality and naturalness, while its generalization to out-of-domain texts is still a challenging task, with regard to the design of attention-based sequence-to-sequence acoustic modeling. Various errors occur in those inputs with unseen context, including attention collapse, skipping, repeating, etc., which limits the broader applications. In this paper, we propose a novel stepwise monotonic attention method in sequence-to-sequence acoustic modeling to improve the robustness on out-of-domain inputs. The method utilizes the strict monotonic property in TTS with constraints on monotonic hard attention that the alignments between inputs and outputs sequence must be not only monotonic but allowing no skipping on inputs. Soft attention could be used to evade mismatch between training and inference. The experimental results show that the proposed method could achieve significant improvements in robustness on out-of-domain scenarios for phoneme-based models, without any regression on the in-domain naturalness test.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingyang Zhang|AUTHOR Mingyang Zhang]]^^1^^, [[Xin Wang|AUTHOR Xin Wang]]^^2^^, [[Fuming Fang|AUTHOR Fuming Fang]]^^2^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1298–1302&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigated the training of a shared model for both text-to-speech (TTS) and voice conversion (VC) tasks. We propose using an extended model architecture of Tacotron, that is a multi-source sequence-to-sequence model with a dual attention mechanism as the shared model for both the TTS and VC tasks. This model can accomplish these two different tasks respectively according to the type of input. An end-to-end speech synthesis task is conducted when the model is given text as the input while a sequence-to-sequence voice conversion task is conducted when it is given the speech of a source speaker as the input. Waveform signals are generated by using WaveNet, which is conditioned by using a predicted mel-spectrogram. We propose jointly training a shared model as a decoder for a target speaker that supports multiple sources. Listening experiments show that our proposed multi-source encoder-decoder model can efficiently achieve both the TTS and VC tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hieu-Thi Luong|AUTHOR Hieu-Thi Luong]]^^1^^, [[Xin Wang|AUTHOR Xin Wang]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^1^^, [[Nobuyuki Nishizawa|AUTHOR Nobuyuki Nishizawa]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NII, Japan; ^^2^^KDDI Research, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1303–1307&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When the available data of a target speaker is insufficient to train a high quality speaker-dependent neural text-to-speech (TTS) system, we can combine data from multiple speakers and train a multi-speaker TTS model instead. Many studies have shown that neural multi-speaker TTS model trained with a small amount data from multiple speakers combined can generate synthetic speech with better quality and stability than a speaker-dependent one. However when the amount of data from each speaker is highly unbalanced, the best approach to make use of the excessive data remains unknown. Our experiments showed that simply combining all available data from every speaker to train a multi-speaker model produces better than or at least similar performance to its speaker-dependent counterpart. Moreover by using an ensemble multi-speaker model, in which each subsystem is trained on a subset of available data, we can further improve the quality of the synthetic speech especially for underrepresented speakers whose training data is limited.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuma Okamoto|AUTHOR Takuma Okamoto]]^^1^^, [[Tomoki Toda|AUTHOR Tomoki Toda]]^^2^^, [[Yoshinori Shiga|AUTHOR Yoshinori Shiga]]^^1^^, [[Hisashi Kawai|AUTHOR Hisashi Kawai]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NICT, Japan; ^^2^^Nagoya University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1308–1312&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates real-time high-fidelity neural text-to-speech (TTS) systems. For real-time neural vocoders, WaveGlow is introduced and single Gaussian (SG)WaveRNN is proposed. The proposed SG-WaveRNN can predict continuous valued speech waveforms with half the synthesis time compared with vanilla WaveRNN with dual-softmax for 16 bit audio prediction. Additionally, a sequence-to-sequence (seq2seq) acoustic model (AM) for pitch accent languages, such as Japanese, is investigated by introducing Tacotron 2 architecture. In the seq2seq AM, full-context labels extracted from a text analyzer are used as input and they are directly converted into mel-spectrograms. The results of subjective experiment using a Japanese female corpus indicate that the proposed SG-WaveRNN vocoder with noise shaping can synthesize high-quality speech waveforms and real-time high-fidelity neural TTS systems can be realized with the seq2seq AM and WaveGlow or SG-WaveRNN vocoders. Especially, the seq2seq AM and WaveGlow vocoder conditioned on mel-spectrograms with simple PyTorch implementations can be realized with real-time factors 0.06 and 0.10 for inference using a GPU.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sushant Kafle|AUTHOR Sushant Kafle]], [[Cecilia Ovesdotter Alm|AUTHOR Cecilia Ovesdotter Alm]], [[Matt Huenerfauth|AUTHOR Matt Huenerfauth]]
</p><p class="cpabstractcardaffiliationlist">RIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1313–1317&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate whether, and if so when, prosodic features in spoken dialogue aid in modeling the importance of words to the overall meaning of a dialogue turn. Starting from the assumption that acoustic-prosodic cues help identify important speech content, we investigate representation architectures that combine lexical and prosodic features and evaluate them for predicting word importance. We propose an attention-based feature fusion strategy and additionally show how the addition of strategic supervision of the attention weights results in especially competitive models. We evaluate our fusion strategy on spoken dialogues and demonstrate performance increases over state-of-the-art models. Specifically, our approach both achieves the lowest root mean square error on test data and generalizes better over out-of-vocabulary words.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Chun-Wei Wang|AUTHOR Chun-Wei Wang]]
</p><p class="cpabstractcardaffiliationlist">National Chiao Tung University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1318–1322&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention mechanism plays a crucial role in sequential learning for many speech and language applications. However, it is challenging to develop a  stochastic attention in a sequence-to-sequence model which consists of two recurrent neural networks (RNNs) as the encoder and decoder. The problem of  posterior collapse happens in variational inference and results in the estimated latent variables close to a standard Gaussian prior so that the information from input sequence is disregarded in learning process. This paper presents a new recurrent autoencoder for sentence representation where a  self attention scheme is incorporated to activate the interaction between inference and generation in training procedure. In particular, a stochastic RNN decoder is implemented to provide additional latent variable to fulfill self attention for sentence reconstruction. The posterior collapse is alleviated. The latent information is sufficiently attended in variational sequential learning. During test phase, the estimated prior distribution of decoder is sampled for stochastic attention and generation. Experiments on Penn Treebank and Yelp 2013 show the desirable generation performance in terms of perplexity. The visualization of attention weights also illustrates the usefulness of self attention. The evaluation on DUC 2007 demonstrates the merit of variational recurrent autoencoder for document summarization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhongkai Sun|AUTHOR Zhongkai Sun]]^^1^^, [[Prathusha K. Sarma|AUTHOR Prathusha K. Sarma]]^^1^^, [[William Sethares|AUTHOR William Sethares]]^^1^^, [[Erik P. Bucy|AUTHOR Erik P. Bucy]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UW–Madison, USA; ^^2^^Texas Tech University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1323–1327&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper learns multi-modal embeddings from text, audio, and video views/modes of data in order to improve upon downstream sentiment classification. The experimental framework also allows investigation of the relative contributions of the individual views in the final multi-modal embedding. Individual features derived from the three views are combined into a multi-modal embedding using Deep Canonical Correlation Analysis (DCCA) in two ways i) One-Step DCCA and ii) Two-Step DCCA. This paper learns text embeddings using BERT, the current state-of-the-art in text encoders. We posit that this highly optimized algorithm dominates over the contribution of other views, though each view does contribute to the final result. Classification tasks are carried out on two benchmark data sets and on a new Debate Emotion data set, and together these demonstrate that the one-Step DCCA outperforms the current state-of-the-art in learning multi-modal embeddings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yilin Shen|AUTHOR Yilin Shen]]^^1^^, [[Wenhu Chen|AUTHOR Wenhu Chen]]^^2^^, [[Hongxia Jin|AUTHOR Hongxia Jin]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Samsung, USA; ^^2^^University of California at Santa Barbara, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1328–1332&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken language understanding (SLU) is a crucial component in virtual personal assistants. It consists of two main tasks: intent detection and slot filling. State-of-the-art deep neural SLU models have demonstrated good performance on benchmark datasets. However, these models suffer from the significant performance drop in practice after deployment due to the data distribution discrepancy between training and real user utterances. In this paper, we first propose four research questions that help to understand what the state-of-the-art deep neural SLU models actually learn. To answer them, we study the vocabulary importance using a novel  Embedding Sparse Structure Learning (SparseEmb) approach. It can be applied onto various existing deep SLU models to efficiently prune the useless words without any additional manual hyperparameter tuning. We evaluate SparseEmb on benchmark datasets using two existing SLU models and answer the proposed research questions. Then, we utilize SparseEmb to sanitize the training data based on the selected useless words as well as the model re-validation during training. Using both benchmark and our collected testing data, we show that our sanitized training data helps to significantly improve the SLU model performance. Both SparseEmb and training data sanitization approaches can be applied onto any deep learning based SLU models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Máté Ákos Tündik|AUTHOR Máté Ákos Tündik]], [[Valér Kaszás|AUTHOR Valér Kaszás]], [[György Szaszák|AUTHOR György Szaszák]]
</p><p class="cpabstractcardaffiliationlist">BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1333–1337&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Ambitions in artificial intelligence involve machine understanding of human language. The state-of-the-art approach for Spoken Language Understanding is using an Automatic Speech Recognizer (ASR) to generate transcripts, which are further processed with text-based tools. ASR yields error prone transcripts, these errors then propagate further into the processing pipeline. Subjective tests show on the other hand, that humans understand quite well ASR closed captions despite the word and punctuation errors. Our goal is to assess and quantify the loss in the semantic space resulting from error propagation and also analyze error propagation into speech summarization as a special use-case. We show, that word errors cause a slight shift in the semantic space, which is fairly below the average semantic distance between the sentences within a document. We also show, that punctuation errors have higher impact on summarization performance, which suggests that proper sentence level tokenization is crucial for this task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peisong Huang|AUTHOR Peisong Huang]], [[Peijie Huang|AUTHOR Peijie Huang]], [[Wencheng Ai|AUTHOR Wencheng Ai]], [[Jiande Ding|AUTHOR Jiande Ding]], [[Jinchuan Zhang|AUTHOR Jinchuan Zhang]]
</p><p class="cpabstractcardaffiliationlist">SCAU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1338–1342&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based bidirectional long short-term network (BiLSTM) models have recently shown promising results in text classification tasks. However, when the amount of training data is restricted, or the distribution of the test data is quite different from the training data, some potential informative words maybe hard to be captured in training. In this work, we propose a new method to learn attention mechanism for domain classification. Unlike the past attention mechanisms only guided by domain tags of training data, we explore using the latent topics in the data set to learn topic attention, and employ it for BiLSTM. Experiments on the SMP-ECDT benchmark corpus show that the proposed latent topic attention mechanism outperforms the state-of-the-art soft and hard attention mechanisms in domain classification. Moreover, experiment result shows that the proposed method can be trained with additional unlabeled data and further improve the domain classification performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chaitanya Narisetty|AUTHOR Chaitanya Narisetty]]
</p><p class="cpabstractcardaffiliationlist">NEC, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1343–1347&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a determined blind source separation (BSS) method with a Bayesian generalization for unified modelling of multiple audio sources. Our probabilistic framework allows a flexible multi-source modelling where the number of latent features required for the unified model is optimally estimated. When partitioning the latent features of the unified model to represent individual sources, the proposed approach helps to avoid over-fitting or under-fitting the correlations among sources. This adaptability of our Bayesian generalization therefore adds flexibility to conventional BSS approaches, where the number of latent features in the unified model has to be specified in advance. In the task of separating speech mixture signals, we show that our proposed method models diverse sources in a flexible manner and markedly improves the separation performance as compared to the conventional methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Naoya Takahashi|AUTHOR Naoya Takahashi]]^^1^^, [[Sudarsanam Parthasaarathy|AUTHOR Sudarsanam Parthasaarathy]]^^2^^, [[Nabarun Goswami|AUTHOR Nabarun Goswami]]^^2^^, [[Yuki Mitsufuji|AUTHOR Yuki Mitsufuji]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Sony, Japan; ^^2^^Sony, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1348–1352&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we propose a method of single-channel speaker-independent multi-speaker speech separation for an unknown number of speakers. As opposed to previous works, in which the number of speakers is assumed to be known in advance and speech separation models are specific for the number of speakers, our proposed method can be applied to cases with different numbers of speakers using a single model by recursively separating a speaker. To make the separation model recursively applicable, we propose one-and-rest permutation invariant training (OR-PIT). Evaluation on WSJ0-2mix and WSJ0-3mix datasets show that our proposed method achieves state-of-the-art results for two- and three-speaker mixtures with a single model. Moreover, the same model can separate four-speaker mixture, which was never seen during the training. We further propose the detection of the number of speakers in a mixture during recursive separation and show that this approach can more accurately estimate the number of speakers than detection in advance by using a deep neural network based classifier.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pieter Appeltans|AUTHOR Pieter Appeltans]], [[Jeroen Zegers|AUTHOR Jeroen Zegers]], [[Hugo Van hamme|AUTHOR Hugo Van hamme]]
</p><p class="cpabstractcardaffiliationlist">Katholieke Universiteit Leuven, Belgium</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1353–1357&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper examines the applicability in realistic scenarios of two deep learning based solutions to the overlapping speaker separation problem. Firstly, we present experiments that show that these methods are applicable for a broad range of languages. Further experimentation indicates limited performance loss for untrained languages, when these have common features with the trained language(s). Secondly, it investigates how the methods deal with realistic background noise and proposes some modifications to better cope with these disturbances. The deep learning methods that will be examined are deep clustering and deep attractor networks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhaoyi Gu|AUTHOR Zhaoyi Gu]], [[Jing Lu|AUTHOR Jing Lu]], [[Kai Chen|AUTHOR Kai Chen]]
</p><p class="cpabstractcardaffiliationlist">Nanjing University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1358–1362&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Independent vector analysis (IVA) utilizing Gaussian mixture model (GMM) as source priors has been demonstrated as an effective algorithm for joint blind source separation (JBSS). However, an extra pre-training process is required to provide initial parameter values for successful speech separation. In this paper, we introduce a time-varying parameter in the GMM to adapt to the temporal power fluctuation embedded in the nonstationary speech signal so as to avoid the pre-training process. The expectation-maximization (EM) process updating both the demixing matrix and the signal model is altered correspondingly. Experimental results confirm the efficacy of the proposed method under random initialization and further show its advantage in terms of a competitive separation accuracy and a faster convergence speed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gene-Ping Yang|AUTHOR Gene-Ping Yang]], [[Chao-I Tuan|AUTHOR Chao-I Tuan]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]], [[Lin-shan Lee|AUTHOR Lin-shan Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1363–1367&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech separation has been very successful with deep learning techniques. Substantial effort has been reported based on approaches over magnitude spectrogram, which is well known as the standard time-and-frequency cross-domain representation for speech signals. It is highly correlated to the phonetic structure of speech, or “how the speech sounds” when perceived by human, but primarily frequency domain features carrying temporal behaviour. Very impressive work achieving speech separation over time domain was reported recently, probably because waveforms in time domain may describe the different realizations of speech in a more precise way than magnitude spectrogram lacking phase information. In this paper, we propose a framework properly integrating the above two directions, hoping to achieve both purposes. We construct a time-and-frequency feature map by concatenating 1-dim convolution encoded feature map (for time domain) and magnitude spectrogram (for frequency domain), which was then processed by an embedding network and clustering approaches very similar to those used in time and frequency domain prior works. In this way, the information in time and frequency domains, as well as the interactions between them, can be jointly considered during embedding and clustering. Very encouraging results (state-of-the-art to our knowledge) were obtained with WSJ0-2mix dataset in preliminary experiments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gordon Wichern|AUTHOR Gordon Wichern]]^^1^^, [[Joe Antognini|AUTHOR Joe Antognini]]^^2^^, [[Michael Flynn|AUTHOR Michael Flynn]]^^2^^, [[Licheng Richard Zhu|AUTHOR Licheng Richard Zhu]]^^2^^, [[Emmett McQuinn|AUTHOR Emmett McQuinn]]^^2^^, [[Dwight Crow|AUTHOR Dwight Crow]]^^2^^, [[Ethan Manilow|AUTHOR Ethan Manilow]]^^1^^, [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MERL, USA; ^^2^^Whisper.ai, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1368–1372&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent progress in separating the speech signals from multiple overlapping speakers using a single audio channel has brought us closer to solving the cocktail party problem. However, most studies in this area use a constrained problem setup, comparing performance when speakers overlap almost completely, at artificially low sampling rates, and with no external background noise. In this paper, we strive to move the field towards more realistic and challenging scenarios. To that end, we created the WSJ0 Hipster Ambient Mixtures (WHAM!) dataset, consisting of two speaker mixtures from the wsj0-2mix dataset combined with real ambient noise samples. The samples were collected in coffee shops, restaurants, and bars in the San Francisco Bay Area, and are made publicly available. We benchmark various speech separation architectures and objective functions to evaluate their robustness to noise. While separation performance decreases as a result of noise, we still observe substantial gains relative to the noisy signals for most approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andreas Nautsch|AUTHOR Andreas Nautsch]]
</p><p class="cpabstractcardaffiliationlist">EURECOM, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>The survey addresses recent work that has the aim of preserving privacy in speech communication applications. The talk discusses recent privacy legislation in the US and especially the European Union, and focuses upon the GDPR (EU Regulation 2016/679) and the Police Directive (EU Directive 2016/680), covering also ‘Privacy by Design’ and ‘Privacy by Default’ policy concepts. Emphasis is placed on voice biometrics and non-biometric speech technology. Since there is no “one size fits all” solution, specific cryptographic solutions to privacy preservation are highlighted. Among other classification tasks, voice biometrics can intrude on privacy when misused; the talk surveys a number of privacy safeguards. The international standard for biometric information protection is reviewed and figures of merit are proposed regarding, e.g., the extent to which privacy is preserved. More interdisciplinary efforts are necessary to reach a common understanding between speech technology, legislation, and cryptography communities (among many others). Future challenges include the need to not only carry out decision inference securely, but also to preserve privacy, where cryptographic methods need to meet the demands of speech signal processing. In communication, speech is a medium, not a message.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Carol Chermaz|AUTHOR Carol Chermaz]]^^1^^, [[Cassia Valentini-Botinhao|AUTHOR Cassia Valentini-Botinhao]]^^1^^, [[Henning Schepker|AUTHOR Henning Schepker]]^^2^^, [[Simon King|AUTHOR Simon King]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Edinburgh, UK; ^^2^^Carl von Ossietzky Universität Oldenburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1373–1377&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech playback (e.g., TV, radio, public address) becomes harder to understand in the presence of noise and reverberation. NELE (Near End Listening Enhancement) algorithms can improve intelligibility by modifying the signal before it is played back. Substantial intelligibility improvements have been achieved in the lab for both natural and synthetic speech. However, evidence is still scarce on how these algorithms work under conditions of realistic noise and reverberation.

We present a realistic test platform, featuring two representative everyday scenarios in which speech playback may occur (in the presence of both noise and reverberation): a domestic space (living room) and a public space (cafeteria). The generated stimuli are evaluated by measuring keyword accuracy rates in a listening test with normal hearing subjects.

We use the new platform to compare three state-of-the-art NELE algorithms, employing either noise-adaptive or non-adaptive strategies, and with or without compensation for reverberation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amin Edraki|AUTHOR Amin Edraki]]^^1^^, [[Wai-Yip Chan|AUTHOR Wai-Yip Chan]]^^1^^, [[Jesper Jensen|AUTHOR Jesper Jensen]]^^2^^, [[Daniel Fogerty|AUTHOR Daniel Fogerty]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Queen’s University, Canada; ^^2^^Aalborg University, Denmark; ^^3^^University of South Carolina, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1378–1382&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Several recent high-performing intelligibility estimators of acoustically degraded speech signals employ temporal modulation analysis. In this paper, we investigate the utility of using both spectro- and temporal-modulation for estimating speech intelligibility. We modified a pre-existing speech intelligibility estimation scheme (STMI) that was inspired by human auditory spectro-temporal modulation analysis. We produced several variants of the modified STMI and assessed their intelligibility prediction accuracy, in comparison with several high-performing estimators. Among the estimators tested, one of the STMI variants and eSTOI performed consistently well on both noisy and reverberated speech. These results suggest that spectro-temporal modulation analysis is useful for certain degradation conditions such as modulated noise and reverberation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhuohuang Zhang|AUTHOR Zhuohuang Zhang]], [[Yi Shen|AUTHOR Yi Shen]]
</p><p class="cpabstractcardaffiliationlist">Indiana University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1383–1387&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Ideal binary mask (IBM) is a signal-processing technique that retains the time-frequency regions in a mixture of target speech and background noise when the local signal-to-noise ratio (SNR) is higher than a local criterion (LC) and removes the regions otherwise. The intelligibility of IBM-processed speech is typically high and does not depend on the choice of LC for a wide range of LC values. The current study investigates the listeners’ preferences on the LC value for IBM processed speech. Concatenated everyday sentences were mixed with three types of background noises (airplane noise, train noise, and multi-talker babble) and were presented continuously to the listeners following the IBM processing. The IBM algorithm was implemented so that the listeners were able to adjust the LC value in real-time using a programmable knob. The listeners were instructed to adjust the LC value until the IBM-processed stimuli reached the most preferable quality. Across 20 listeners, large individual differences were observed for the preferred LC values. A cluster analysis identified that 11 of the 20 listeners exhibited consistent patterns of results. For this main cluster of listeners, the preferred LC value depended on the noise type, overall SNR, and the difficulty of the target sentences.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tuan Dinh|AUTHOR Tuan Dinh]]^^1^^, [[Alexander Kain|AUTHOR Alexander Kain]]^^1^^, [[Kris Tjaden|AUTHOR Kris Tjaden]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Oregon Health & Science University, USA; ^^2^^University at Buffalo, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1388–1392&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a new type of spectral feature that is both compact and interpolable, and thus ideally suited for regression approaches that involve averaging. The feature is realized by means of a speaker-independent variational autoencoder (VAE), which learns a latent space based on the low-dimensional manifold of high-resolution speech spectra. In vocoding experiments, we showed that using a 12-dimensional VAE feature (VAE-12) resulted in significantly better perceived speech quality compared to a 12-dimensional MCEP feature. In voice conversion experiments, using VAE-12 resulted in significantly better perceived speech quality as compared to 40-dimensional MCEPs, with similar speaker accuracy. In habitual to clear style conversion experiments, we significantly improved the speech intelligibility for one of three speakers, using a custom skip-connection deep neural network, with the average keyword recall accuracy increasing from 24% to 46%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[P. von Platen|AUTHOR P. von Platen]]^^1^^, [[Chao Zhang|AUTHOR Chao Zhang]]^^2^^, [[P.C. Woodland|AUTHOR P.C. Woodland]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Cambridge, UK; ^^2^^University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1393–1397&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Traditional automatic speech recognition (ASR) systems often use an acoustic model (AM) built on handcrafted acoustic features, such as log Mel-filter bank (FBANK) values. Recent studies found that AMs with convolutional neural networks (CNNs) can directly use the raw waveform signal as input. Given sufficient training data, these AMs can yield a competitive word error rate (WER) to those built on FBANK features. This paper proposes a novel multi-span structure for acoustic modelling based on the raw waveform with multiple streams of CNN input layers, each processing a different span of the raw waveform signal. Evaluation on both the single channel CHiME4 and AMI data sets show that multi-span AMs give a lower WER than FBANK AMs by an average of about 5% (relative). Analysis of the trained multi-span model reveals that the CNNs can learn filters that are rather different to the log Mel-filters. Furthermore, the paper shows that a widely used single span raw waveform AM can be improved by using a smaller CNN kernel size and increased stride to yield improved WERs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[André Merboldt|AUTHOR André Merboldt]], [[Albert Zeyer|AUTHOR Albert Zeyer]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1398–1402&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech recognition using attention-based models is an effective approach to transcribing audio directly to text within an integrated end-to-end architecture. Global attention approaches compute a weighting over the complete input sequence, whereas local attention mechanisms are restricted to only a localized window of the sequence. For speech, the latter approach supports the monotonicity property of the speech-text alignment. Therefore, we revise several variants of such models and provide a comprehensive comparison, which has been missing so far in the literature. Additionally, we introduce a simple technique to implement windowed attention. This can be applied on top of an existing global attention model. The goal is to transition into a local attention model, by using a local window for the otherwise unchanged attention mechanism, starting from the temporal position with the most recent most active attention energy. We test this method on Switchboard and LibriSpeech and show that the proposed model can even be trained from random initialization and achieve results comparable to the global attention baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eric Sun|AUTHOR Eric Sun]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1403–1407&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, we proposed layer trajectory (LT) LSTM (ltLSTM) which significantly outperforms LSTM by decoupling the functions of senone classification and temporal modeling with separate depth and time LSTMs. We further improved ltLSTM with contextual layer trajectory LSTM (cltLSTM) which uses the future context frames to predict target labels. Given bidirectional LSTM (BLSTM) also uses future context frames to improve its modeling power, in this study we first compare the performance between these two models. Then we apply the layer trajectory idea to further improve BLSTM models, in which BLSTM is in charge of modeling the temporal information while depth-LSTM takes care of senone classification. In addition, we also investigate the model performance among different LT component designs on BLSTM models. Trained with 30 thousand hours of EN-US Microsoft internal data, the proposed layer trajectory BLSTM (ltBLSTM) model improved the baseline BLSTM with up to 14.5% relative word error rate (WER) reduction across different tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shigeki Karita|AUTHOR Shigeki Karita]]^^1^^, [[Nelson Enrique Yalta Soplin|AUTHOR Nelson Enrique Yalta Soplin]]^^2^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^3^^, [[Marc Delcroix|AUTHOR Marc Delcroix]]^^1^^, [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]]^^1^^, [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTT, Japan; ^^2^^Waseda University, Japan; ^^3^^Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1408–1412&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The state-of-the-art neural network architecture named Transformer has been used successfully for many sequence-to-sequence transformation tasks. The advantage of this architecture is that it has a fast iteration speed in the training stage because there is no sequential operation as with recurrent neural networks (RNN). However, an RNN is still the best option for end-to-end automatic speech recognition (ASR) tasks in terms of overall training speed (i.e., convergence) and word error rate (WER) because of effective joint training and decoding methods. To realize a faster and more accurate ASR system, we combine Transformer and the advances in RNN-based ASR. In our experiments, we found that the training of Transformer is slower than that of RNN as regards the learning curve and integration with the naive language model (LM) is difficult. To address these problems, we integrate connectionist temporal classification (CTC) with Transformer for joint training and decoding. This approach makes training faster than with RNNs and assists LM integration. Our proposed ASR system realizes significant improvements in various ASR tasks. For example, it reduced the WERs from 11.1% to 4.5% on the Wall Street Journal and from 16.1% to 11.6% on the TED-LIUM by introducing CTC and LM integration into the Transformer baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shucong Zhang|AUTHOR Shucong Zhang]], [[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Yumo Xu|AUTHOR Yumo Xu]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1413–1417&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Jointly optimised attention-based encoder-decoder models have yielded impressive speech recognition results. The recurrent neural network (RNN) encoder is a key component in such models — it learns the hidden representations of the inputs. However, it is difficult for RNNs to model the long sequences characteristic of speech recognition. To address this, subsampling between stacked recurrent layers of the encoder is commonly employed. This method reduces the length of the input sequence and leads to gains in accuracy. However, static subsampling may both include redundant information and miss relevant information.

We propose using a dynamic subsampling RNN (dsRNN) encoder. Unlike a statically subsampled RNN encoder, the dsRNN encoder can learn to skip redundant frames. Furthermore, the skip ratio may vary at different stages of training, thus allowing the encoder to learn the most relevant information for each epoch. Although the dsRNN is unidirectional, it yields lower phone error rates (PERs) than a bidirectional RNN on TIMIT. The dsRNN encoder has a 16.8% PER on the TIMIT test set, a considerable improvement over static subsampling methods used with unidirectional and bidirectional RNN encoders (23.5% and 20.4% PER respectively).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ding Zhao|AUTHOR Ding Zhao]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[David Rybach|AUTHOR David Rybach]], [[Pat Rondon|AUTHOR Pat Rondon]], [[Deepti Bhatia|AUTHOR Deepti Bhatia]], [[Bo Li|AUTHOR Bo Li]], [[Ruoming Pang|AUTHOR Ruoming Pang]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1418–1422&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Contextual biasing to a specific domain, including a user’s song names, app names and contact names, is an important component of any production-level automatic speech recognition (ASR) system. Contextual biasing is particularly challenging in end-to-end models because these models keep a small list of candidates during beam search, and also do poorly on proper nouns, which is the main source of biasing phrases. In this paper, we present various algorithmic and training improvements to shallow-fusion-based biasing for end-to-end models. We will show that the proposed approach obtains better performance than a state-of-the-art conventional model across a variety of tasks, the first time this has been demonstrated.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Md. Nasir|AUTHOR Md. Nasir]]^^1^^, [[Sandeep Nallan Chakravarthula|AUTHOR Sandeep Nallan Chakravarthula]]^^1^^, [[Brian R.W. Baucom|AUTHOR Brian R.W. Baucom]]^^2^^, [[David C. Atkins|AUTHOR David C. Atkins]]^^3^^, [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]]^^1^^, [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern California, USA; ^^2^^University of Utah, USA; ^^3^^University of Washington, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1423–1427&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Linguistic coordination is a well-established phenomenon in spoken conversations and often associated with positive social behaviors and outcomes. While there have been many attempts to measure lexical coordination or entrainment in literature, only a few have explored coordination in syntactic or semantic space. In this work, we attempt to combine these different aspects of coordination into a single measure by leveraging distances in a neural word representation space. In particular, we adopt the recently proposed Word Mover’s Distance with  word2vec embeddings and extend it to measure the dissimilarity in language used in multiple consecutive speaker turns. To validate our approach, we apply this measure for two case studies in the clinical psychology domain. We find that our proposed measure is correlated with the therapist’s empathy towards their patient in Motivational Interviewing and with affective behaviors in Couples Therapy. In both case studies, our proposed metric exhibits higher correlation than previously proposed measures. When applied to the couples with relationship improvement, we also notice a significant decrease in the proposed measure over the course of therapy, indicating higher linguistic coordination.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenchao Du|AUTHOR Wenchao Du]]^^1^^, [[Louis-Philippe Morency|AUTHOR Louis-Philippe Morency]]^^1^^, [[Jeffrey Cohn|AUTHOR Jeffrey Cohn]]^^2^^, [[Alan W. Black|AUTHOR Alan W. Black]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Carnegie Mellon University, USA; ^^2^^University of Pittsburgh, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1428–1432&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite the recent success of deep learning, it is generally difficult to apply end-to-end deep neural networks to small datasets, such as those from the health domain, due to the tendency of neural networks to over-fit. In addition, how neural models reach their decisions is not well understood. In this paper, we present a two-stage approach to acoustic-based classification of behavior markers related to mental health disorders: first, a dictionary and the mapping from speech signals to the dictionary are learned jointly by a deep autoencoder, then the bag-of-words representation of speech is used for classification, using classifiers with simple decision boundaries. This deep bag-of-features approach has the advantage of offering more interpretability, while the use of deep autoencoder gains improvements in prediction by learning higher level features with long range dependencies, comparing to previous work using only low-level descriptors. In addition, we demonstrate the use of labeled emotion recognition data from other domains to supervise acoustic word encoding in order to help predict psychological traits. Experiments are conducted on audio recordings of 65 clinically recorded interviews with the self-reported level of post-traumatic stress disorder (PTSD), depression, and rapport with the interviewers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rohit Voleti|AUTHOR Rohit Voleti]]^^1^^, [[Stephanie Woolridge|AUTHOR Stephanie Woolridge]]^^2^^, [[Julie M. Liss|AUTHOR Julie M. Liss]]^^1^^, [[Melissa Milanovic|AUTHOR Melissa Milanovic]]^^2^^, [[Christopher R. Bowie|AUTHOR Christopher R. Bowie]]^^2^^, [[Visar Berisha|AUTHOR Visar Berisha]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Arizona State University, USA; ^^2^^Queen’s University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1433–1437&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Several studies have shown that speech and language features, automatically extracted from clinical interviews or spontaneous discourse, have diagnostic value for mental disorders such as schizophrenia and bipolar disorder. They typically make use of a large feature set to train a classifier for distinguishing between two groups of interest, i.e. a clinical and control group. However, a purely data-driven approach runs the risk of overfitting to a particular data set, especially when sample sizes are limited. Here, we first down-select the set of language features to a small subset that is related to a well-validated test of functional ability, the Social Skills Performance Assessment (SSPA). This helps establish the concurrent validity of the selected features. We use only these features to train a simple classifier to distinguish between groups of interest. Linear regression reveals that a subset of language features can effectively model the SSPA, with a correlation coefficient of 0.75. Furthermore, the same feature set can be used to build a strong binary classifier to distinguish between healthy controls and a clinical group (AUC = 0.96) and also between patients within the clinical group with schizophrenia and bipolar I disorder (AUC = 0.83).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Katie Matton|AUTHOR Katie Matton]], [[Melvin G. McInnis|AUTHOR Melvin G. McInnis]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]
</p><p class="cpabstractcardaffiliationlist">University of Michigan, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1438–1442&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Bipolar Disorder, a mood disorder with recurrent mania and depression, requires ongoing monitoring and specialty management. Current monitoring strategies are clinically-based, engaging highly specialized medical professionals who are becoming increasingly scarce. Automatic speech-based monitoring via smartphones has the potential to augment clinical monitoring by providing inexpensive and unobtrusive measurements of a patient’s daily life. The success of such an approach is contingent on the ability to successfully utilize “in-the-wild” data. However, most existing work on automatic mood detection uses datasets collected in clinical or laboratory settings. This study presents experiments in automatically detecting depression severity in individuals with Bipolar Disorder using data derived from clinical interviews and from personal conversations. We find that mood assessment is more accurate using data collected from clinical interactions, in part because of their highly structured nature. We demonstrate that although the features that are most effective in clinical interactions do not extend well to personal conversational data, we can identify alternative features relevant in personal conversational speech to detect mood symptom severity. Our results highlight the challenges unique to working with “in-the-wild” data, providing insight into the degree to which the predictive ability of speech features is preserved outside of a clinical interview.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Morteza Rohanian|AUTHOR Morteza Rohanian]], [[Julian Hough|AUTHOR Julian Hough]], [[Matthew Purver|AUTHOR Matthew Purver]]
</p><p class="cpabstractcardaffiliationlist">Queen Mary University of London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1443–1447&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Semi-structured clinical interviews are frequently used diagnostic tools for identifying depression during an assessment phase. In addition to the lexical content of a patient’s responses, multimodal cues concurrent with the responses are indicators of their motor and cognitive state, including those derivable from their voice quality and gestural behaviour. In this paper, we use information from different modalities in order to train a classifier capable of detecting the binary state of a subject (clinically depressed or not), as well as the level of their depression. We propose a model that is able to perform modality fusion incrementally after each word in an utterance using a time-dependent recurrent approach in a deep learning set-up. To mitigate noisy modalities, we utilize fusion gates that control the degree to which the audio or visual modality contributes to the final prediction. Our results show the effectiveness of word-level multimodal fusion, achieving state-of-the-art results in depression detection and outperforming early feature-level and late fusion techniques.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]^^1^^, [[Adam C. Lammert|AUTHOR Adam C. Lammert]]^^2^^, [[Nadee Seneviratne|AUTHOR Nadee Seneviratne]]^^1^^, [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Maryland at College Park, USA; ^^2^^MIT Lincoln Laboratory, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1448–1452&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech articulation is a complex activity that requires finely timed coordination across articulators, i.e., tongue, jaw, lips, and velum. In a depressed state involving psychomotor retardation, this coordination changes and in turn modifies the perceived speech signal. In previous work, we used the correlation structure of formant trajectories as a proxy for articulatory coordination, from which features were derived for predicting the degree of depression. Ideally, however, we seek coordination of the actual articulators using characteristics such as the degree and place of tongue constriction, often referred to as a  tract variable (TV). In this paper, applying a novel articulatory inversion process, we investigate the relation between correlation structure of formant tracks versus that of TVs. We show on a pilot depressed/control dataset that, with the same number of variables, TV coordination-based features, although with some characteristics similar to their counterpart, outperform the corresponding formant track correlation features in detection of the depressed state. We speculate on the latent information being captured by TVs that is not present in formants.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shachi Paul|AUTHOR Shachi Paul]], [[Rahul Goel|AUTHOR Rahul Goel]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1453–1457&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Machine learning approaches for building task-oriented dialogue systems require large conversational datasets with labels to train on. We are interested in building task-oriented dialogue systems from human-human conversations, which may be available in ample amounts in existing customer care center logs or can be collected from crowd workers. Annotating these datasets can be prohibitively expensive. Recently multiple annotated task-oriented human-machine dialogue datasets have been released, however their annotation schema varies across different collections, even for well-defined categories such as dialogue acts (DAs). We propose a Universal DA schema for task-oriented dialogues and align existing annotated datasets with our schema. Our aim is to train a Universal DA tagger (U-DAT) for task-oriented dialogues and use it for tagging human-human conversations. We investigate multiple datasets, propose manual and automated approaches for aligning the different schema, and present results on a target corpus of human-human dialogues. In unsupervised learning experiments we achieve an F1 score of 54.1% on system turns in human-human dialogues. In a semi-supervised setup, the F1 score increases to 57.7% which would otherwise require at least 1.7K manually annotated turns. For new domains, we show further improvements when unlabeled or labeled target domain data is available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rahul Goel|AUTHOR Rahul Goel]], [[Shachi Paul|AUTHOR Shachi Paul]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1458–1462&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent works on end-to-end trainable neural network based approaches have demonstrated state-of-the-art results on dialogue state tracking. The best performing approaches estimate a probability distribution over all possible slot values. However, these approaches do not scale for large value sets commonly present in real-life applications and are not ideal for tracking slot values that were not observed in the training set. To tackle these issues, candidate-generation-based approaches have been proposed. These approaches estimate a set of values that are possible at each turn based on the conversation history and/or language understanding outputs, and hence enable state tracking over unseen values and large value sets however, they fall short in terms of performance in comparison to the first group. In this work, we analyze the performance of these two alternative dialogue state tracking methods, and present a hybrid approach (HyST) which learns the appropriate method for each slot type. To demonstrate the effectiveness of HyST on a rich-set of slot types, we experiment with the recently released MultiWOZ-2.0 multi-domain, task-oriented dialogue-dataset. Our experiments show that HyST scales to multi-domain applications. Our best performing model results in a relative improvement of 24% and 10% over the previous SOTA and our best baseline respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiří Martínek|AUTHOR Jiří Martínek]]^^1^^, [[Pavel Král|AUTHOR Pavel Král]]^^1^^, [[Ladislav Lenc|AUTHOR Ladislav Lenc]]^^1^^, [[Christophe Cerisara|AUTHOR Christophe Cerisara]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of West Bohemia, Czech Republic; ^^2^^Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1463–1467&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper deals with multi-lingual dialogue act (DA) recognition. The proposed approaches are based on deep neural networks and use word2vec embeddings for word representation. Two multi-lingual models are proposed for this task. The first approach uses one general model trained on the embeddings from all available languages. The second method trains the model on a single pivot language and a linear transformation method is used to project other languages onto the pivot language. The popular convolutional neural network and LSTM architectures with different set-ups are used as classifiers. To the best of our knowledge this is the first attempt at multi-lingual DA recognition using neural networks. The multi-lingual models are validated experimentally on two languages from the Verbmobil corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guan-Lin Chao|AUTHOR Guan-Lin Chao]], [[Ian Lane|AUTHOR Ian Lane]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1468–1472&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>An important yet rarely tackled problem in dialogue state tracking (DST) is scalability for dynamic ontology ( e.g., movie, restaurant) and unseen slot values. We focus on a specific condition, where the ontology is unknown to the state tracker, but the target slot value (except for  none and  dontcare), possibly unseen during training, can be found as word segment in the dialogue context. Prior approaches often rely on candidate generation from n-gram enumeration or slot tagger outputs, which can be inefficient or suffer from error propagation. We propose BERT-DST, an end-to-end dialogue state tracker which directly extracts slot values from the dialogue context. We use BERT as dialogue context encoder whose contextualized language representations are suitable for scalable DST to identify slot values from their semantic context. Furthermore, we employ encoder parameter sharing across all slots with two advantages: (1) Number of parameters does not grow linearly with the ontology. (2) Language representation knowledge can be transferred among slots. Empirical evaluation shows BERT-DST with cross-slot parameter sharing outperforms prior work on the benchmark scalable DST datasets Sim-M and Sim-R, and achieves competitive performance on the standard DSTC2 and WOZ 2.0 datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David Griol|AUTHOR David Griol]]^^1^^, [[Zoraida Callejas|AUTHOR Zoraida Callejas]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad Carlos III de Madrid, Spain; ^^2^^Universidad de Granada, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1473–1477&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Designing the rules for the dialog management process is one of the most resources-consuming tasks when developing a dialog system. Although statistical approaches to dialog management are becoming mainstream in research and industrial contexts, still many systems are being developed following the rule-based or hybrid paradigms. For example, when developers require deterministic system responses to keep total control on the decisions made by the system, or because the infrastructure employed is designed for rule-based systems using technologies currently used in commercial platforms. In this paper, we propose the use of evolutionary algorithms to automatically obtain the dialog rules that are implicit in a dialog corpus. Our proposal makes it possible to exploit the benefits of statistical approaches to build rule-based systems. Our proposal has been evaluated with a practical spoken dialog system, for which we have automatically obtained a set of fuzzy rules to successfully manage the dialog.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xi C. Chen|AUTHOR Xi C. Chen]], [[Adithya Sagar|AUTHOR Adithya Sagar]], [[Justine T. Kao|AUTHOR Justine T. Kao]], [[Tony Y. Li|AUTHOR Tony Y. Li]], [[Christopher Klein|AUTHOR Christopher Klein]], [[Stephen Pulman|AUTHOR Stephen Pulman]], [[Ashish Garg|AUTHOR Ashish Garg]], [[Jason D. Williams|AUTHOR Jason D. Williams]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1478–1482&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe a method for selecting relevant new training data for the LSTM-based domain selection component of our personal assistant system. Adding more annotated training data for any ML system typically improves accuracy, but only if it provides examples not already adequately covered in the existing data. However, obtaining, selecting, and labeling relevant data is expensive. This work presents a simple technique that automatically identifies new helpful examples suitable for human annotation. Our experimental results show that the proposed method, compared with random-selection and entropy-based methods, leads to higher accuracy improvements given a fixed annotation budget. Although developed and tested in the setting of a commercial intelligent assistant, the technique is of wider applicability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seyed Omid Sadjadi|AUTHOR Seyed Omid Sadjadi]]^^1^^, [[Craig Greenberg|AUTHOR Craig Greenberg]]^^1^^, [[Elliot Singer|AUTHOR Elliot Singer]]^^2^^, [[Douglas Reynolds|AUTHOR Douglas Reynolds]]^^2^^, [[Lisa Mason|AUTHOR Lisa Mason]]^^3^^, [[Jaime Hernandez-Cordero|AUTHOR Jaime Hernandez-Cordero]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NIST, USA; ^^2^^MIT Lincoln Laboratory, USA; ^^3^^DoD, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1483–1487&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In 2018, the U.S. National Institute of Standards and Technology (NIST) conducted the most recent in an ongoing series of speaker recognition evaluations (SRE). SRE18 was organized in a similar manner to SRE16, focusing on speaker detection over conversational telephony speech (CTS) collected outside north America. SRE18 also featured several new aspects including: two new data domains, namely voice over internet protocol (VoIP) and audio extracted from  amateur online videos (AfV), as well as a new language (Tunisian Arabic). A total of 78 organizations (forming 48 teams) from academia and industry participated in SRE18 and submitted 129 valid system outputs under  fixed and  open training conditions first introduced in SRE16. This paper presents an overview of the evaluation and several analyses of system performance for all primary conditions in SRE18. The evaluation results suggest that 1) speaker recognition on AfV was more challenging than on telephony data, 2) speaker representations (aka embeddings) extracted using end-to-end neural network frameworks were most effective, 3) top performing systems exhibited similar performance, and 4) greatest performance improvements were largely due to data augmentation, use of extended and more complex models for data representation, as well as effective use of the provided development sets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jesús Villalba|AUTHOR Jesús Villalba]]^^1^^, [[Nanxin Chen|AUTHOR Nanxin Chen]]^^1^^, [[David Snyder|AUTHOR David Snyder]]^^1^^, [[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]]^^1^^, [[Alan McCree|AUTHOR Alan McCree]]^^1^^, [[Gregory Sell|AUTHOR Gregory Sell]]^^1^^, [[Jonas Borgstrom|AUTHOR Jonas Borgstrom]]^^2^^, [[Fred Richardson|AUTHOR Fred Richardson]]^^2^^, [[Suwon Shon|AUTHOR Suwon Shon]]^^3^^, [[François Grondin|AUTHOR François Grondin]]^^3^^, [[Réda Dehak|AUTHOR Réda Dehak]]^^4^^, [[Leibny Paola García-Perera|AUTHOR Leibny Paola García-Perera]]^^1^^, [[Daniel Povey|AUTHOR Daniel Povey]]^^1^^, [[Pedro A. Torres-Carrasquillo|AUTHOR Pedro A. Torres-Carrasquillo]]^^2^^, [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]^^1^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^MIT Lincoln Laboratory, USA; ^^3^^MIT, USA; ^^4^^EPITA-LSE, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1488–1492&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a condensed description of the joint effort of JHU-CLSP, JHU-HLTCOE, MIT-LL., MIT CSAIL and LSE-EPITA for NIST SRE18. All the developed systems consisted of x-vector/i-vector embeddings with some flavor of PLDA backend. Very deep x-vector architectures — Extended and Factorized TDNN, and ResNets — clearly outperformed shallower x-vectors and i-vectors. The systems were tailored to the video (VAST) or to the telephone (CMN2) condition. The VAST data was challenging, yielding 4 times worse performance than other video based datasets like Speakers in the Wild. We were able to calibrate the VAST data with very few development trials by using careful adaptation and score normalization methods. The VAST primary fusion yielded EER=10.18% and Cprimary=0.431. By improving calibration in post-eval, we reached Cprimary=0.369. In CMN2, we used unsupervised SPLDA adaptation based on agglomerative clustering and score normalization to correct the domain shift between English and Tunisian Arabic models. The CMN2 primary fusion yielded EER=4.5% and Cprimary=0.313. Extended TDNN x-vector was the best single system obtaining EER=11.1% and Cprimary=0.452 in VAST; and 4.95% and 0.354 in CMN2.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]], [[David Snyder|AUTHOR David Snyder]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Alan McCree|AUTHOR Alan McCree]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1493–1496&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State-of-the-art text-independent speaker recognition systems for long recordings (a few minutes) are based on deep neural network (DNN) speaker embeddings. Current implementations of this paradigm use short speech segments (a few seconds) to train the DNN. This introduces a mismatch between training and inference when extracting embeddings for long duration recordings. To address this, we present a DNN refinement approach that updates a subset of the DNN parameters with full recordings to reduce this mismatch. At the same time, we also modify the DNN architecture to produce embeddings optimized for cosine distance scoring. This is accomplished using a large-margin strategy with angular softmax. Experimental validation shows that our approach is capable of producing embeddings that achieve record performance on the SITW benchmark.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^1^^, [[Ville Hautamäki|AUTHOR Ville Hautamäki]]^^2^^, [[Tomi H. Kinnunen|AUTHOR Tomi H. Kinnunen]]^^2^^, [[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]]^^1^^, [[Koji Okabe|AUTHOR Koji Okabe]]^^1^^, [[Ville Vestman|AUTHOR Ville Vestman]]^^1^^, [[Jing Huang|AUTHOR Jing Huang]]^^3^^, [[Guohong Ding|AUTHOR Guohong Ding]]^^4^^, [[Hanwu Sun|AUTHOR Hanwu Sun]]^^5^^, [[Anthony Larcher|AUTHOR Anthony Larcher]]^^6^^, [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]]^^7^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^7^^, [[Mickael Rouvier|AUTHOR Mickael Rouvier]]^^8^^, [[Pierre-Michel Bousquet|AUTHOR Pierre-Michel Bousquet]]^^8^^, [[Wei Rao|AUTHOR Wei Rao]]^^9^^, [[Qing Wang|AUTHOR Qing Wang]]^^10^^, [[Chunlei Zhang|AUTHOR Chunlei Zhang]]^^11^^, [[Fahimeh Bahmaninezhad|AUTHOR Fahimeh Bahmaninezhad]]^^11^^, [[Héctor Delgado|AUTHOR Héctor Delgado]]^^12^^, [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]^^12^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NEC, Japan; ^^2^^University of Eastern Finland, Finland; ^^3^^JD.com, USA; ^^4^^JD.com, China; ^^5^^A*STAR, Singapore; ^^6^^LIUM (EA 4023), France; ^^7^^NUS, Singapore; ^^8^^LIA (EA 4128), France; ^^9^^NTU, Singapore; ^^10^^Northwestern Polytechnical University, China; ^^11^^University of Texas at Dallas, USA; ^^12^^EURECOM, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1497–1501&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The I4U consortium was established to facilitate a joint entry to NIST speaker recognition evaluations (SRE). The latest edition of such joint submission was in SRE 2018, in which the I4U submission was among the best-performing systems. SRE’18 also marks the 10-year anniversary of I4U consortium into NIST SRE series of evaluation. The primary objective of the current paper is to summarize the results and lessons learned based on the twelve sub-systems and their fusion submitted to SRE’18. It is also our intention to present a shared view on the advancements, progresses, and major paradigm shifts that we have witnessed as an SRE participant in the past decade from SRE’08 to SRE’18. In this regard, we have seen, among others, a paradigm shift from supervector representation to deep speaker embedding, and a switch of research challenge from channel compensation to domain adaptation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Elie Khoury|AUTHOR Elie Khoury]], [[Khaled Lakhdhar|AUTHOR Khaled Lakhdhar]], [[Andrew Vaughan|AUTHOR Andrew Vaughan]], [[Ganesh Sivaraman|AUTHOR Ganesh Sivaraman]], [[Parav Nagarsheth|AUTHOR Parav Nagarsheth]]
</p><p class="cpabstractcardaffiliationlist">Pindrop, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1502–1505&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper summarizes Pindrop Labs’ submission to the multi-target speaker detection and identification challenge evaluation (MCE 2018). The MCE challenge is geared towards detecting blacklisted speakers (fraudsters) in the context of call centers. Particularly, it aims to answer the following two questions: Is the speaker of the test utterance on the blacklist? If so, which speaker is it among the blacklisted speakers? While one single system can answer both questions, this work looks at them as two separate tasks: blacklist detection and closed-set identification. The former is addressed using four different systems including probabilistic linear discriminant analysis (PLDA), two deep neural network (DNN) based systems, and a simple system based on cosine similarity and logistic regression. The latter is addressed by combining PLDA and neural network based systems. The proposed system was the best performing system at the challenge on both tasks, reducing the blacklist detection error (Top-S EER) by 31.9% and the identification error (Top-1 EER) by 46.4% over the MCE baseline on the evaluation data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]], [[David Snyder|AUTHOR David Snyder]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Alan McCree|AUTHOR Alan McCree]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1506–1510&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we introduce a speaker recognition benchmark derived from the publicly-available CHiME-5 corpus. Our goal is to foster research that tackles the challenging artifacts introduced by far-field multi-speaker recordings of naturally occurring spoken interactions. The benchmark comprises four tasks that involve enrollment and test conditions with single-speaker and/or multi-speaker recordings. Additionally, it supports performance comparisons between close-talking vs distant/far-field microphone recordings, and single-microphone vs microphone-array approaches. We validate the evaluation design with a single-microphone state-of-the-art DNN speaker recognition and diarization system (that we are making publicly available). The results show that the proposed tasks are very challenging, and can be used to quantify the performance gap due to the degradations present in far-field multi-speaker recordings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David Ayllón|AUTHOR David Ayllón]], [[Héctor A. Sánchez-Hevia|AUTHOR Héctor A. Sánchez-Hevia]], [[Carol Figueroa|AUTHOR Carol Figueroa]], [[Pierre Lanchantin|AUTHOR Pierre Lanchantin]]
</p><p class="cpabstractcardaffiliationlist">ObEN, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1511–1515&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/3104" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>The quality of the voices synthesized by a Text-to-Speech (TTS) system depends on the quality of the training data. In real case scenario of TTS personalization from user’s voice recordings, the latter are usually affected by noise and reverberation. Speech enhancement can be useful to clean the corrupted speech but it is necessary to understand the effects that noise and reverberation have on the different statistical models that compose the TTS system. In this work we perform a thorough study of how noise and reverberation impact the acoustic and duration models of the TTS system. We also evaluate the effectiveness of time-frequency masking for cleaning the training data. Objective and subjective evaluations reveal that under normal recording scenarios noise leads to a higher degradation than reverberation in terms of naturalness of the synthesized speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ioannis K. Douros|AUTHOR Ioannis K. Douros]]^^1^^, [[Jacques Felblinger|AUTHOR Jacques Felblinger]]^^2^^, [[Jens Frahm|AUTHOR Jens Frahm]]^^3^^, [[Karyna Isaieva|AUTHOR Karyna Isaieva]]^^2^^, [[Arun A. Joseph|AUTHOR Arun A. Joseph]]^^3^^, [[Yves Laprie|AUTHOR Yves Laprie]]^^1^^, [[Freddy Odille|AUTHOR Freddy Odille]]^^2^^, [[Anastasiia Tsukanova|AUTHOR Anastasiia Tsukanova]]^^1^^, [[Dirk Voit|AUTHOR Dirk Voit]]^^3^^, [[Pierre-André Vuissoz|AUTHOR Pierre-André Vuissoz]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Loria (UMR 7503), France; ^^2^^IADI (Inserm U1254), France; ^^3^^MPI for Biophysical Chemistry, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1556–1560&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work we describe the creation of ArtSpeechMRIfr: a real-time as well as static magnetic resonance imaging (rtMRI, 3D MRI) database of the vocal tract. The database contains also processed data: denoised audio, its phonetically aligned annotation, articulatory contours, and vocal tract volume information, which provides a rich resource for speech research. The database is built on data from two male speakers of French.

It covers a number of phonetic contexts in the controlled part, as well as spontaneous speech, 3D MRI scans of sustained vocalic articulations, and scans of the subjects’ dental casts. The corpus for rtMRI consists of 79 synthetic sentences constructed from a phonetized dictionary that makes it possible to shorten the duration of acquisitions while keeping a very good coverage of the phonetic contexts that exist in French. The 3D MRI includes acquisitions for 12 French vowels and 10 consonants, each of which was pronounced in several vocalic contexts. Articulatory contours (tongue, jaw, epiglottis, larynx, velum, lips) as well as 3D volumes were manually drawn for part of the images.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jia-Xiang Chen|AUTHOR Jia-Xiang Chen]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1561–1565&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Identifying speakers in novels aims at determining who says a quote in a given context by text analysis. This task is important for speech synthesis systems to assign appropriate voices to the quotes when producing audio books. Several English datasets have been constructed for this task. However, the difference between English and Chinese impedes processing Chinese novels using the models built on English datasets directly. Therefore, this paper presents a Chinese dataset, which contains 2,548 quotes from  World of Plainness, a famous Chinese novel, with manually labelled speaker identities. Furthermore, two baseline speaker identification methods, i.e., a rule-based one and a classifier-based one, are designed and experimented using this Chinese dataset. These two methods achieve accuracies of 53.77% and 58.66% respectively on the test set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kyubyong Park|AUTHOR Kyubyong Park]]^^1^^, [[Thomas Mulc|AUTHOR Thomas Mulc]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Kakao Brain, Korea; ^^2^^Expedia Group, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1566–1570&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe our development of CSS10, a collection of single speaker speech datasets for ten languages. It is composed of short audio clips from LibriVox audiobooks and their aligned texts. To validate its quality we train two neural text-to-speech models on each dataset. Subsequently, we conduct Mean Opinion Score tests on the synthesized speech samples. We make our datasets, pre-trained models, and test resources publicly available. We hope they will be used for future speech tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[F.-Y. Kuo|AUTHOR F.-Y. Kuo]], [[I.C. Ouyang|AUTHOR I.C. Ouyang]], [[S. Aryal|AUTHOR S. Aryal]], [[Pierre Lanchantin|AUTHOR Pierre Lanchantin]]
</p><p class="cpabstractcardaffiliationlist">ObEN, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1516–1520&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work investigates different selection and training schemes to improve the naturalness of synthesized text-to-speech voices built on found data. The approach outlined in this paper examines the combinations of different metrics to detect and reject segments of training data that can degrade the performance of the system. We conducted a series of objective and subjective experiments on two 24-hour single-speaker corpuses of found data collected from diverse sources. We show that using an even smaller, yet carefully selected, set of data can lead to a text-to-speech system able to generate more natural speech than a system trained on the complete dataset. Moreover, we show that training the system by fine-tuning from the system trained on the whole dataset leads to additional improvement in naturalness by allowing a more aggressive selection of training data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David A. Braude|AUTHOR David A. Braude]]^^1^^, [[Matthew P. Aylett|AUTHOR Matthew P. Aylett]]^^1^^, [[Caoimhín Laoide-Kemp|AUTHOR Caoimhín Laoide-Kemp]]^^1^^, [[Simone Ashby|AUTHOR Simone Ashby]]^^2^^, [[Kristen M. Scott|AUTHOR Kristen M. Scott]]^^2^^, [[Brian Ó Raghallaigh|AUTHOR Brian Ó Raghallaigh]]^^3^^, [[Anna Braudo|AUTHOR Anna Braudo]]^^4^^, [[Alex Brouwer|AUTHOR Alex Brouwer]]^^4^^, [[Adriana Stan|AUTHOR Adriana Stan]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CereProc, UK; ^^2^^Universidade da Madeira, Portugal; ^^3^^Dublin City University, Ireland; ^^4^^University of Edinburgh, UK; ^^5^^Technical University of Cluj-Napoca, Romania</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1521–1525&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ongoing focus in speech technology research on machine learning based approaches leaves the community hungry for data. However, datasets tend to be recorded once and then released, sometimes behind registration requirements or paywalls. In this paper we describe our Living Audio Dataset. The aim is to provide audio data that is in the public domain, multilingual, and expandable by communities. We discuss the role of linguistic resources, given the success of systems such as Tacotron which use direct text-to-speech mappings, and consider how data provenance could be built into such resources. So far the data has been collected for TTS purposes, however, it is also suitable for ASR. At the time of publication audio resources already exist for Dutch, R.P. English, Irish, and Russian.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Heiga Zen|AUTHOR Heiga Zen]]^^1^^, [[Viet Dang|AUTHOR Viet Dang]]^^2^^, [[Rob Clark|AUTHOR Rob Clark]]^^2^^, [[Yu Zhang|AUTHOR Yu Zhang]]^^3^^, [[Ron J. Weiss|AUTHOR Ron J. Weiss]]^^3^^, [[Ye Jia|AUTHOR Ye Jia]]^^3^^, [[Zhifeng Chen|AUTHOR Zhifeng Chen]]^^3^^, [[Yonghui Wu|AUTHOR Yonghui Wu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, Japan; ^^2^^Google, UK; ^^3^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1526–1530&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a new speech corpus called “LibriTTS” designed for text-to-speech use. It is derived from the original audio and text materials of the LibriSpeech corpus, which has been used for training and evaluating automatic speech recognition systems. The new corpus inherits desired properties of the LibriSpeech corpus while addressing a number of issues which make LibriSpeech less than ideal for text-to-speech work. The released corpus consists of 585 hours of speech data at 24kHz sampling rate from 2,456 speakers and the corresponding texts. Experimental results show that neural end-to-end TTS models trained from the LibriTTS corpus achieved above 4.0 in mean opinion scores in naturalness in five out of six evaluation speakers. The corpus is freely available for download from http://www.openslr.org/60/.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meysam Shamsi|AUTHOR Meysam Shamsi]], [[Damien Lolive|AUTHOR Damien Lolive]], [[Nelly Barbot|AUTHOR Nelly Barbot]], [[Jonathan Chevelu|AUTHOR Jonathan Chevelu]]
</p><p class="cpabstractcardaffiliationlist">IRISA (UMR 6074), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1531–1535&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we propose an approach for script selection in order to design TTS speech corpora. A Deep Convolutional Neural Network (DCNN) is used to project linguistic information to an embedding space. The embedded representation of the corpus is then fed to a selection process to extract a subset of utterances which offers a good linguistic coverage while tending to limit the linguistic unit repetition. We present two selection processes: a clustering approach based on utterance distance and another method that tends to reach a target distribution of linguistic events. We compare the synthetic signal quality of the proposed methods to state of art methods objectively and subjectively. The subjective and objective measures confirm the performance of the proposed methods in order to design speech corpora with better synthetic speech quality. The perceptual test shows that our TTS global cost can be used as an alternative to synthetic overall quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nobukatsu Hojo|AUTHOR Nobukatsu Hojo]], [[Noboru Miyazaki|AUTHOR Noboru Miyazaki]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1536–1540&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Text-to-speech (TTS) synthesis systems have been evaluated with respect to attributes such as quality, naturalness and intelligibility. However, an evaluation protocol with respect to communication of intentions has not yet been established. Evaluating this sometimes produce unreliable results because participants can misinterpret definitions of intentions. This misinterpretation is caused by the colloquial and implicit description of intentions. To address this problem, this work explicitly defines each intention following theoretical definitions, “felicity conditions”, in speech-act theory. We define the communication of each intention with one to four necessary and sufficient conditions to be satisfied. In listening tests, participants rated whether each condition was satisfied or not. We compared the proposed protocol with the conventional baseline using four different voice conditions; neutral TTS, conversational TTS w/ and w/o intention inputs, and recorded speech. The experimental results with 10 participants showed that the proposed protocol produced smaller within-group variation and larger between-group variation. These results indicate that the proposed protocol can be used to evaluate intention communication with higher inter-rater reliability and sensitivity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chen-Chou Lo|AUTHOR Chen-Chou Lo]]^^1^^, [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]]^^2^^, [[Wen-Chin Huang|AUTHOR Wen-Chin Huang]]^^1^^, [[Xin Wang|AUTHOR Xin Wang]]^^3^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^3^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica, Taiwan; ^^2^^Academia Sinica, Taiwan; ^^3^^NII, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1541–1545&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Existing objective evaluation metrics for voice conversion (VC) are not always correlated with human perception. Therefore, training VC models with such criteria may not effectively improve naturalness and similarity of converted speech. In this paper, we propose deep learning-based assessment models to predict human ratings of converted speech. We adopt the convolutional and recurrent neural network models to build a mean opinion score (MOS) predictor, termed as MOSNet. The proposed models are tested on large-scale listening test results of the Voice Conversion Challenge (VCC) 2018. Experimental results show that the predicted scores of the proposed MOSNet are highly correlated with human MOS ratings at the system level while being fairly correlated with human MOS ratings at the utterance level. Meanwhile, we have modified MOSNet to predict the similarity scores, and the preliminary results show that the predicted scores are also fairly correlated with human ratings. These results confirm that the proposed models could be used as a computational evaluator to measure the MOS of VC systems to reduce the need for expensive human rating.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jason Fong|AUTHOR Jason Fong]], [[Pilar Oplustil Gallegos|AUTHOR Pilar Oplustil Gallegos]], [[Zack Hodari|AUTHOR Zack Hodari]], [[Simon King|AUTHOR Simon King]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1546–1550&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequence-to-sequence (S2S) text-to-speech (TTS) models can synthesise high quality speech when large amounts of annotated training data are available. Transcription errors exist in all data and are especially prevalent in found data such as audiobooks. In previous generations of TTS technology, alignment using Hidden Markov Models (HMMs) was widely used to identify and eliminate bad data. In S2S models, the use of attention replaces HMM-based alignment, and there is no explicit mechanism for removing bad data. It is not yet understood how such models deal with transcription errors in the training data.

We evaluate the quality of speech from S2S-TTS models when trained on data with imperfect transcripts, simulated using corruption, or provided by an Automatic Speech Recogniser (ASR). We find that attention can skip over extraneous words in the input sequence, providing robustness to insertion errors. But substitutions and deletions pose a problem because there is no ground truth input available to align to the ground truth acoustics during teacher-forced training. We conclude that S2S-TTS systems are only partially robust to training on imperfectly-transcribed data and further work is needed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Avashna Govender|AUTHOR Avashna Govender]]^^1^^, [[Anita E. Wagner|AUTHOR Anita E. Wagner]]^^2^^, [[Simon King|AUTHOR Simon King]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Edinburgh, UK; ^^2^^Rijksuniversiteit Groningen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1551–1555&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With increased use of text-to-speech (TTS) systems in real-world applications, evaluating how such systems influence the human cognitive processing system becomes important. Particularly in situations where cognitive load is high, there may be negative implications such as fatigue. For example, noisy situations generally require the listener to exert increased mental effort. A better understanding of this could eventually suggest new ways of generating synthetic speech that demands low cognitive load. In our previous study, pupil dilation was used as an index of cognitive effort. Pupil dilation was shown to be sensitive to the quality of synthetic speech, but there were some uncertainties regarding exactly what was being measured. The current study resolves some of those uncertainties. Additionally, we investigate how the pupil dilates when listening to synthetic speech in the presence of speech-shaped noise. Our results show that, in quiet listening conditions, pupil dilation does not reflect listening effort but rather attention and engagement. In noisy conditions, increased pupil dilation indicates that listening effort increases as signal-to-noise ratio decreases, under all conditions tested.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ievgen Karaulov|AUTHOR Ievgen Karaulov]], [[Dmytro Tkanov|AUTHOR Dmytro Tkanov]]
</p><p class="cpabstractcardaffiliationlist">SciForce, Ukraine</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1571–1575&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Articulatory distinctive features, as well as phonetic transcription, play important role in speech-related tasks: computer-assisted pronunciation training, text-to-speech conversion (TTS), studying speech production mechanisms, speech recognition for low-resourced languages. End-to-end approaches to speech-related tasks got a lot of traction in recent years. We apply Listen, Attend and Spell (LAS) [1] architecture to phones recognition on a small small training set, like TIMIT [2]. Also, we introduce a novel decoding technique that allows to train manners and places of articulation detectors end-to-end using attention models. We also explore joint phones recognition and articulatory features detection in multitask learning setting.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gakuto Kurata|AUTHOR Gakuto Kurata]]^^1^^, [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, Japan; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1616–1620&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Conventional automatic speech recognition (ASR) systems trained from frame-level alignments can easily leverage posterior fusion to improve ASR accuracy and build a better single model with knowledge distillation. End-to-end ASR systems trained using the Connectionist Temporal Classification (CTC) loss do not require frame-level alignment and hence simplify model training. However, sparse and arbitrary posterior spike timings from CTC models pose a new set of challenges in posterior fusion from multiple models and knowledge distillation between CTC models. We propose a method to train a CTC model so that its spike timings are guided to align with those of a pre-trained  guiding CTC model. As a result, all models that share the same guiding model have aligned spike timings. We show the advantage of our method in various scenarios including posterior fusion of CTC models and knowledge distillation between CTC models with different architectures. With the 300-hour Switchboard training data, the single word CTC model distilled from multiple models improved the word error rates to 13.7%/23.1% from 14.9%/24.1% on the Hub5 2000 Switchboard/CallHome test sets without using any data augmentation, language model, or complex decoder.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takashi Fukuda|AUTHOR Takashi Fukuda]], [[Masayuki Suzuki|AUTHOR Masayuki Suzuki]], [[Gakuto Kurata|AUTHOR Gakuto Kurata]]
</p><p class="cpabstractcardaffiliationlist">IBM, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1621–1625&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a method to create a robust acoustic model by directly fusing multiple neural networks that have dissimilar characteristics without any additional layers/nodes involving retraining procedures. The fused neural networks derive from a shared parent neural network and are referred to as cognate (child) neural networks in this paper. The neural networks are fused by interpolating weight and bias parameters associated with each neuron with a different fusion weight, assuming that cognate neural networks to be fused have the same topology. Therefore, no extra computational cost during decoding is required. The fusion weight is determined by considering a cosine similarity estimated from parameters connecting to the neuron and the fusion is performed for every neuron. Experiments were carried out using a test suite consisting of various acoustic conditions with a wide SNR range, speakers including foreign accented speakers, and speaking styles. From the experiments, the network created by fusing cognate neural networks showed consistent improvement on average compared with the commercial-grade domain-free network originating from the parent model. In addition, we demonstrate that the fusion considering input connections to the neuron achieves the highest accuracy in our experiments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pranav Ladkat|AUTHOR Pranav Ladkat]], [[Oleg Rybakov|AUTHOR Oleg Rybakov]], [[Radhika Arava|AUTHOR Radhika Arava]], [[Sree Hari Krishnan Parthasarathi|AUTHOR Sree Hari Krishnan Parthasarathi]], [[I-Fan Chen|AUTHOR I-Fan Chen]], [[Nikko Strom|AUTHOR Nikko Strom]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1626–1630&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a hybrid approach for scaling distributed training of neural networks by combining Gradient Threshold Compression (GTC) algorithm — a variant of stochastic gradient descent (SGD) — which compresses gradients with thresholding and quantization techniques and Blockwise Model Update Filtering (BMUF) algorithm — a variant of model averaging (MA). In this proposed method, we divide total number of workers into smaller subgroups in a hierarchical manner and limit frequent communication across subgroups. We update local model using GTC within a subgroup and global model using BMUF across different subgroups. We evaluate this approach in an Automatic Speech Recognition (ASR) task, by training deep long short-term memory (LSTM) acoustic models on 2000 hours of speech. Experiments show that, for a wide range in the number of GPUs used for distributed training, the proposed approach achieves a better trade-off between accuracy and scalability compared to GTC and BMUF.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pin-Tuan Huang|AUTHOR Pin-Tuan Huang]]^^1^^, [[Hung-Shin Lee|AUTHOR Hung-Shin Lee]]^^1^^, [[Syu-Siang Wang|AUTHOR Syu-Siang Wang]]^^1^^, [[Kuan-Yu Chen|AUTHOR Kuan-Yu Chen]]^^2^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica, Taiwan; ^^2^^Taiwan Tech, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1631–1635&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Discriminative autoencoders (DcAEs) have been proven to improve generalization of the learned acoustic models by increasing their reconstruction capacity of input features from the frame embeddings. In this paper, we integrate DcAEs into two models, namely TDNNs and LSTMs, which have been commonly adopted in the Kaldi recipes for LVCSR in recent years, using the modified nnet3 neural network library. We also explore two kinds of skip-connection mechanisms for DcAEs, namely concatenation and addition. The results of LVCSR experiments on the MATBN Mandarin Chinese corpus and the WSJ English corpus show that the proposed DcAE-TDNN-based system achieves relative word error rate reductions of 3% and 10% over the TDNN-based baseline system, respectively. The DcAE-TDNN-LSTM-based system also outperforms the TDNN-LSTM-based baseline system. The results imply the flexibility of DcAEs to be integrated with other existing or prospective neural network-based acoustic models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gakuto Kurata|AUTHOR Gakuto Kurata]]^^1^^, [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, Japan; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1636–1640&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a multi-task Connectionist Temporal Classification (CTC) training for end-to-end (E2E) automatic speech recognition with input feature reconstruction as an auxiliary task. Whereas the main task of E2E CTC training and the auxiliary reconstruction task share the encoder network, the auxiliary task tries to reconstruct the input feature from the encoded information. In addition to standard feature reconstruction, we distort the input feature only in the auxiliary reconstruction task, such as (1) swapping the former and latter parts of an utterance, or (2) using a part of an utterance by stripping the beginning or end parts. These distortions intentionally suppress long-span dependencies in the time domain, which avoids overfitting to the training data. We trained phone-based CTC and word-based CTC models with the proposed multi-task learning and demonstrated that it improves ASR accuracy on various test sets that are matched and unmatched with the training data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohan Li|AUTHOR Mohan Li]], [[Yuanjiang Cao|AUTHOR Yuanjiang Cao]], [[Weicong Zhou|AUTHOR Weicong Zhou]], [[Min Liu|AUTHOR Min Liu]]
</p><p class="cpabstractcardaffiliationlist">Toshiba China R&D Center, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1641–1645&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recurrent neural networks (RNNs) trained with connectionist temporal classification (CTC) technique have delivered promising results in many speech recognition tasks. However, the forward-backward algorithm that CTC takes for model optimization requires a huge amount of computation. This paper introduces a new training method towards RNN-based end-to-end models, which significantly saves computing power without losing accuracy. Unlike CTC, the label sequence is aligned to the labelling hypothesis and then to the input sequence by the Weighted Minimum Edit-Distance Aligning (WMEDA) algorithm. Based on the alignment, the framewise supervised training is conducted. Moreover, Pronunciation Embedding (PE), the acoustic representation towards a linguistic target, is proposed in order to calculate the weights in WMEDA algorithm. The model is evaluated on TIMIT and AIShell-1 datasets for English phoneme and Chinese character recognitions. For TIMIT, the model achieves a comparable 18.57% PER to the 18.4% PER of the CTC baseline. As for AIShell-1, a joint Pinyin-character model is trained, giving a 19.38% CER, which is slightly better than the 19.43% CER obtained by the CTC character model, and the training time of this model is only 54.3% of the CTC model’s.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sibo Tong|AUTHOR Sibo Tong]], [[Apoorv Vyas|AUTHOR Apoorv Vyas]], [[Philip N. Garner|AUTHOR Philip N. Garner]], [[Hervé Bourlard|AUTHOR Hervé Bourlard]]
</p><p class="cpabstractcardaffiliationlist">Idiap Research Institute, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1576–1580&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The lattice-free MMI objective (LF-MMI) with finite-state transducer (FST) supervision lattice has been used in semi-supervised training of state-of-the-art neural network acoustic models for automatic speech recognition (ASR). However, the FST based supervision lattice does not sample from the posterior predictive distribution of word-sequences but only contains the decoding hypotheses corresponding to the Maximum Likelihood estimate of weights, so that the training might be biased towards incorrect hypotheses in the supervision lattice even if the best path is perfectly correct. In this paper, we propose a novel framework which uses Dropout at the test time to sample from the posterior predictive distribution of word-sequences to produce unbiased supervision lattices for semi-supervised training. We investigate the dropout sampling from both the acoustic model and the language model to generate supervision. Results on Fisher English show that the proposed approach achieves WER recovery of ~51.6% over regular semi-supervised LF-MMI training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[Michael Picheny|AUTHOR Michael Picheny]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1581–1585&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Evolutionary stochastic gradient descent (ESGD) was proposed as a population-based approach that combines the merits of gradient-aware and gradient-free optimization algorithms for superior overall optimization performance. In this paper we investigate a variant of ESGD for optimization of acoustic models for automatic speech recognition (ASR). In this variant, we assume the existence of a well-trained acoustic model and use it as an anchor in the parent population whose good “gene” will prorogate in the evolution to the offsprings. We propose an ESGD algorithm leveraging the anchor models such that it guarantees the best fitness of the population will never degrade from the anchor model. Experiments on 50-hour Broadcast News (BN50) and 300-hour Switchboard (SWB300) show that the ESGD with anchors can further improve the loss and ASR performance over the existing well-trained acoustic models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nirmesh J. Shah|AUTHOR Nirmesh J. Shah]]^^1^^, [[Hardik B. Sailor|AUTHOR Hardik B. Sailor]]^^2^^, [[Hemant A. Patil|AUTHOR Hemant A. Patil]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^DA-IICT, India; ^^2^^University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1586–1590&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, Deep Neural Network (DNN)-based Voice Conversion (VC) techniques have become popular in the VC literature. These techniques suffer from the issue of overfitting due to less amount of available training data from a target speaker. To alleviate this, pre-training is used for better initialization of the DNN parameters, which leads to faster convergence of parameters. Greedy layerwise pre-training of the stacked Restricted Boltzmann Machine (RBM) or the stacked De-noising AutoEncoder (DAE) is used with extra available speaker-pairs‘ data. This pre-training is time-consuming and requires a separate network to learn the parameters of the network. In this work, we propose to analyze the DNN training strategies for the VC task, specifically with and without pre-training. In particular, we investigate whether an extra pre-training step could be avoided by using recent advances in deep learning. The VC experiments were performed on two VC Challenge (VCC) databases 2016 and 2018. Objective and subjective tests show that DNN trained with Adam optimization and Exponential Linear Unit (ELU) performed comparable or better than the pre-trained DNN without compromising on speech quality and speaker similarity of the converted voices.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohit Goyal|AUTHOR Mohit Goyal]], [[Varun Srivastava|AUTHOR Varun Srivastava]], [[Prathosh A. P.|AUTHOR Prathosh A. P.]]
</p><p class="cpabstractcardaffiliationlist">IIT Delhi, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1591–1595&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Glottal Closure Instants (GCIs) correspond to the temporal locations of significant excitation to the vocal tract occurring during the production of voiced speech. GCI detection from speech signals is a well-studied problem given its importance in speech processing. Most of the existing approaches for GCI detection adopt a two-stage approach (i) Transformation of speech signal into a representative signal where GCIs are localized better, (ii) extraction of GCIs using the representative signal obtained in first stage. The former stage is accomplished using signal processing techniques based on the principles of speech production and the latter with heuristic-algorithms such as dynamic-programming and peak-picking. These methods are thus task-specific and rely on the methods used for representative signal extraction. However in this paper, we formulate the GCI detection problem from a representation learning perspective where appropriate representation is implicitly learned from the raw-speech data samples. Specifically, GCI detection is cast as a supervised multi-task learning problem solved using a deep convolutional neural network jointly optimizing a classification and regression cost. The learning capability is demonstrated with several experiments on standard datasets. The results compare well with the state-of- the-art algorithms while performing better in the case of presence of real-world non-stationary noise.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Joachim Fainberg|AUTHOR Joachim Fainberg]], [[Ondřej Klejch|AUTHOR Ondřej Klejch]], [[Steve Renals|AUTHOR Steve Renals]], [[Peter Bell|AUTHOR Peter Bell]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1596–1600&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the broadcast domain there is an abundance of related text data and partial transcriptions, such as closed captions and subtitles. This text data can be used for lightly supervised training, in which text matching the audio is selected using an existing speech recognition model. Current approaches to light supervision typically filter the data based on matching error rates between the transcriptions and biased decoding hypotheses. In contrast, semi-supervised training does not require matching text data, instead generating a hypothesis using a background language model. State-of-the-art semi-supervised training uses lattice-based supervision with the lattice-free MMI (LF-MMI) objective function. We propose a technique to combine inaccurate transcriptions with the lattices generated for semi-supervised training, thus preserving uncertainty in the lattice where appropriate. We demonstrate that this combined approach reduces the expected error rates over the lattices, and reduces the word error rate (WER) on a broadcast task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wilfried Michel|AUTHOR Wilfried Michel]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1601–1605&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sequence discriminative training criteria have long been a standard tool in automatic speech recognition for improving the performance of acoustic models over their maximum likelihood / cross entropy trained counterparts. While previously a lattice approximation of the search space has been necessary to reduce computational complexity, recently proposed methods use other approximations to dispense of the need for the computationally expensive step of separate lattice creation.

In this work we present a memory efficient implementation of the forward-backward computation that allows us to use unigram word-level language models in the denominator calculation while still doing a full summation on GPU. This allows for a direct comparison of lattice-based and lattice-free sequence discriminative training criteria such as MMI and sMBR, both using the same language model during training.

We compared performance, speed of convergence, and stability on large vocabulary continuous speech recognition tasks like Switchboard and Quaero. We found that silence modeling seriously impacts the performance in the lattice-free case and needs special treatment. In our experiments lattice-free MMI comes on par with its lattice-based counterpart. Lattice-based sMBR still outperforms all lattice-free training criteria.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryo Masumura|AUTHOR Ryo Masumura]], [[Hiroshi Sato|AUTHOR Hiroshi Sato]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Yusuke Ijima|AUTHOR Yusuke Ijima]], [[Takanobu Oba|AUTHOR Takanobu Oba]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1606–1610&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel end-to-end automatic speech recognition (ASR) method that considers whether an input speech can be reconstructed from a generated text or not. A speech-to-text encoder-decoder model is one of the most powerful end-to-end ASR methods since it does not make any conditional independence assumptions. However, encoder-decoder models often suffer from a problem that is caused from a gap between the teacher forcing in a training phase and the free running in a testing phase. In fact, there is no guarantee that texts can be generated correctly when some generation errors occur in conditioning contexts. In order to mitigate this problem, our proposed method utilizes not only a generation probability of the text computed from a speech-to-text encoder-decoder but also a reconstruction probability of the speech computed from a text-to-speech encoder-decoder on the basis of a maximum mutual information criterion. We can expect that considering the reconstruction criterion can impose a constraint against generation errors. In addition, in order to compute the reconstruction probability, we introduce a mixture density network into the text-to-speech encoder-decoder. Our experiments on Japanese lecture ASR tasks demonstrate that considering the reconstruction criterion can yield ASR performance improvements.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abdelwahab Heba|AUTHOR Abdelwahab Heba]]^^1^^, [[Thomas Pellegrini|AUTHOR Thomas Pellegrini]]^^1^^, [[Jean-Pierre Lorré|AUTHOR Jean-Pierre Lorré]]^^2^^, [[Régine Andre-Obrecht|AUTHOR Régine Andre-Obrecht]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IRIT (UMR 5505), France; ^^2^^LINAGORA, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1611–1615&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous work has shown that end-to-end neural-based speech recognition systems can be improved by adding auxiliary tasks at intermediate layers. In this paper, we report multitask learning (MTL) experiments in the context of connectionist temporal classification (CTC) based speech recognition at character level. We compare several MTL architectures that jointly learn to predict characters (sometimes called graphemes) and consonant/vowel (CV) binary labels. The best approach, which we call Char+CV-CTC, adds up the character and CV logits to obtain the final character predictions. The idea is to put more weight on the vowel (consonant) characters when the vowel (consonant) symbol ‘V’ (‘C’) is predicted in the auxiliary-task branch of the network. Experiments were carried out on the Wall Street Journal (WSJ) corpus. Char+CV-CTC achieved the best ASR results with a 2.2% Character Error Rate and a 6.1% Word Error Rate (WER) on the Eval92 evaluation subset. This model outperformed its monotask model counterpart by 0.7% absolute in WER and also achieved almost the same performance of 6.0% as a strong baseline phone-based Time Delay Neural Network (“TDNN-Phone+TR2”) model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Efthymios Georgiou|AUTHOR Efthymios Georgiou]], [[Charilaos Papaioannou|AUTHOR Charilaos Papaioannou]], [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]]
</p><p class="cpabstractcardaffiliationlist">NTUA, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1646–1650&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recognizing the emotional tone in spoken language is a challenging research problem that requires modeling not only the acoustic and textual modalities separately but also their cross-interactions. In this work, we introduce a hierarchical fusion scheme for sentiment analysis of spoken sentences. Two bidirectional Long-Short-Term-Memory networks (BiLSTM), followed by multiple fully connected layers, are trained in order to extract feature representations for each of the textual and audio modalities. The representations of the unimodal encoders are both fused at each layer and propagated forward, thus achieving fusion at the word, sentence and high/sentiment levels. The proposed approach of deep hierarchical fusion achieves state-of-the-art results for sentiment analysis tasks. Through an ablation study, we show that the proposed fusion method achieves greater performance gains over the unimodal baseline compared to other fusion approaches in the literature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andreas Triantafyllopoulos|AUTHOR Andreas Triantafyllopoulos]]^^1^^, [[Gil Keren|AUTHOR Gil Keren]]^^2^^, [[Johannes Wagner|AUTHOR Johannes Wagner]]^^1^^, [[Ingmar Steiner|AUTHOR Ingmar Steiner]]^^1^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^audEERING, Germany; ^^2^^Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1691–1695&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The use of deep learning (DL) architectures for speech enhancement has recently improved the robustness of voice applications under diverse noise conditions. These improvements are usually evaluated based on the perceptual quality of the enhanced audio or on the performance of automatic speech recognition (ASR) systems. We are interested instead in the usefulness of these algorithms in the field of speech emotion recognition (SER), and specifically in whether an enhancement architecture can effectively remove noise while preserving enough information for an SER algorithm to accurately identify emotion in speech. We first show how a scalable DL architecture can be trained to enhance audio signals in a large number of unseen environments, and go on to show how that can benefit common SER pipelines in terms of noise robustness. Our results show that incorporating a speech enhancement architecture is beneficial, especially for low signal-to-noise ratio (SNR) conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhixuan Li|AUTHOR Zhixuan Li]]^^1^^, [[Liang He|AUTHOR Liang He]]^^1^^, [[Jingyang Li|AUTHOR Jingyang Li]]^^2^^, [[Li Wang|AUTHOR Li Wang]]^^2^^, [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Ministry of Public Security, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1696–1700&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition (SER) is a challenging task: the complex emotional expressions make it difficult to discriminate different emotions; the unbalanced data misleads models to give biased predictions. In this work, we tackle these two problems by the angular softmax loss. First, we replace the vanilla softmax with angular softmax to learn emotional representations with strong discriminant power. Besides, inspired by its novel geometric interpretation, we establish a general calculation model and deduce a concise formula of decision domain. Based on these derivations, we propose our solution to data imbalance: class-specific angular softmax by which we can directly adjust decision domains of different emotion classes. Experimental results on the IEMOCAP corpus indicate significant improvements on two state-of-the-art models therefore demonstrate the effectiveness of our proposed methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Md. Asif Jalal|AUTHOR Md. Asif Jalal]]^^1^^, [[Erfan Loweimi|AUTHOR Erfan Loweimi]]^^2^^, [[Roger K. Moore|AUTHOR Roger K. Moore]]^^1^^, [[Thomas Hain|AUTHOR Thomas Hain]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Sheffield, UK; ^^2^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1701–1705&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion recognition from speech plays a significant role in adding emotional intelligence to machines and making human-machine interaction more natural. One of the key challenges from machine learning standpoint is to extract patterns which bear maximum correlation with the emotion information encoded in this signal while being as insensitive as possible to other types of information carried by speech. In this paper, we propose a novel temporal modelling framework for robust emotion classification using bidirectional long short-term memory network (BLSTM), CNN and Capsule networks. The BLSTM deals with the temporal dynamics of the speech signal by effectively representing forward/backward contextual information while the CNN along with the dynamic routing of the Capsule net learn temporal clusters which altogether provide a state-of-the-art technique for classifying the extracted patterns. The proposed approach was compared with a wide range of architectures on the FAU-Aibo and RAVDESS corpora and remarkable gain over state-of-the-art systems were obtained. For FAO-Aibo and RAVDESS 77.6% and 56.2% accuracy was achieved, respectively, which is 3% and 14% (absolute) higher than the best-reported result for the respective tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vikramjit Mitra|AUTHOR Vikramjit Mitra]]^^1^^, [[Sue Booker|AUTHOR Sue Booker]]^^2^^, [[Erik Marchi|AUTHOR Erik Marchi]]^^2^^, [[David Scott Farrar|AUTHOR David Scott Farrar]]^^2^^, [[Ute Dorothea Peitz|AUTHOR Ute Dorothea Peitz]]^^2^^, [[Bridget Cheng|AUTHOR Bridget Cheng]]^^2^^, [[Ermine Teves|AUTHOR Ermine Teves]]^^2^^, [[Anuj Mehta|AUTHOR Anuj Mehta]]^^2^^, [[Devang Naik|AUTHOR Devang Naik]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Apple, USA; ^^2^^Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1651–1655&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Millions of people reach out to digital assistants such as Siri every day, asking for information, making phone calls, seeking assistance, and much more. The expectation is that such assistants should understand the intent of the user’s query. Detecting the intent of a query from a short, isolated utterance is a difficult task. Intent cannot always be obtained from speech-recognized transcriptions. A transcription-driven approach can interpret what has been said but fails to acknowledge how it has been said, and as a consequence, may ignore the expression present in the voice. Our work investigates whether a system can reliably detect vocal expression in queries using acoustic and paralinguistic embedding. Results show that the proposed method offers a relative equal error rate (EER) decrease of 60% compared to a bag-of-word based system, corroborating that expression is significantly represented by vocal attributes, rather than being purely lexical. Addition of emotion embedding helped to reduce the EER by 30% relative to the acoustic embedding, demonstrating the relevance of emotion in expressive voice.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jack Parry|AUTHOR Jack Parry]], [[Dimitri Palaz|AUTHOR Dimitri Palaz]], [[Georgia Clarke|AUTHOR Georgia Clarke]], [[Pauline Lecomte|AUTHOR Pauline Lecomte]], [[Rebecca Mead|AUTHOR Rebecca Mead]], [[Michael Berger|AUTHOR Michael Berger]], [[Gregor Hofer|AUTHOR Gregor Hofer]]
</p><p class="cpabstractcardaffiliationlist">Speech Graphics, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1656–1660&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech Emotion Recognition (SER) is an important and challenging task for human-computer interaction. In the literature deep learning architectures have been shown to yield state-of-the-art performance on this task when the model is trained and evaluated on the same corpus. However, prior work has indicated that such systems often yield poor performance on unseen data. To improve the generalisation capabilities of emotion recognition systems one possible approach is cross-corpus training, which consists of training the model on an aggregation of different corpora. In this paper we present an analysis of the generalisation capability of deep learning models using cross-corpus training with six different speech emotion corpora. We evaluate the models on an unseen corpus and analyse the learned representations using the t-SNE algorithm, showing that architectures based on recurrent neural networks are prone to overfit the corpora present in the training set, while architectures based on convolutional neural networks (CNNs) show better generalisation capabilities. These findings indicate that (1) cross-corpus training is a promising approach for improving generalisation and (2) CNNs should be the architecture of choice for this approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bo Wang|AUTHOR Bo Wang]]^^1^^, [[Maria Liakata|AUTHOR Maria Liakata]]^^2^^, [[Hao Ni|AUTHOR Hao Ni]]^^2^^, [[Terry Lyons|AUTHOR Terry Lyons]]^^1^^, [[Alejo J. Nevado-Holgado|AUTHOR Alejo J. Nevado-Holgado]]^^1^^, [[Kate Saunders|AUTHOR Kate Saunders]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Oxford, UK; ^^2^^Alan Turing Institute, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1661–1665&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech emotion recognition (SER) remains a difficult task within human-computer interaction, despite increasing interest in the research community. One key challenge is how to effectively integrate short-term characterisation of speech segments with long-term information such as temporal variations. Motivated by the numerical approximation theory of stochastic differential equations (SDEs), we propose the novel use of path signatures. The latter provide a pathwise definition to solve SDEs, for the integration of short speech frames. Furthermore we propose a hierarchical tree structure of path signatures, to capture both global and local information. A simple tree-based convolutional neural network (TBCNN) is used for learning the structural information stemming from dyadic path-tree signatures. Our experimental results on a widely used benchmark dataset demonstrate comparable performance to complex neural network based systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Olga Egorow|AUTHOR Olga Egorow]]^^1^^, [[Tarik Mrech|AUTHOR Tarik Mrech]]^^2^^, [[Norman Weißkirchen|AUTHOR Norman Weißkirchen]]^^1^^, [[Andreas Wendemuth|AUTHOR Andreas Wendemuth]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Otto-von-Guericke-Universität Magdeburg, Germany; ^^2^^Fraunhofer IFF, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1666–1670&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The detection of different levels of physical load from speech has many applications: Besides telemedicine, non-contact detection of certain heart rate ranges can be useful for sports and other leisure time devices. Available approaches mainly use a high number of spectral and prosodic features. In this setting of typically small data sets, such as the Talk & Run data set and the Munich Biovoice Corpus, the high-dimensional feature spaces are only sparsely populated. Therefore, we aim at a reduction of the feature number using modern neural net inspired features: Bottleneck layer features, obtained from standard low-level descriptors via a feed-forward neural network, and activation map features, obtained from spectrograms via a convolutional neural network. We use these features for an SVM classification of high and low physical load and compare their performance. We also discuss the possibility of hyperparameter transfer of the extracting networks between different data sets. We show that even for limited amounts of data, deep learning based methods can bring a substantial improvement over “conventional” features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jinming Zhao|AUTHOR Jinming Zhao]], [[Shizhe Chen|AUTHOR Shizhe Chen]], [[Jingjun Liang|AUTHOR Jingjun Liang]], [[Qin Jin|AUTHOR Qin Jin]]
</p><p class="cpabstractcardaffiliationlist">Renmin University of China, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1671–1675&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In dyadic human-human interactions, a more complex interaction scenario, a person’s emotional state can be influenced by both self emotional evolution and the interlocutor’s behaviors. However, previous speech emotion recognition studies infer the speaker’s emotional state mainly based on the targeted speech segment without considering the above two contextual factors. In this paper, we propose an Attentive Interaction Model (AIM) to capture both self- and interlocutor-context to enhance the speech emotion recognition in the dyadic dialog. The model learns to dynamically focus on long-term relevant contexts of the speaker and the interlocutor via the self-attention mechanism and fuse the adaptive context with the present behavior to predict the current emotional state. We carry out extensive experiments on the IEMOCAP corpus for dimensional emotion recognition in arousal and valence. Our model achieves on par performance with baselines for arousal recognition and significantly outperforms baselines for valence recognition, which demonstrates the effectiveness of the model to select useful contexts for emotion recognition in dyadic interactions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shun-Chang Zhong|AUTHOR Shun-Chang Zhong]]^^1^^, [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]]^^1^^, [[Chun-Min Chang|AUTHOR Chun-Min Chang]]^^1^^, [[Yi-Ching Liu|AUTHOR Yi-Ching Liu]]^^2^^, [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Tsing Hua University, Taiwan; ^^2^^National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1676–1680&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Personality has not only been studied at an individual level, its composite effect between team members has also been indicated to be related to the overall group performance. In this work, we propose a Personality Composite-Network (P-CompN) architecture that models the group-level personality composition with its intertwining effect being integrated into the network modeling of team members vocal behaviors in order to predict the group performances during collaborative problem solving tasks. In specific, we evaluate our proposed P-CompN in a large-scale dataset consist of three-person small group interactions. Our framework achieves a promising group performance classification accuracy of 70.0%, which outperforms baseline model of using only vocal behaviors without personality attributes by 14.4% absolutely. Our analysis further indicates that our proposed personality composite network impacts the vocal behavior models more significantly on the high performing groups versus the low performing groups.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gao-Yi Chao|AUTHOR Gao-Yi Chao]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chun-Min Chang|AUTHOR Chun-Min Chang]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1681–1685&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Issues of mismatch between databases remain a major challenge in performing emotion recognition on target unlabeled corpus from labeled source data. While studies have shown that by means of aligning source and target data distribution to learn a common feature space can mitigate these issues partially, they neglect the effect of distortion in emotion semantics across different databases. This distortion is especially crucial when regressing higher level emotion attribute such as valence. In this work, we propose a maximum regression discrepancy (MRD) network, which enforces cross corpus semantic consistency by learning a common acoustic feature space that minimizes discrepancy on those maximally-distorted samples through adversarial training. We evaluate our framework on two large emotion corpus, the USC IEMOCAP and the MSP-IMPROV, for the task of cross corpus valence regression from speech. Our MRD demonstrates a significant 10% and 5% improvement in concordance correlation coefficients (CCC) compared to using baseline source-only methods, and we also show that it outperforms two state-of-art domain adaptation techniques. Further analysis reveals that our model is more effective in reducing semantic distortion on low valence than high valence samples.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[Tan Lee|AUTHOR Tan Lee]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1686–1690&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose to combine the deep learning of feature representation with multiple instance learning (MIL) to recognize emotion from speech. The key idea of our approach is to first consciously classify the emotional state of each segment. Then the utterance-level classification is constructed as an aggregation of the segment-level decisions. For the segment-level classification, we attempt two different deep neural network (DNN) architectures called SegMLP and SegCNN, respectively. SegMLP is a multilayer perceptron (MLP) that extracts high-level feature representation from the manually designed perceptual features, and SegCNN is a convolutional neural network (CNN) that automatically learn emotion-specific features from the log Mel filterbanks. Extensive emotion recognition experiments are carried out on the CASIA corpus and the IEMOCAP database. We find that: (1) the aggregation of segment-level decisions provides richer information than the statistics over the low-level descriptors (LLDs) across the whole utterance; (2) automatic feature learning outperforms manual features. Our experimental results are also compared with those of state-of-the-art methods, further demonstrating the effectiveness of the proposed approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sonia d’Apolito|AUTHOR Sonia d’Apolito]], [[Barbara Gili Fivela|AUTHOR Barbara Gili Fivela]]
</p><p class="cpabstractcardaffiliationlist">Università del Salento, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1706–1710&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the interaction between the characteristics of both L1 and L2 phonetic-phonological systems and how the context, in terms of the amount of information available (less vs more information), may influence the accuracy in producing L2 sounds as well as speech fluency. Specifically, it focuses on how French learners of Italian as L2, representing two different competence levels (lower and higher), realize geminates (non-native sounds) in two different contexts (less and more rich). A rich context is expected to induce lower accuracy. Acoustic data of nine subjects (three beginners, three advanced and three natives as control) were collected and analyzed in order to observe: 1) the realization of geminates (duration of the consonant and preceding vowel as an index of accuracy); and 2) the speech fluency (number and duration of disfluencies; speech/articulation rate). Results suggest that learners’ productions are affected by L1, above all in the case of beginners, who show a lower degree of accuracy. As regards the accuracy and context interaction, results show that the production of geminates is more accurate (longer duration) in poor than in rich context. Further, a higher number of disfluencies is found in rich than in poor context.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xizi Wei|AUTHOR Xizi Wei]]^^1^^, [[Melvyn Hunt|AUTHOR Melvyn Hunt]]^^2^^, [[Adrian Skilling|AUTHOR Adrian Skilling]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Birmingham, UK; ^^2^^Apple, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1751–1755&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A deep neural network (DNN)-based model has been developed to predict non-parametric distributions of durations of phonemes in specified phonetic contexts and used to explore which factors influence durations most. Major factors in US English are pre-pausal lengthening, lexical stress, and speaking rate. The model can be used to check that text-to-speech (TTS) training speech follows the script and words are pronounced as expected. Duration prediction is poorer with training speech for automatic speech recognition (ASR) because the training corpus typically consists of single utterances from many speakers and is often noisy or casually spoken. Low probability durations in ASR training material nevertheless mostly correspond to non-standard speech, with some having disfluencies. Children’s speech is disproportionately present in these utterances, since children show much more variation in timing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Janina Mołczanow|AUTHOR Janina Mołczanow]], [[Beata Łukaszewicz|AUTHOR Beata Łukaszewicz]], [[Anna Łukaszewicz|AUTHOR Anna Łukaszewicz]]
</p><p class="cpabstractcardaffiliationlist">University of Warsaw, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1756–1760&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The paper presents the results of a pilot study investigating the relationship between vowel quality and duration in Ukrainian. In this language, lexical stress is cued by increased duration; smaller but systematic differences in length occur between unstressed, rhythmic stress-bearing, and pretonic syllables. The presence of several degrees of lengthening within one word makes it possible to test the long-established theories of vowel reduction posing a direct link between decreased duration and vowel undershoot. Overall, the analysis of the aggregated data collected from four native speakers of Ukrainian points to a strong correlation between decreasing duration and the undershoot of F1 targets. However, in separate by-position and by-speaker analyses, no correlation between F1 and duration is observed in the positions of rhythmic and lexical stress. We thus conclude that the stability of the F1 target  vis-à-vis temporal parameters may constitute another parameter expressing metrical prominence. In addition, our data suggests that formant undershoot may be affected by an articulatory effort. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Stephanie Berger|AUTHOR Stephanie Berger]]^^1^^, [[Oliver Niebuhr|AUTHOR Oliver Niebuhr]]^^2^^, [[Margaret Zellers|AUTHOR Margaret Zellers]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Christian-Albrechts-Universität zu Kiel, Germany; ^^2^^University of Southern Denmark, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1761–1765&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is a first investigation into the influence of the pitch range and the intensity variation on the number of subscribers, views and likes of YouTube Creators. A total of ten minutes of speech material from five English and five North-American YouTubers was analyzed. The results for pitch range and intensity variation suggest that an increase in both parameters results in higher subscriber counts. For views, there was no influence of pitch range, but an increase in intensity variation results in a lower number of views. Pitch range and intensity variation had no influence on the like count. Furthermore, both origin and gender had an influence on the results. Ultimately, this study will provide further information for the phonetic research of charisma (i.e., the perceived charm, competence, power, and persuasiveness of a speaker), as it is suspected that the acoustic features that have so far been connected to charisma also play an important role in the success of a YouTuber and their channel.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shan Luo|AUTHOR Shan Luo]]
</p><p class="cpabstractcardaffiliationlist">Yangzhou University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1766–1770&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the ultimate goal of understanding the production planning scope, this study manipulates phonetic information (place of articulation and voicing) and measures three acoustic cues to analyze consonant clusters across words produced by English (L1) and Mandarin (L2) speakers. We continue to explore a) how phonetic detail interacts with prosodic boundary in modulating surface realization, and b) the roles of phonetic information in speech planning motor control. The results show that L2 speakers exhibited different acoustic deviations varying with their proficiency level. The group with lower L2 proficiency significantly deviated from the L1 group in release likelihood and closure shortening, while the higher-proficiency group exhibited less nativelike performance in terms of closure durations. The results also discover that all speakers are subject to language-independent articulatory constraint at word boundaries, while language-specific phonetic detail accounts for more nonnative deviations. The core findings highlight a long-distance speech planning scope in native speech, with cross-word phonetic information interacting with prosodic encoding. It is argued that phonology applies blindly across words and is independent of lexical cognitive load.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dina El Zarka|AUTHOR Dina El Zarka]]^^1^^, [[Barbara Schuppler|AUTHOR Barbara Schuppler]]^^2^^, [[Francesco Cangemi|AUTHOR Francesco Cangemi]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Graz, Austria; ^^2^^Technische Universität Graz, Austria; ^^3^^Universität zu Köln, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1771–1775&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigates acoustic cues (duration, scaling and alignment of peaks and valleys) to the prosodic realization of  topics and  narrow subject foci in a declarative SVO sentence in Egyptian Arabic. Morpho-syntactically identical sentences were elicited in appropriately designed contexts from 18 native speakers by means of a question-answer paradigm. The results show that the stressed syllable of a focused word is longer than the stressed syllable of the same word in topic condition. Additionally, the peaks of foci are generally scaled higher than those of topics. These differences clearly point to varying degrees of prosodic prominence. Furthermore, the alignment of the F0 peak and the subsequent low endpoint of a rising-falling tonal contour is earlier in foci than in topics, indicating that focus is signaled by an early sharp fall whereas the falling part of the tonal gesture starts later and is shallower in the case of a topic. Overall, our results suggest that narrow subject foci and topics tend to be associated with different pitch events.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kowovi Comivi Alowonou|AUTHOR Kowovi Comivi Alowonou]]^^1^^, [[Jianguo Wei|AUTHOR Jianguo Wei]]^^1^^, [[Wenhuan Lu|AUTHOR Wenhuan Lu]]^^1^^, [[Zhicheng Liu|AUTHOR Zhicheng Liu]]^^1^^, [[Kiyoshi Honda|AUTHOR Kiyoshi Honda]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^JAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1776–1780&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In order to investigate the difference in Ewe males and Ewe females during the production of Ewe vowels, results from the comparative quantitative and qualitative assessments of tongue shape and movement using ultrasound imaging as well as the comparative evaluation of F1 and F2 frequency values from data collected from 9 Ewe male speakers and 6 Ewe female speakers, were presented in this study. The results showed that vowels are produced with higher formant frequencies by Ewe female speakers compared to Ewe male speakers, except for the vowel /ε/ produced with a lower F1 frequency by Ewe females. The articulatory results showed a higher and more forwarder tongue configuration for Ewe male compared to female counterparts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nisad Jamakovic|AUTHOR Nisad Jamakovic]]^^1^^, [[Robert Fuchs|AUTHOR Robert Fuchs]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^WWU Münster, Germany; ^^2^^Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1711–1715&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Postcolonial varieties of English, used in countries such as Nigeria, the Philippines and India, are influenced by local (“endonormative”) and external (“exonormative”) forces, the latter often in the form of British/American English. In the ensuing stylistic continuum, informal speech is more endonormatively oriented than formal/educated speech — which is, in turn, clearly distinguishable from British/American English. The formal subvariety is often regarded as the incipient local standard and is commonly less marked by L1 influence than the informal subvariety.

Nigerian English (NigE) is the most widely spoken African variety of English, but empirical/quantitative descriptions are rare. In this pilot study, we present an acoustic analysis of eleven phonological monophthongs and two phonological diphthongs that are commonly monophthongised. A total of 811 occurrences, produced in formal contexts by nine educated speakers of NigE with L1 Igbo, were extracted from the ICE Nigeria corpus and analysed acoustically (Lobanov-normalised vowel formants at vowel midpoint).

Results show that the NigE speakers reduced the thirteen-vowel system to nine distinct phonemes, closely resembling the L1 Igbo vowel inventory. This result suggests substantial L1 influence even at the level of Formal NigE.</p></div>
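For readers unfamiliar with the normalisation step mentioned above, here is a minimal sketch of Lobanov (z-score) formant normalisation, which standardises each formant per speaker. This is not the authors' code, and the example values are hypothetical.

```python
# Minimal sketch of Lobanov normalisation: z-score each formant per speaker.
# Not the authors' implementation; input values are hypothetical.
import numpy as np

def lobanov(formant_hz):
    """formant_hz: all F1 (or F2) measurements for one speaker, in Hz."""
    f = np.asarray(formant_hz, dtype=float)
    return (f - f.mean()) / f.std()

print(lobanov([310.0, 420.0, 650.0, 700.0]))  # hypothetical F1 values
```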
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pablo Arantes|AUTHOR Pablo Arantes]]^^1^^, [[Anders Eriksson|AUTHOR Anders Eriksson]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidade Federal de São Paulo, Brazil; ^^2^^Stockholm University, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1716–1720&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we outline a methodology to quantify the degree of similarity between pairs of f,,0,, distributions based on the Anderson-Darling measure that underlies its namesake goodness-of-fit test. The procedure emphasizes differences due to more fine-grained f,,0,, modulations rather than differences in measures of central tendency, such as the mean and median. In order to assess the procedure’s usefulness for speaker comparison, we applied it to a multilingual corpus in which participants contributed speech delivered in three speaking styles. The similarity measure was calculated separately as function of speaking style and speaker. Between-speaker variability (different speakers, same style) in distribution similarity varied significantly between styles — spontaneous interview shows greater variability than read sentences and word list in five languages (English, French, Italian, Portuguese and Swedish); in Estonian and German, read sentences yield more variability. Within-speaker variability (same speaker, different styles) levels are lower than between-speaker in the style that exhibit the greatest variability. The results point to the potential use of the proposed methodology as a way to identify possible idiosyncratic traits in f,,0,, distributions. Also, they further demonstrate the effect of speaking styles on intonation patterns.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Niamh E. Kelly|AUTHOR Niamh E. Kelly]], [[Lara Keshishian|AUTHOR Lara Keshishian]]
</p><p class="cpabstractcardaffiliationlist">AUB, Lebanon</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1721–1725&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Research on Western Armenian has described it as having a contrast between voiceless aspirated stops and affricates, and voiced stops and affricates [1, 2]. The variety of Western Armenian spoken by a large population in Lebanon has not yet been examined phonetically, to determine the acoustic correlates of this contrast. The current study examines the alveolar and postalveolar affricates and alveolar stops (voiceless aspirated and voiced) in both word-initial and word-medial position, using nonsense words written in the Armenian script. The results indicate that voiced sounds have prevoicing, voiceless affricates have some aspiration, but voiceless stops have very short VOT, which aligns better with an analysis of them being classified as unaspirated. It was also found that position in the word does not affect VOT, duration of the closure or frication.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adèle Jatteau|AUTHOR Adèle Jatteau]]^^1^^, [[Ioana Vasilescu|AUTHOR Ioana Vasilescu]]^^1^^, [[Lori Lamel|AUTHOR Lori Lamel]]^^1^^, [[Martine Adda-Decker|AUTHOR Martine Adda-Decker]]^^1^^, [[Nicolas Audibert|AUTHOR Nicolas Audibert]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIMSI (UPR 3251), France; ^^2^^LPP (UMR 7018), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1726–1730&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigates the tendency towards word-final devoicing of voiced obstruents in Standard French, and how devoicing is influenced by domain, speech style, manner and place of articulation. Three large corpora with automatic segmentations produced by forced alignment are used: ESTER, ETAPE and NCCFr. A voicing-ratio is established for each obstruent via F0 extraction in Praat, and the percentage of fully voiced segments is computed. We find a salient pattern of devoicing before pause, with no clear effect of speech style. Fricatives devoice more than stops, and posterior fricatives devoice more than anterior ones. Since voicing plays a central role in the cross-linguistic pattern of word-final [voice] neutralisation, this study gives insight into the potential phonetic precursors of this process.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chih-Hsiang Huang|AUTHOR Chih-Hsiang Huang]], [[Huang-Cheng Chou|AUTHOR Huang-Cheng Chou]], [[Yi-Tong Wu|AUTHOR Yi-Tong Wu]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]], [[Yi-Wen Liu|AUTHOR Yi-Wen Liu]]
</p><p class="cpabstractcardaffiliationlist">National Tsing Hua University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1731–1735&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Being able to distinguish the differences between deceptive and truthful statements in a dialogue is an important skill in daily life. Extensive studies on the acoustic features of deceptive English speech have been reported, but such research in Mandarin is relatively scarce. We constructed a Mandarin deception database of daily dialogues from native speakers in Taiwan. College students were recruited to participate in a game in which they were encouraged to lie and convince their opponents of experiences that they did not have. After data collection, acoustic-prosodic features were extracted. The statistics of these features were calculated so that the differences between truthful and deceptive sentences, both as they were intended and perceived, can be compared. Results indicate that different people tend to use different acoustic features when telling a lie; the participants could be put into 10 categories in a dendrogram, with an exception of 31 people from whom no acoustic indicators for deception were found. Without considering interpersonal differences, our best classifier reached an F1 score of 53.37% in distinguishing deceptive and truthful segmentation units. We hope to present this new database as a corpus for future studies on deception in Mandarin conversations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Barbara Schuppler|AUTHOR Barbara Schuppler]]^^1^^, [[Margaret Zellers|AUTHOR Margaret Zellers]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Graz, Austria; ^^2^^Christian-Albrechts-Universität zu Kiel, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1736–1740&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigates the acoustic cues used to mark prosodic boundaries in two varieties of German, with a specific focus on variations in production of fortis and lenis plosives. We extracted prosodic-boundary-adjacent and non-boundary-adjacent plosives from GRASS (Austrian German) and the Kiel Corpus of Read Speech (Northern German), and investigated closure duration, burst features, and duration characteristics of the surrounding segments. We find that closure and burst duration features, as well as duration of a preceding adjacent segment, vary consistently in relationship to the presence or absence of a prosodic boundary, but that the relative weights of these features differ in the two varieties studied.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cibu Johny|AUTHOR Cibu Johny]], [[Alexander Gutkin|AUTHOR Alexander Gutkin]], [[Martin Jansche|AUTHOR Martin Jansche]]
</p><p class="cpabstractcardaffiliationlist">Google, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1741–1745&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The concept of a phoneme arose historically as a theoretical abstraction that applies language-internally. Using phonemes and phonological features in cross-linguistic settings raises an important question of conceptual validity: Are contrasts that are meaningful within a language also empirically robust across languages? This paper develops a method for assessing the crosslinguistic consistency of phonological features in phoneme inventories. The method involves training separate binary neural classifiers for several phonological contrast in audio spans centered on particular segments within continuous speech. To assess cross-linguistic consistency, these classifiers are evaluated on held-out languages and classification quality is reported. We apply this method to several common phonological contrasts, including vowel height, vowel frontness, and retroflex consonants, in the context of multi-speaker corpora for ten languages from three language families (Indo-Aryan, Dravidian, and Malayo-Polynesian). We empirically evaluate and discuss the consistency of phonological contrasts derived from features found in phonological ontologies such as  PanPhon and PHOIBLE.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fanny Guitard-Ivent|AUTHOR Fanny Guitard-Ivent]]^^1^^, [[Gabriele Chignoli|AUTHOR Gabriele Chignoli]]^^1^^, [[Cécile Fougeron|AUTHOR Cécile Fougeron]]^^1^^, [[Laurianne Georgeton|AUTHOR Laurianne Georgeton]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LPP (UMR 7018), France; ^^2^^SCPTS, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1746–1750&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Past results have suggested that initial strengthening (IS) effects target the contrastive phonetic properties of segments, with a maximization of acoustic contrasts in initial position of strong prosodic domains. Here, we investigate whether IS effects translate into a better acoustic discriminability within the French oral vowels system. Discriminability is assessed on the basis of classification results of two types of classifiers: a linear discriminant analysis (LDA) based on the four formants frequencies, and a deep convolutional neural network (CNN) based on spectrograms. The test set includes 720 exemplars of /i, y, e, ε, a, x, u, o, ɔ/ (with /x/=/ø, œ/) produced in a labial context, either in intonational phrase initial (IPi) or word initial (Wi) position. Classifiers were trained using a set of 4500 vowels extracted from a large read speech corpus. Results show a better discriminability of vowels (overall better classification rate) in IPi than in Wi with the two methods. Less confusion in IPi is found between rounded and unrounded, and between back and front vowels, but not between the vowels along the four-way height contrast. Less confusion between peripheral and central vowels also expresses a maximization of contrasts within the acoustic space in IPi position.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ya’nan Guo|AUTHOR Ya’nan Guo]]^^1^^, [[Ziping Zhao|AUTHOR Ziping Zhao]]^^2^^, [[Yide Ma|AUTHOR Yide Ma]]^^3^^, [[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Augsburg, Germany; ^^2^^Tianjin Normal University, China; ^^3^^Lanzhou University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1781–1785&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech augmentation is a common and effective strategy to avoid overfitting and improve on the robustness of an emotion recognition model. In this paper, we investigate for the first time the intrinsic attributes in a speech signal using the multi-resolution analysis theory and the Hilbert-Huang Spectrum, with the goal of developing a robust speech augmentation approach from raw speech data. Specifically, speech decomposition in a double tree complex wavelet transform domain is realized, to obtain sub-speech signals; then, the Hilbert Spectrum using Hilbert-Huang Transform is calculated for each sub-band to capture the noise content in unseen environments with the voice restriction to 100–4000 Hz; finally, the speech-specific noise that varies with the speaker individual, scenarios, environment, and voice recording equipment, can be reconstructed from the top two high-frequency sub-bands to enhance the raw signal. Our proposed speech augmentation is demonstrated using five robust machine learning architectures based on the RAVDESS database, achieving up to 9.3% higher accuracy compared to the performance on raw data for an emotion recognition task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Muhammed Shifas P.V.|AUTHOR Muhammed Shifas P.V.]], [[Nagaraj Adiga|AUTHOR Nagaraj Adiga]], [[Vassilis Tsiaras|AUTHOR Vassilis Tsiaras]], [[Yannis Stylianou|AUTHOR Yannis Stylianou]]
</p><p class="cpabstractcardaffiliationlist">University of Crete, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1826–1830&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we suggest a new parallel, non-causal and shallow waveform domain architecture for speech enhancement based on FFTNet, a neural network for generating high quality audio waveform. In contrast to other waveform based approaches like WaveNet, FFTNet uses an initial wide dilation pattern. Such an architecture better represents the long term correlated structure of speech in the time domain, where noise is usually highly non-correlated, and therefore it is suitable for waveform domain based speech enhancement. To further strengthen this feature of FFTNet, we suggest a non-causal FFTNet architecture, where the present sample in each layer is estimated from the past and future samples of the previous layer. By suggesting a shallow network and applying non-causality within certain limits, the suggested FFTNet for speech enhancement (SE-FFTNet) uses much fewer parameters compared to other neural network based approaches for speech enhancement like WaveNet and SEGAN. Specifically, the suggested network has considerably reduced model parameters: 32% fewer compared to WaveNet and 87% fewer compared to SEGAN. Finally, based on subjective and objective metrics, SE-FFTNet outperforms WaveNet in terms of enhanced signal quality, while it provides equally good performance as SEGAN.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[D.T. Braithwaite|AUTHOR D.T. Braithwaite]], [[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]
</p><p class="cpabstractcardaffiliationlist">Victoria University of Wellington, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1831–1835&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent machine learning based approaches to speech enhancement operate in the time domain and have been shown to outperform the classical enhancement methods. Two such models are SE-GAN and SE-WaveNet, both of which rely on complex neural network architectures, making them expensive to train. We propose using the Variance Constrained Autoencoder (VCAE) for speech enhancement. Our model uses a more straightforward neural network structure than competing solutions and is a natural model for the task of speech enhancement. We demonstrate experimentally that the proposed enhancement model outperforms SE-GAN and SE-WaveNet in terms of perceptual quality of enhanced signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiang Hao|AUTHOR Xiang Hao]], [[Xiangdong Su|AUTHOR Xiangdong Su]], [[Zhiyu Wang|AUTHOR Zhiyu Wang]], [[Hui Zhang|AUTHOR Hui Zhang]], [[Batushiren|AUTHOR Batushiren]]
</p><p class="cpabstractcardaffiliationlist">Inner Mongolia University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1786–1790&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement at extremely low signal-to-noise ratio (SNR) condition is a very challenging problem and rarely investigated in previous works. This paper proposes a robust speech enhancement approach (UNetGAN) based on U-Net and generative adversarial learning to deal with this problem. This approach consists of a generator network and a discriminator network, which operate directly in the time domain. The generator network adopts a U-Net like structure and employs dilated convolution in the bottleneck of it. We evaluate the performance of the UNetGAN at low SNR conditions (up to -20dB) on the public benchmark. The result demonstrates that it significantly improves the speech quality and substantially outperforms the representative deep learning models, including SEGAN, cGAN fo SE, Bidirectional LSTM using phase-sensitive spectrum approximation cost function (PSA-BLSTM) and Wave-U-Net regarding Short-Time Objective Intelligibility (STOI) and Perceptual evaluation of speech quality (PESQ).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Santiago Pascual|AUTHOR Santiago Pascual]]^^1^^, [[Joan Serrà|AUTHOR Joan Serrà]]^^2^^, [[Antonio Bonafonte|AUTHOR Antonio Bonafonte]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universitat Politècnica de Catalunya, Spain; ^^2^^Telefónica I+D, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1791–1795&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The speech enhancement task usually consists of removing additive noise or reverberation that partially mask spoken utterances, affecting their intelligibility. However, little attention is drawn to other, perhaps more aggressive signal distortions like clipping, chunk elimination, or frequency-band removal. Such distortions can have a large impact not only on intelligibility, but also on naturalness or even speaker identity, and require of careful signal reconstruction. In this work, we give full consideration to this generalized speech enhancement task, and show it can be tackled with a time-domain generative adversarial network (GAN). In particular, we extend a previous GAN-based speech enhancement system to deal with mixtures of four types of aggressive distortions. Firstly, we propose the addition of an adversarial acoustic regression loss that promotes a richer feature extraction at the discriminator. Secondly, we also make use of a two-step adversarial training schedule, acting as a warm up-and-fine-tune sequence. Both objective and subjective evaluations show that these two additions bring improved speech reconstructions that better match the original speaker identity and naturalness.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaoqi Li|AUTHOR Xiaoqi Li]], [[Yaxing Li|AUTHOR Yaxing Li]], [[Meng Li|AUTHOR Meng Li]], [[Shan Xu|AUTHOR Shan Xu]], [[Yuanjie Dong|AUTHOR Yuanjie Dong]], [[Xinrong Sun|AUTHOR Xinrong Sun]], [[Shengwu Xiong|AUTHOR Shengwu Xiong]]
</p><p class="cpabstractcardaffiliationlist">Wuhan University of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1796–1800&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Convolution neural networks (CNNs) are achieving increasing attention for the speech enhancement task recently. However, the convolutional operations only process a local neighborhood (several nearest neighboring neurons) at a time across either space or time direction. The long-range dependencies can only be captured when the convolutional operations are applied recursively, but the problems of computationally inefficient and optimization difficulties are introduced. Inspired by the recent impressive performance of the non-local module in many computer vision tasks, we propose a convolutional neural network with non-local module for speech enhancement in this paper. The non-local operations are capable of capturing the global information in the frequency domain through passing information between distant time-frequency units. The non-local operations are able to set the dimension of the input as an arbitrary value, which results in the easy integration with our proposed network framework. Experimental results demonstrate that the proposed method not only improves the computational efficiency significantly but also outperforms the competing methods in terms of objective speech intelligibility and quality metrics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-Chen Lin|AUTHOR Yu-Chen Lin]]^^1^^, [[Yi-Te Hsu|AUTHOR Yi-Te Hsu]]^^2^^, [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^2^^, [[Tei-Wei Kuo|AUTHOR Tei-Wei Kuo]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Taiwan University, Taiwan; ^^2^^Academia Sinica, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1801–1805&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Numerous compression and acceleration techniques achieved state-of-the-art results for classification tasks in speech processing. However, the same techniques produce unsatisfactory performance for regression tasks, because of the different natures of classification and regression tasks. This paper presents a novel integer-adder deep neural network (IA-Net), which compresses model size and accelerates the inference process in speech enhancement, an important task in speech-signal processing, by replacing the floating-point multiplier with an integer-adder. The experimental results show that the inference time of IA-Net can be significantly reduced by 20% and the model size can be compressed by 71.9% without any performance degradation. To the best of our knowledge, this is the first study that decreases the inference time and compresses the model size, simultaneously, while producing good performance for speech enhancement. Based on the promising results, we believe that the proposed framework can be deployed in various mobile and edge-computing devices.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Li Chai|AUTHOR Li Chai]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1806–1810&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a Kullback-Leibler divergence (KLD) regularized approach to adapting speaker-independent (SI) speech enhancement model based on regression deep neural networks (DNNs) to another speaker-dependent (SD) model using a tiny amount of speaker-specific adaptation data. This algorithm adapts the DNN model conservatively by forcing the conditional target distribution estimated from the SD model to be close to that from the SI model. The constraint is realized by adding KLD regularization to our previously proposed maximum likelihood objective function. Experimental results demonstrate that, even with only 10 seconds of SD adaptation data, the proposed framework consistently achieves speech intelligibility improvements under all 15 unseen noise types evaluated and at all signal-to-noise ratio levels for all 8 test speakers from the WSJ0 evaluation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jorge Llombart|AUTHOR Jorge Llombart]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Luis Vicente|AUTHOR Luis Vicente]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1811–1815&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a speech enhancement method which exploits the high potential of residual connections in a Wide Residual Network architecture. This is supported on single dimensional convolutions computed alongside the time domain, which is a powerful approach to process contextually correlated representations through the temporal domain, such as speech feature sequences. We find the residual mechanism extremely useful for the enhancement task since the signal always has a linear shortcut and the non-linear path enhances it in several steps by adding or subtracting corrections. The enhancement capability of the proposal is assessed by objective quality metrics evaluated with simulated and real samples of reverberated speech signals. Results show that the proposal outperforms the state-of-the-art method called WPE, which is known to effectively reduce reverberation and greatly enhance the signal. The proposed model, trained with artificial synthesized reverberation data, was able to generalize to real room impulse responses for a variety of conditions (e.g. different room sizes,  RT,,60,,, near & far field). Furthermore, it achieves accuracy for real speech with reverberation from two different datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chandan K.A. Reddy|AUTHOR Chandan K.A. Reddy]], [[Ebrahim Beyrami|AUTHOR Ebrahim Beyrami]], [[Jamie Pool|AUTHOR Jamie Pool]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Sriram Srinivasan|AUTHOR Sriram Srinivasan]], [[Johannes Gehrke|AUTHOR Johannes Gehrke]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1816–1820&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Background noise is a major source of quality impairments in Voice over Internet Protocol (VoIP) and Public Switched Telephone Network (PSTN) calls. Recent work shows the efficacy of deep learning for noise suppression, but the datasets have been relatively small compared to those used in other domains (e.g., ImageNet) and the associated evaluations have been more focused. In order to better facilitate deep learning research in Speech Enhancement, we present a noisy speech dataset (MS-SNSD) that can scale to arbitrary sizes depending on the number of speakers, noise types, and Speech to Noise Ratio (SNR) levels desired. We show that increasing dataset sizes increases noise suppression performance as expected. In addition, we provide an open-source evaluation methodology to evaluate the results subjectively at scale using crowdsourcing, with a reference algorithm to normalize the results. To demonstrate the dataset and evaluation framework we apply it to several noise suppressors and compare the subjective Mean Opinion Score (MOS) with objective quality measures such as SNR, PESQ, POLQA, and VISQOL and show why MOS is still required. Our subjective MOS evaluation is the first large scale evaluation of Speech Enhancement algorithms that we are aware of.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nagaraj Adiga|AUTHOR Nagaraj Adiga]]^^1^^, [[Yannis Pantazis|AUTHOR Yannis Pantazis]]^^2^^, [[Vassilis Tsiaras|AUTHOR Vassilis Tsiaras]]^^1^^, [[Yannis Stylianou|AUTHOR Yannis Stylianou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Crete, Greece; ^^2^^FORTH, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1821–1825&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The quality of speech synthesis systems can be significantly deteriorated by the presence of background noise in the recordings. Despite the existence of speech enhancement techniques for effectively suppressing additive noise under low signal-to-noise (SNR) conditions, these techniques have been neither designed nor tested in speech synthesis tasks where background noise has relatively lower energy. In this paper, we propose a speech enhancement technique based on generative adversarial networks (GANs) which acts as a preprocessing step of speech synthesis. Motivated by the speech enhancement generative adversarial network (SEGAN) approach and recent advances in deep learning, we propose to use Wasserstein GAN (WGAN) with gradient penalty and gated activation functions to the autoencoder network of SEGAN. We studied the impact of the proposed method on a data set consisting of 28 speakers and different noise types with 3 different SNR level. The effectiveness of the proposed method in the context of speech synthesis is demonstrated through the training of WaveNet vocoder.  We compare our method against SEGAN. Both subjective and objective metrics confirm that the proposed speech enhancement approach outperforms SEGAN in terms of speech synthesis quality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Konstantinos Kyriakopoulos|AUTHOR Konstantinos Kyriakopoulos]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1836–1840&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A speaker’s rhythm contributes to the intelligibility of their speech and can be characteristic of their language and accent. For non-native learners of a language, the extent to which they match its natural rhythm is an important predictor of their proficiency. As a learner improves, their rhythm is expected to become less similar to their L1 and more to the L2. Metrics based on the variability of the durations of vocalic and consonantal intervals have been shown to be effective at detecting language and accent. In this paper, pairwise variability (PVI, CCI) and variance (varcoV, varcoC) metrics are first used to predict proficiency and L1 of non-native speakers taking an English spoken exam. A deep learning alternative to generalise these features is then presented, in the form of a tunable duration embedding, based on attention over an RNN over durations. The RNN allows relationships beyond pairwise to be captured, while attention allows sensitivity to the different relative importance of durations. The system is trained end-to-end for proficiency and L1 prediction and compared to the baseline. The values of both sets of features for different proficiency levels are then visualised and compared to native speech in the L1 and the L2.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seung Hee Yang|AUTHOR Seung Hee Yang]], [[Minhwa Chung|AUTHOR Minhwa Chung]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1881–1885&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/1478" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Self-imitating feedback is an effective and learner-friendly method for non-native learners in Computer-Assisted Pronunciation Training. Acoustic characteristics in native utterances are extracted and transplanted onto learner’s own speech input, and given back to the learner as a corrective feedback. Previous works focused on speech conversion using prosodic transplantation techniques based on PSOLA algorithm. Motivated by the visual differences found in spectrograms of native and non-native speeches, we investigated applying GAN to generate self-imitating feedback by utilizing generator’ s ability through adversarial training. Because this mapping is highly under-constrained, we also adopt cycle consistency loss to encourage the output to preserve the global structure, which is shared by native and non-native utterances. Trained on 97,200 spectrogram images of short utterances produced by native and non-native speakers of Korean, the generator is able to successfully transform the non-native spectrogram input to a spectrogram with properties of self-imitating feedback. Furthermore, the transformed spectrogram shows segmental corrections that cannot be obtained by prosodic transplantation. Perceptual test comparing the self-imitating and correcting abilities of our method with the baseline PSOLA method shows that the generative approach with cycle consistency loss is promising.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Danny Merkx|AUTHOR Danny Merkx]], [[Stefan L. Frank|AUTHOR Stefan L. Frank]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1841–1845&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Humans learn language by interaction with their environment and listening to other humans. It should also be possible for computational models to learn language directly from speech but so far most approaches require text. We improve on existing neural network approaches to create visually grounded embeddings for spoken utterances. Using a combination of a multi-layer GRU, importance sampling, cyclic learning rates, ensembling and vectorial self-attention our results show a remarkable increase in image-caption retrieval performance over previous work. Furthermore, we investigate which layers in the model learn to recognise words in the input. We find that deeper network layers are better at encoding word presence, although the final layer has slightly lower performance. This shows that our visually grounded sentence encoder learns to recognise words from the input even though it is not explicitly trained for word recognition.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lucy Skidmore|AUTHOR Lucy Skidmore]], [[Roger K. Moore|AUTHOR Roger K. Moore]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1846–1850&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite increasing awareness of Alexa’s potential as an educational tool, there remains a limited scope for Alexa skills to accommodate the features required for effective language learning. This paper describes an investigation into implementing ‘spaced-repetition’, a non-trivial feature of flashcard-based learning, through the development of an Alexa skill called ‘Japanese Flashcards’. Here we show that existing Alexa development features such as skill persistence allow for the effective implementation of spaced-repetition and suggest a heuristic adaptation of the spaced-repetition model that is appropriate for use with voice assistants (VAs). We also highlight areas of the Alexa development process that limit the facilitation of language learning, namely the lack of multilingual speech recognition, and offer solutions to these current limitations. Overall, the investigation shows that Alexa can successfully facilitate simple L2-L1 flashcard-based language learning and highlights the potential for Alexa to be used as a sophisticated and effective language learning tool.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[John H.L. Hansen|AUTHOR John H.L. Hansen]], [[Aditya Joglekar|AUTHOR Aditya Joglekar]], [[Meena Chandra Shekhar|AUTHOR Meena Chandra Shekhar]], [[Vinay Kothapally|AUTHOR Vinay Kothapally]], [[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Lakshmish Kaushik|AUTHOR Lakshmish Kaushik]], [[Abhijeet Sangwan|AUTHOR Abhijeet Sangwan]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1851–1855&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The 2019 FEARLESS STEPS (FS-1) Challenge is an initial step to motivate a streamlined and collaborative effort from the speech and language community towards addressing massive naturalistic audio, the first of its kind. The Fearless Steps Corpus is a collection of 19,000 hours of multi-channel recordings of spontaneous speech from over 450 speakers under multiple noise conditions. A majority of the Apollo Missions original analog data is unlabeled and has thus far motivated the development of both unsupervised and semi-supervised strategies. This edition of the challenge encourages the development of core speech and language technology systems for data with limited ground-truth / low resource availability and is intended to serve as the “First Step” towards extracting high-level information from such massive unlabeled corpora. In conjunction with the Challenge, 11,000 hours of synchronized 30-channel Apollo-11 audio data has also been released to the public by CRSS-UTDallas. We describe in this paper the Fearless Steps Corpus, Challenge Tasks, their associated baseline systems, and results. In conclusion, we also provide insights gained by the CRSS-UTDallas team during the inaugural Fearless Steps Challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kuan-Yu Chen|AUTHOR Kuan-Yu Chen]], [[Che-Ping Tsai|AUTHOR Che-Ping Tsai]], [[Da-Rong Liu|AUTHOR Da-Rong Liu]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]], [[Lin-shan Lee|AUTHOR Lin-shan Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1856–1860&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Producing a large annotated speech corpus for training ASR systems remains difficult for more than 95% of languages all over the world which are low-resourced, but collecting a relatively big unlabeled data set for such languages is more achievable. This is why some initial effort have been reported on completely unsupervised speech recognition learned from unlabeled data only, although with relatively high error rates. In this paper, we develop a Generative Adversarial Network (GAN) to achieve this purpose, in which a Generator and a Discriminator learn from each other iteratively to improve the performance. We further use a set of Hidden Markov Models (HMMs) iteratively refined from the machine generated labels to work in harmony with the GAN. The initial experiments on TIMIT data set achieve an phone error rate of 33.1%, which is 8.5% lower than the previous state-of-the-art.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tasavat Trisitichoke|AUTHOR Tasavat Trisitichoke]], [[Shintaro Ando|AUTHOR Shintaro Ando]], [[Daisuke Saito|AUTHOR Daisuke Saito]], [[Nobuaki Minematsu|AUTHOR Nobuaki Minematsu]]
</p><p class="cpabstractcardaffiliationlist">University of Tokyo, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1861–1865&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, researchers’ attention has been paid to pronunciation assessment not based on comparison between L2 speech and native models, but based on comprehensibility of L2 speech [1, 2, 3]. In our previous studies [4, 5, 6], native listeners’ shadowing of L2 speech was examined and it was shown that delay of shadowing and accuracy of articulation in shadowing utterances, both of which were acoustically calculated, are strongly influenced by the amount of cognitive load imposed for understanding L2 speech, especially when it is with strong accents. In this paper, aside from acoustic analysis of shadowings, we focus on shadowers’ facial microexpressions and examine how they are correlated with perceived comprehensibility. To extract facial expression features, two methods are tested. One is a computer-vision-based method and recorded videos of shadowers’ facial expressions are analyzed. The other is a method using a physiological sensor that can detect subtle movements of facial muscles. In experiments, four shadowers’ facial expressions are analyzed, each of whom shadowed approximately 800 L2 utterances. Results show that some of shadowers’ facial expressions are highly correlated with perceived comprehensibility, and that those facial expressions are strongly shadower-dependent. These results indicate a high potential of shadowers’ facial expressions for comprehensibility prediction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Reima Karhila|AUTHOR Reima Karhila]]^^1^^, [[Anna-Riikka Smolander|AUTHOR Anna-Riikka Smolander]]^^2^^, [[Sari Ylinen|AUTHOR Sari Ylinen]]^^2^^, [[Mikko Kurimo|AUTHOR Mikko Kurimo]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^University of Helsinki, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1866–1870&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For researching effects of gamification in foreign language learning for children in the “Say It Again, Kid!” project we developed a feedback paradigm that can drive gameplay in pronunciation learning games. We describe our scoring system based on the difference between a reference phone sequence and the output of a multilingual CTC phoneme recogniser. We present a white-box scoring model of mapped weighted Levenshtein edit distance between reference and error with error weights for articulatory differences computed from a training set of scored utterances. The system can produce a human-readable list of each detected mispronunciation’s contribution to the utterance score. We compare our scoring method to established black box methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Su-Youn Yoon|AUTHOR Su-Youn Yoon]], [[Chong Min Lee|AUTHOR Chong Min Lee]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Keelan Evanini|AUTHOR Keelan Evanini]]
</p><p class="cpabstractcardaffiliationlist">Educational Testing Service, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1871–1875&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we developed an automated scoring model for an oral proficiency test eliciting spontaneous speech from non-native speakers of English. In a large-scale oral proficiency test, a small number of responses may have atypical characteristics that make it difficult even for state-of-the-art automated scoring models to assign fair scores. The oral proficiency test in this study consisted of questions asking about content in materials provided to the test takers, and the atypical responses frequently had serious content abnormalities. In order to develop an automated scoring system that is robust to these atypical responses, we first developed a set of content features to capture content abnormalities. Next, we trained scoring models using the augmented training dataset, including synthetic atypical responses. Compared to the baseline scoring model, the new model showed comparable performance in scoring normal responses, while it assigned fairer scores for authentic atypical responses extracted from operational test administrations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Y. Lu|AUTHOR Y. Lu]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[P. Manakul|AUTHOR P. Manakul]], [[L. Wang|AUTHOR L. Wang]], [[Y. Wang|AUTHOR Y. Wang]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1876–1880&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Computer assisted language learning (CALL) systems aid learners to monitor their progress by providing scoring and feedback on language assessment tasks. Free speaking tests allow assessment of what a learner has said, as well as how they said it. For these tasks, Automatic Speech Recognition (ASR) is required to generate transcriptions of a candidate’s responses, the quality of these transcriptions is crucial to provide reliable feedback in downstream processes. This paper considers the impact of ASR performance on Grammatical Error Detection (GED) for free speaking tasks, as an example of providing feedback on a learner’s use of English. The performance of an advanced deep-learning based GED system, initially trained on written corpora, is used to evaluate the influence of ASR errors. One consequence of these errors is that grammatical errors can result from incorrect transcriptions as well as learner errors, this may yield confusing feedback. To mitigate the effect of these errors, and reduce erroneous feedback, ASR confidence scores are incorporated into the GED system. By additionally adapting the written text GED system to the speech domain, using ASR transcriptions, significant gains in performance can be achieved. Analysis of the GED performance for different grammatical error types and across grade is also presented.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chiori Hori|AUTHOR Chiori Hori]], [[Anoop Cherian|AUTHOR Anoop Cherian]], [[Tim K. Marks|AUTHOR Tim K. Marks]], [[Takaaki Hori|AUTHOR Takaaki Hori]]
</p><p class="cpabstractcardaffiliationlist">MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1886–1890&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multimodal fusion of audio, vision, and text has demonstrated significant benefits in advancing the performance of several tasks, including machine translation, video captioning, and video summarization. Audio-Visual Scene-aware Dialog (AVSD) is a new and more challenging task, proposed recently, that focuses on generating sentence responses to questions that are asked in a dialog about video content. While prior approaches designed to tackle this task have shown the need for multimodal fusion to improve response quality, the best-performing systems often rely heavily on human-generated summaries of the video content, which are unavailable when such systems are deployed in real-world. This paper investigates how to compensate for such information, which is missing in the inference phase but available during the training phase. To this end, we propose a novel AVSD system using student-teacher learning, in which a student network is (jointly) trained to mimic the teacher’s responses. Our experiments demonstrate that in addition to yielding state-of-the-art accuracy against the baseline DSTC7-AVSD system, the proposed approach (which does not use human-generated summaries at test time) performs competitively with methods that do use those summaries.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nichola Lubold|AUTHOR Nichola Lubold]]^^1^^, [[Stephanie A. Borrie|AUTHOR Stephanie A. Borrie]]^^2^^, [[Tyson S. Barrett|AUTHOR Tyson S. Barrett]]^^2^^, [[Megan Willi|AUTHOR Megan Willi]]^^3^^, [[Visar Berisha|AUTHOR Visar Berisha]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Arizona State University, USA; ^^2^^Utah State University, USA; ^^3^^California State University Chico, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1931–1935&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The communication phenomenon known as conversational entrainment occurs when dialogue partners align or adapt their behavior to one another while conversing. Associated with rapport, trust, and communicative efficiency, entrainment appears to facilitate conversational success. In this work, we explore how conversational partners entrain or align on articulatory precision or the clarity with which speakers articulate their spoken productions. Articulatory precision also has implications for conversational success as precise articulation can enhance speech understanding and intelligibility. However, in conversational speech, speakers tend to reduce their articulatory precision, preferring low-cost, imprecise speech. Speakers may adapt their articulation and become more precise depending on feedback from their listeners. Given the potential of entrainment, we are interested in how conversational partners adapt or entrain their articulatory precision to one another. We explore this phenomenon in 57 task-based dialogues. Controlling for the influence of speaking rate, we find that speakers entrain on articulatory precision, with significant alignment on articulation of consonants. We discuss the potential applications that speaker alignment on precision might have for modeling conversation and implementing strategies for enhancing communicative success in human-human and human-computer interactions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zheng Lian|AUTHOR Zheng Lian]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jian Huang|AUTHOR Jian Huang]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1936–1940&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Different from the emotion recognition in individual utterances, we propose a multimodal learning framework using relation and dependencies among the utterances for conversational emotion analysis. The attention mechanism is applied to the fusion of the acoustic and lexical features. Then these fusion representations are fed into the self-attention based bi-directional gated recurrent unit (GRU) layer to capture long-term contextual information. To imitate real interaction patterns of different speakers, speaker embeddings are also utilized as additional inputs to distinguish the speaker identities during conversational dialogs. To verify the effectiveness of the proposed method, we conduct experiments on the IEMOCAP database. Experimental results demonstrate that our method shows absolute 2.42% performance improvement over the state-of-the-art strategies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Karthik Gopalakrishnan|AUTHOR Karthik Gopalakrishnan]], [[Behnam Hedayatnia|AUTHOR Behnam Hedayatnia]], [[Qinlang Chen|AUTHOR Qinlang Chen]], [[Anna Gottardi|AUTHOR Anna Gottardi]], [[Sanjeev Kwatra|AUTHOR Sanjeev Kwatra]], [[Anu Venkatesh|AUTHOR Anu Venkatesh]], [[Raefer Gabriel|AUTHOR Raefer Gabriel]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1891–1895&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Building socialbots that can have deep, engaging open-domain conversations with humans is one of the grand challenges of artificial intelligence (AI). To this end, bots need to be able to leverage world knowledge spanning several domains effectively when conversing with humans who have their own world knowledge. Existing knowledge-grounded conversation datasets are primarily stylized with explicit roles for conversation partners. These datasets also do not explore depth or breadth of topical coverage with transitions in conversations. We introduce Topical-Chat, a knowledge-grounded human-human conversation dataset where the underlying knowledge spans 8 broad topics and conversation partners don’t have explicitly defined roles, to help further research in open-domain conversational AI. We also train several state-of-the-art encoder-decoder conversational models on Topical-Chat and perform automated and human evaluation for benchmarking.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Uliyana Kubasova|AUTHOR Uliyana Kubasova]], [[Gabriel Murray|AUTHOR Gabriel Murray]], [[McKenzie Braley|AUTHOR McKenzie Braley]]
</p><p class="cpabstractcardaffiliationlist">University of the Fraser Valley, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1896–1900&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work analyzes the efficacy of verbal and nonverbal features of group conversation for the task of automatic prediction of group task performance. We describe a new publicly available survival task dataset that was collected and annotated to facilitate this prediction task. In these experiments, the new dataset is merged with an existing survival task dataset, allowing us to compare feature sets on a much larger amount of data than has been used in recent related work. This work is also distinct from related research on social signal processing (SSP) in that we compare verbal and nonverbal features, whereas SSP is almost exclusively concerned with nonverbal aspects of social interaction. A key finding is that nonverbal features from the speech signal are extremely effective for this task, even on their own. However, the most effective individual features are verbal features, and we highlight the most important ones.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Victor R. Martinez|AUTHOR Victor R. Martinez]]^^1^^, [[Nikolaos Flemotomos|AUTHOR Nikolaos Flemotomos]]^^1^^, [[Victor Ardulov|AUTHOR Victor Ardulov]]^^1^^, [[Krishna Somandepalli|AUTHOR Krishna Somandepalli]]^^1^^, [[Simon B. Goldberg|AUTHOR Simon B. Goldberg]]^^2^^, [[Zac E. Imel|AUTHOR Zac E. Imel]]^^3^^, [[David C. Atkins|AUTHOR David C. Atkins]]^^4^^, [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern California, USA; ^^2^^UW–Madison, USA; ^^3^^University of Utah, USA; ^^4^^University of Washington, USA; ^^5^^University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1901–1905&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Psychotherapy, from a narrative perspective, is the process in which a client relates an on-going life-story to a therapist. In each session, a client will recount events from their life, some of which stand out as more significant than others. These significant stories can ultimately shape one’s identity. In this work we study these narratives in the context of therapeutic alliance — a self-reported measure on the perception of a shared bond between client and therapist. We propose that alliance can be predicted from the interactions between certain types of clients with types of therapists. To validate this method, we obtained 1235 transcribed sessions with client-reported alliance to train an unsupervised approach to discover groups of therapists and clients based on common types of narrative characters, or  personae. We measure the strength of the relation between personae and alliance in two experiments. Our results show that (1) alliance can be explained by the interactions between the discovered character types, and (2) models trained on therapist and client personae achieve significant performance gains compared to competitive supervised baselines. Finally, exploratory analysis reveals important character traits that lead to an improved perception of alliance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kristin Haake|AUTHOR Kristin Haake]]^^1^^, [[Sarah Schimke|AUTHOR Sarah Schimke]]^^1^^, [[Simon Betz|AUTHOR Simon Betz]]^^2^^, [[Sina Zarrieß|AUTHOR Sina Zarrieß]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Dortmund, Germany; ^^2^^Universität Bielefeld, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1906–1910&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Spoken dialogue systems are predominantly evaluated using offline methods such as user ratings or task-oriented measures. Various phenomena in conversational speech, however, are known to affect the way the listener’s comprehension  unfolds over time, and not necessarily the final result of the comprehension process. For instance, in human reference comprehension, conversational signals like hesitations have been shown to ease processing of expressions referring to difficult-to-describe targets, as can primarily be observed in listeners’ anticipatory eye movements rather than in their final reference resolution decision. In this study, we explore eye tracking for testing conversational dialogue systems, looking at how listeners process automatically generated referring expressions containing defective attributes. We investigate whether hesitations facilitate the processing of partially defective system utterances and track the user’s eye movements when listening to expressions with: (i) semantically defective but fluently synthesized adjectives, (ii) defective and lengthened adjectives, i.e. containing a conversational uncertainty signal. Our results are encouraging: whereas the offline measure of task success does not show any differences between the two conditions, the listeners’ eye movements suggest that processing of partially defective utterances might be facilitated by conversational hesitations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bin Li|AUTHOR Bin Li]]^^1^^, [[Yuan Jia|AUTHOR Yuan Jia]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UCASS, China; ^^2^^CASS, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1911–1915&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a detailed investigation on the influence of contextuality on the prosodic realization of information structure in Chinese dialogues. The materials were selected from the 863 corpus, which contains both isolated sentences and spontaneous dialogues. RefLex was selected as the annotation scheme, which differentiates information structure on the lexical and referential levels. Prosodic data (including duration and pitch range) from 12 groups of spontaneous dialogues were analyzed with the linear mixed effects mode, and each of them consists of 13–22 turns. The isolated sentences corresponding to these dialogues were also analyzed. The analysis results reveal the influence of contextuality. Specifically, the features of prosodic realization of information structure on the lexical and referential levels show a contrary tendency. The statistical analysis indicates that the speakers use duration and pitch ranges as phonetic cues to distinguish information structures on both levels. On the other hand, duration on the referential level is the only phonetic cue affected by contextuality.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kristijan Gjoreski|AUTHOR Kristijan Gjoreski]], [[Aleksandar Gjoreski|AUTHOR Aleksandar Gjoreski]], [[Ivan Kraljevski|AUTHOR Ivan Kraljevski]], [[Diane Hirschfeld|AUTHOR Diane Hirschfeld]]
</p><p class="cpabstractcardaffiliationlist">voice INTER connect, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1916–1920&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a case study of cross-lingual transfer learning applied for affective computing in the domain of spoken dialogue systems. Prosodic features of correction dialog acts are modeled on a group of languages and compared with languages excluded from the analysis.

Speech from different languages was recorded in carefully staged Wizard-of-Oz experiments; however, it was not possible to ensure a balanced distribution of speakers per language. In order to assess the possibility of cross-lingual transfer learning and to ensure reliable classification of corrections independently of language, we employed different machine learning approaches along with relevant acoustic-prosodic feature sets.

The results of the experiments with mono-lingual setups (trained and tested on a single language) and cross-lingual setups (trained on several languages and tested on the rest) were analyzed and compared in terms of accuracy and F1 score.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingzhi Yu|AUTHOR Mingzhi Yu]]^^1^^, [[Emer Gilmartin|AUTHOR Emer Gilmartin]]^^2^^, [[Diane Litman|AUTHOR Diane Litman]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Pittsburgh, USA; ^^2^^Trinity College Dublin, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1921–1925&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Research on human spoken language has shown that speech plays an important role in identifying speaker personality traits. In this work, we propose an approach for identifying speaker personality traits using overlap dynamics in multiparty spoken dialogues. We first define a set of novel features representing the overlap dynamics of each speaker. We then investigate the impact of speaker personality traits on these features using ANOVA tests. We find that features of overlap dynamics significantly vary for speakers with different levels of both Extraversion and Conscientiousness. Finally, we find that classifiers using only overlap dynamics features outperform random guessing in identifying Extraversion and Agreeableness, and that the improvements are statistically significant.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zakaria Aldeneh|AUTHOR Zakaria Aldeneh]]^^1^^, [[Mimansa Jaiswal|AUTHOR Mimansa Jaiswal]]^^1^^, [[Michael Picheny|AUTHOR Michael Picheny]]^^2^^, [[Melvin G. McInnis|AUTHOR Melvin G. McInnis]]^^1^^, [[Emily Mower Provost|AUTHOR Emily Mower Provost]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Michigan, USA; ^^2^^IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1926–1930&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Bipolar disorder, a severe chronic mental illness characterized by pathological mood swings from depression to mania, requires ongoing symptom severity tracking to both guide and measure treatments that are critical for maintaining long-term health. Mental health professionals assess symptom severity through semi-structured clinical interviews. During these interviews, they observe their patients’ spoken behaviors, including both what the patients say and how they say it. In this work, we move beyond acoustic and lexical information, investigating how higher-level interactive patterns also change during mood episodes. We then perform a secondary analysis, asking if these interactive patterns, measured through dialogue features, can be used in conjunction with acoustic features to automatically recognize mood episodes. Our results show that it is beneficial to consider dialogue features when analyzing and building automated systems for predicting and monitoring mood.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Emma O’Neill|AUTHOR Emma O’Neill]], [[Julie Carson-Berndsen|AUTHOR Julie Carson-Berndsen]]
</p><p class="cpabstractcardaffiliationlist">University College Dublin, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1941–1945&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the extent to which native speaker perceptions regarding the similarity between phonemes of English are influenced by their distributional properties. A similarity hierarchy model based on the distribution of consonantal phonemes in the English language was generated by creating phoneme-embeddings from contextual information. We compare this to similarity models based on phonological feature theory and on native speaker perception. Characteristics of the perception-based model are shown to appear in the distribution-based model whilst not being captured by the feature-based model. This not only provides evidence of similarity perceptions being influenced by distributional properties but is an argument for incorporating distributional information alongside phonological features when modelling perceptual similarity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-Ren Chien|AUTHOR Yu-Ren Chien]], [[Michal Borský|AUTHOR Michal Borský]], [[Jón Guðnason|AUTHOR Jón Guðnason]]
</p><p class="cpabstractcardaffiliationlist">Reykjavik University, Iceland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1986–1989&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The periodicity of a voiced-sound signal can reflect physiological conditions such as identity, age, and voice disorder. One way to look into this periodicity is to measure the temporal variability of vocal fundamental frequency (F0). This paper proposes 2 measures of F0 variability based on glottal closure instant (GCI). GCI is essential to the detection of F0 when the signal waveform varies substantially between adjacent cycles, e.g., in breathy voice. Frequency-selective variability measurements are taken from an interpolated sequence of fundamental-period values based on GCIs, including certain spectral-shape parameters which constitute a multi-variate measure. The utility of these measures was demonstrated in two experiments designed for inter- and intra-speaker comparisons, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lauri Tavi|AUTHOR Lauri Tavi]]^^1^^, [[Tanel Alumäe|AUTHOR Tanel Alumäe]]^^2^^, [[Stefan Werner|AUTHOR Stefan Werner]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Eastern Finland, Finland; ^^2^^TalTech, Estonia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1990–1994&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although creaky voice, or vocal fry, is widely studied phonation mode, open questions still exist in creak’s acoustic characterization and automatic recognition. Many questions are open since creak varies significantly depending on conversational context. In this study, we introduce an exploratory creak recognizer based on convolutional neural network (CNN), which is generated specifically for emergency calls. The study focuses on recognition of creaky voice from authentic emergency calls because creak detection could potentially provide information about the caller’s emotional state or attempt of voice disguise. We generated the CNN recognition system using emergency call recordings and other out-of-domain speech recordings and compared the results with an already existing and widely used creaky voice detection system: using poor quality emergency call recordings as test data, this system achieved F1 of 0.41 whereas our CNN system accomplished an F1 of 0.64. The results show that the CNN system can perform moderately well using a limited amount of training data on challenging testing data and has the potential to achieve higher F scores when more emergency calls are used for model training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sofoklis Kakouros|AUTHOR Sofoklis Kakouros]], [[Antti Suni|AUTHOR Antti Suni]], [[Juraj Šimko|AUTHOR Juraj Šimko]], [[Martti Vainio|AUTHOR Martti Vainio]]
</p><p class="cpabstractcardaffiliationlist">University of Helsinki, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1946–1950&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prominence perception has been known to correlate with a complex interplay of the acoustic features of energy, fundamental frequency, spectral tilt, and duration. The contribution and importance of each of these features in distinguishing between prominent and non-prominent units in speech is not always easy to determine, and more so, the prosodic representations that humans and automatic classifiers learn have been difficult to interpret. This work focuses on examining the acoustic prosodic representations that binary prominence classification neural networks and autoencoders learn for prominence. We investigate the complex features learned at different layers of the network as well as the 10-dimensional bottleneck features (BNFs), for the standard acoustic prosodic correlates of prominence separately and in combination. We analyze and visualize the BNFs obtained from the prominence classification neural networks as well as their network activations. The experiments are conducted on a corpus of Dutch continuous speech with manually annotated prominence labels. Our results show that the prosodic representations obtained from the BNFs and higher-dimensional non-BNFs provide good separation of the two prominence categories, with, however, different partitioning of the BNF space for the distinct features, and the best overall separation obtained for F0.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sharon Peperkamp|AUTHOR Sharon Peperkamp]], [[Alvaro Martin Iturralde Zurita|AUTHOR Alvaro Martin Iturralde Zurita]]
</p><p class="cpabstractcardaffiliationlist">LSCP (UMR 8554), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1951–1955&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Phonological rules change the surface realization of words. Listeners undo these changes in order to retrieve the canonical word form. We investigate this so-called compensation for a French deletion rule, i.e. liquid deletion. This rule optionally deletes the final consonant of a word-final obstruent-liquid cluster. It can apply both before consonants and before vowels, but its application is about twice as frequent before consonants. Using a word detection task, we find an overall relatively low rate of compensation, which we argue is due to the relatively high perceptual salience of the rule. We also observe a clear effect of context, though: listeners compensate more than twice as often for a deleted liquid before a consonant than before a vowel. This is evidence that compensation involves fine-grained knowledge about the probability of the rule’s application in different contexts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniil Kocharov|AUTHOR Daniil Kocharov]], [[Tatiana Kachkovskaia|AUTHOR Tatiana Kachkovskaia]], [[Pavel Skrelin|AUTHOR Pavel Skrelin]]
</p><p class="cpabstractcardaffiliationlist">Saint Petersburg State University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1956–1960&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unstressed vowels in Russian are reduced in both duration and quality, but these two manifestations of vowel reduction do not have to be observed simultaneously. In order to investigate this question, we analysed the reduction pattern of words in such contexts where lengthening is induced by prosodic factors: prominence and pre-boundary lengthening. The study is based on a large corpus of read speech. The following results were obtained: (1) as expected, both contexts increase vowel duration; (2) under prosodic prominence vowels undergo less qualitative reduction, while pre-boundary lengthening has no effect on qualitative reduction; (3) additionally, it was shown that prominence mainly affects the pretonic part of the word, while pre-boundary lengthening — the post-tonic part. Thus, an increase in vowel duration does not always cause a decrease in qualitative reduction, which may serve as evidence against the idea that qualitative reduction is caused by quantitative reduction. Additionally, these results may serve as an argument for the idea that the two processes — vowel reduction and temporal organization of utterance — are autonomous.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christer Gobl|AUTHOR Christer Gobl]], [[Ailbhe Ní Chasaide|AUTHOR Ailbhe Ní Chasaide]]
</p><p class="cpabstractcardaffiliationlist">Trinity College Dublin, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1961–1965&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper explores the mapping of time and frequency domain aspects of the voice source, focussing on the low end of the source spectrum. It refines and extends an earlier study, where the LF model was used to explore the correspondences between the open quotient (O,,q,,), glottal skew (R,,k,,) and harmonic levels of the source spectrum, including the H1-H2 measure, widely assumed to reflect differences in O,,q,,. Here we use a different model (the F-model) as it better reflects the effective open quotient and glottal skew in certain conditions. As in the earlier study, a series of glottal pulses were generated, keeping peak glottal flow constant, while systematically varying O,,q,, and R,,k,,. Results suggest that the effects of R,,k,, on the low harmonics is considerably less than estimated in the earlier study, and its main impact is on the level of H2 (and consequently H1-H2) when O,,q,, is relatively high. The conclusion remains that the H1-H2 is not simply a direct reflection of O,,q,,. However, for O,,q,, values of up to about 0.6, it maps closely to H1-H2: beyond this point, H1-H2 reflects a more complex interaction of open quotient and glottal skew.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eleanor Chodroff|AUTHOR Eleanor Chodroff]]^^1^^, [[Jennifer S. Cole|AUTHOR Jennifer S. Cole]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of York, UK; ^^2^^Northwestern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1966–1970&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Understanding the structure of intonational variation is a longstanding issue in prosodic research. A given utterance can be realized with countless intonational contours, and while variation in prosodic meaning is also large, listeners nevertheless converge on relatively consistent form-function mappings. While this suggests the existence of abstract intonational representations, it has been unclear how exactly these are defined. The present study examines the validity of a well-defined set of phonological representations for the generation of intonation in the nuclear region of an intonational phrase in American English: namely, the combination of binary pitch accents (H*/L*), phrase accents (H-/L-), and boundary tones (H%/L%) proposed in Pierrehumbert (1980). In an exploratory study, we examined whether speakers maintained the eight-way distinction among intonational contours posited to exist in this representational system. We created eight synthesized contours according to Pierrehumbert (1980) and examined whether listeners generalized these contours to novel productions. Speakers largely distinguished rising from non-rising contours in production, but few other distinctions were maintained. While this does not rule out the existence of additional contours in production, these findings do suggest that the representation of rising and non-rising contours may be privileged and more readily accessible in the intonational grammar.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sangwook Park|AUTHOR Sangwook Park]]^^1^^, [[David K. Han|AUTHOR David K. Han]]^^2^^, [[Mounya Elhilali|AUTHOR Mounya Elhilali]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^U.S. Army Research Laboratory, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1971–1975&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For those in the early stage of learning a foreign language, they commonly experience difficulties in understanding spoken words in the second language, while they have no problem in recognizing words spoken in their mother tongue. This paper examines this phenomenon using biomimetic receptive fields that can be interpreted as a transfer function between acoustic stimulus and cortical responses in the brain. While receptive fields of individual subjects are often optimized to recognize unique phonemes in their mother language, it is unclear whether challenges associated with acquiring a new language (especially in adulthood) is due to a mismatch between phonemic characteristics in the new language and optimized processing in the system. We explore this question by contrasting biomimetic systems optimized for four different languages with sufficiently different characteristics. We perform English phoneme classification with these language-optimized systems. We observed distinctive characteristics in receptive fields emerging from each language, and the differences of English phoneme recognition performance accordingly.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pavel Šturm|AUTHOR Pavel Šturm]], [[Jan Volín|AUTHOR Jan Volín]]
</p><p class="cpabstractcardaffiliationlist">Charles University, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1976–1980&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Question-word questions in Czech lexically mark their interrogative function in the initial position: in their standard form, they begin with an interrogative lexeme. For many linguists, this is a sufficient reason for resigning on intonation marking, so they claim that the speech melody in these questions is identical to the melody of statements. A careful observation of the current Czech speech suggests otherwise.

This paper presents a perceptual experiment in which Czech speakers evaluated two contrastive forms of the interrogative melody: one with a late peak modelled after statements (as suggested by some authors), and one with an early peak modelled after our previously collected empirical data. Thirty-two listeners expressed a statistically significant preference for the early peak in a perception test. This outcome is consistent with our sample of produced questions. However, the late peak is also possible and acceptable: we assume that it may signal contrastive emphasis or serve as an implicational cue.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anneliese Kelterer|AUTHOR Anneliese Kelterer]]^^1^^, [[Barbara Schuppler|AUTHOR Barbara Schuppler]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Graz, Austria; ^^2^^Technische Universität Graz, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1981–1985&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Chichimec is an Oto-Manguean language of Mexico with a phonological contrast between modal, breathy and creaky vowels. This study is the first acoustic investigation of this contrast in Chichimec, based on spectral tilt and Cepstral Peak Prominence (CPP) measures. We consider the change of these measures over the course of the vowel and include a high vowel, which was omitted in most phonation studies of other languages. The present study not only contributes to the description of Chichimec with respect to the different portions of the vowel, but also explores the adequacy of the acoustic measures of phonation type for low and high vowels.

Our results show that phonation changes in the course of the vowel, and that this change is a relevant factor for phonation types in Chichimec. We find that CPP is the best measure to characterize Chichimec phonation contrasts in all vowels. For the vowel /a/, spectral tilt measures are better indicators of phonation type for women than for men. The results for /i/ indicate that spectral tilt distinguishes breathy from modal vowels for men, but that these measures might generally not be appropriate to describe phonation contrasts in women’s high vowels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuzhuang Xu|AUTHOR Shuzhuang Xu]], [[Hiroshi Shimodaira|AUTHOR Hiroshi Shimodaira]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1995–1999&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Pitch tracking, or the continuous extraction of fundamental frequency from speech waveforms, is of vital importance to many applications in speech analysis and synthesis. Many existing trackers, including conventional ones such as Praat, RAPT and YIN, and newly proposed neural-network-based ones such as DNN-CLS, CREPE and RNN-REG, have conducted an extensive investigation into speech pitch tracking. This work developed a different end-to-end regression model based on neural networks, where a voice detector and a newly proposed value estimator work jointly to highlight the trajectory of fundamental frequency. Experiments on the PTDB-TUG corpus showed that the system surpasses canonical neural networks in terms of gross error rate. It further outperformed conventional trackers under clean condition and neural-network classifiers under noisy condition by the NOISEX-92 corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chitralekha Gupta|AUTHOR Chitralekha Gupta]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2040–2044&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic lyrics to polyphonic audio alignment is a challenging task not only because the vocals are corrupted by background music, but also there is a lack of annotated polyphonic corpus for effective acoustic modeling. In this work, we propose (1) using additional speech and music-informed features and (2) adapting the acoustic models trained on a large amount of solo singing vocals towards polyphonic music using a small amount of in-domain data. Incorporating additional information such as voicing and auditory features together with conventional acoustic features aims to bring robustness against the increased spectro-temporal variations in singing vocals. By adapting the acoustic model using a small amount of polyphonic audio data, we reduce the domain mismatch between training and testing data. We perform several alignment experiments and present an in-depth alignment error analysis on acoustic features, and model adaptation techniques. The results demonstrate that the proposed strategy provides a significant error reduction of word boundary alignment over comparable existing systems, especially on more challenging polyphonic data with long-duration musical interludes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anastasios Vafeiadis|AUTHOR Anastasios Vafeiadis]]^^1^^, [[Eleftherios Fanioudakis|AUTHOR Eleftherios Fanioudakis]]^^2^^, [[Ilyas Potamitis|AUTHOR Ilyas Potamitis]]^^2^^, [[Konstantinos Votis|AUTHOR Konstantinos Votis]]^^1^^, [[Dimitrios Giakoumis|AUTHOR Dimitrios Giakoumis]]^^1^^, [[Dimitrios Tzovaras|AUTHOR Dimitrios Tzovaras]]^^1^^, [[Liming Chen|AUTHOR Liming Chen]]^^3^^, [[Raouf Hamzaoui|AUTHOR Raouf Hamzaoui]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CERTH, Greece; ^^2^^TEI Crete, Greece; ^^3^^De Montfort University, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2045–2049&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech Activity Detection (SAD) plays an important role in mobile communications and automatic speech recognition (ASR). Developing efficient SAD systems for real-world applications is a challenging task due to the presence of noise. We propose a new approach to SAD where we treat it as a two-dimensional multilabel image classification problem. To classify the audio segments, we compute their Short-time Fourier Transform spectrograms and classify them with a Convolutional Recurrent Neural Network (CRNN), traditionally used in image recognition. Our CRNN uses a sigmoid activation function, max-pooling in the frequency domain, and a convolutional operation as a moving average filter to remove misclassified spikes. On the development set of Task 1 of the 2019 Fearless Steps Challenge, our system achieved a decision cost function (DCF) of 2.89%, a 66.4% improvement over the baseline. Moreover, it achieved a DCF score of 3.318% on the evaluation dataset of the challenge, ranking first among all submissions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tokihiko Kaburagi|AUTHOR Tokihiko Kaburagi]]
</p><p class="cpabstractcardaffiliationlist">Kyushu University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2050–2054&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We examined the physical interaction between the voice source system in the larynx and the acoustic filter of the vocal tract. The vocal tract of a soprano was first scanned in three dimensions using magnetic resonance imaging (MRI) while she produced four musical notes with different vowels. These images were used to simulate voice production, including the vibratory motion of the vocal folds and the behavior of glottal airflow. Images for the /i/ vowel were used in the simulation, because a good proximity relationship was found between the fundamental frequency and the first impedance peak of the vocal tract. The simulation results revealed that the fundamental frequency (vibration frequency of the vocal folds) was decreased to a large extent by the source-filter interaction especially when their natural frequency was in the proximity of the impedance peak. In a specific case, this frequency lowering had the effect of changing the acoustic load of the vocal tract exerted on the vocal folds so that their vibratory motion was effectively assisted.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tanay Sharma|AUTHOR Tanay Sharma]], [[Rohith Chandrashekar Aralikatti|AUTHOR Rohith Chandrashekar Aralikatti]], [[Dilip Kumar Margam|AUTHOR Dilip Kumar Margam]], [[Abhinav Thanda|AUTHOR Abhinav Thanda]], [[Sharad Roy|AUTHOR Sharad Roy]], [[Pujitha Appan Kandala|AUTHOR Pujitha Appan Kandala]], [[Shankar M. Venkatesan|AUTHOR Shankar M. Venkatesan]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2000–2004&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Visual Voice Activity Detection (V-VAD) involves the detection of speech activity of a speaker using visual features. The V-VAD is useful in detecting the end point of an utterance under noisy acoustic conditions or for maintaining speaker privacy. In this paper, we propose a speaker independent, real-time solution for V-VAD. The focus is on real-time aspect and accuracy as such algorithms will play a key role in detecting end point especially while interacting with speech assistants. We propose two novel methods one using CNN and the other using 2D-DCT features. Unidirectional LSTMs are used in both the methods to make it online and learn temporal dependence. The methods are tested on two publicly available datasets. Additionally the methods are also tested on a locally collected dataset which further validates our hypothesis. Additionally it has been observed through experiments that both the approaches generalize to unseen speakers. It has been shown that our best approach gives substantial improvement over earlier methods done on the same dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luc Ardaillon|AUTHOR Luc Ardaillon]], [[Axel Roebel|AUTHOR Axel Roebel]]
</p><p class="cpabstractcardaffiliationlist">STMS (UMR 9912), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2005–2009&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The estimation of fundamental frequency (F,,0,,) from audio is a necessary step in many speech processing tasks such as speech synthesis, that require to accurately analyze big datasets, or real-time voice transformations, that require low computation times. New approaches using neural networks have been recently proposed for F,,0,, estimation, outperforming previous approaches in terms of accuracy. The work presented here aims at bringing some more improvements over such CNN-based state-of-the-art approaches, especially when targeting speech data. More specifically, we first propose to use the recent PaN speech synthesis engine in order to generate a high-quality speech database with a reliable ground truth F,,0,, annotation. Then, we propose 3 variants of a new fully-convolutional network (FCN) architecture that are shown to perform better than other similar data-driven methods, with a significantly reduced computational load making them more suitable for real-time purposes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mingye Dong|AUTHOR Mingye Dong]]^^1^^, [[Jie Wu|AUTHOR Jie Wu]]^^2^^, [[Jian Luan|AUTHOR Jian Luan]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2010–2014&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Pitch extraction, also known as fundamental frequency estimation, is a long-term task in audio signal processing. Especially, due to the presence of accompaniment, vocal pitch extraction in polyphonic music is more challenging. So far, most of deep learning approaches use log mel spectrogram as input, which neglect the phase information. In addition, shallow networks have been applied on waveform directly, which may not handle contaminated vocal data well. In this paper, a deep convolutional residual network is proposed. It analyzes and extracts effective feature from waveform automatically. Residual learning can reduce model degradation due to the skip connection and residual mapping. In comparison to reported results, the proposed approach shows 5% and 4% improvement on overall accuracy(OA) and raw pitch accuracy(RPA) respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2015–2019&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech activity detection (SAD) is a part of many speech processing applications. The traditional SAD approaches use signal energy as the evidence to identify the speech regions. However, such methods perform poorly under uncontrolled environments. In this work, we propose a novel SAD approach using a multi-level decision with signal knowledge in an adaptive manner. The multi-level evidence considered are modulation spectrum and smoothed Hilbert envelope of linear prediction (LP) residual. Modulation spectrum has compelling parallels to the dynamics of speech production and captures information only for the speech component. Contrarily, Hilbert envelope of LP residual captures excitation source aspect of speech. Under uncontrolled scenario, these evidence are found to be robust towards the signal distortions and thus expected to work well. In view of different levels of interference present in the signal, we propose to use a quality factor to control the speech/non-speech decision in an adaptive manner. We refer this method as multi-level adaptive SAD and evaluate on Fearless Steps corpus that is collected during Apollo-11 Mission in naturalistic environments. We achieve a detection cost function of 7.35% with the proposed multi-level adaptive SAD on the evaluation set of Fearless Steps 2019 challenge corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2020–2024&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Singer identification is to automatically identify the singer in a music recording, such as a polyphonic song. A song has two major acoustic components that are singing vocals and background accompaniment. Although identifying singers is similar to speaker identification, it is challenging due to the interference of background accompaniment on the singer-specific information in singing vocals. We believe that separating the background accompaniment from the singing vocal will help us to overcome the interference. In this work, we extract the singing vocals from polyphonic songs using Wave-U-Net based audio-source separation approach. The extracted singing vocals are then used in i-vector based singer identification system. Further, we explore different state-of-the-art audio-source separation methods to establish the role of considered method in application to singer identification. The proposed singer identification framework achieves an absolute accuracy improvement of 5.66% over the baseline without audio-source separation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hiroko Terasawa|AUTHOR Hiroko Terasawa]]^^1^^, [[Kenta Wakasa|AUTHOR Kenta Wakasa]]^^1^^, [[Hideki Kawahara|AUTHOR Hideki Kawahara]]^^2^^, [[Ken-Ichi Sakakibara|AUTHOR Ken-Ichi Sakakibara]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Tsukuba, Japan; ^^2^^Wakayama University, Japan; ^^3^^Health Sciences University of Hokkaido, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2025–2029&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, the difference in glottal vibration and timbre of singing voice in choral and operatic singing was investigated. Eight professional singers with active careers in operatic and choral performances participated in the experiment and sang excerpts from three operatic songs and two choral songs. Audio and electroglottograph signals were simultaneously recorded. The open quotient (O,,q,,) and singing power ratio (SPR) of the voices were analyzed, and it was found that the O,,q,, of choral singing tends to be higher and the SPR of choral singing tends to be lower than those of operatic singing. This suggests that choral singing is conducted with laxer vocal fold coordination, and it has less ringing timbre than operatic singing. However, the O,,q,, and SPR were not directly correlated: the degree of adjustment of SPR differed across singers, suggesting that the strategy to achieve a desired voice quality is individualistic in nature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruixi Lin|AUTHOR Ruixi Lin]]^^1^^, [[Charles Costello|AUTHOR Charles Costello]]^^2^^, [[Charles Jankowski|AUTHOR Charles Jankowski]]^^3^^, [[Vishwas Mruthyunjaya|AUTHOR Vishwas Mruthyunjaya]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^Plus One Robotics, USA; ^^3^^CloudMinds Technology, USA; ^^4^^AISERA, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2030–2034&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we focus our attention on how to improve Voice Activity Detection (VAD) in noisy conditions. We propose a Convolutional Neural Network (CNN) based model, as well as a Denoising Autoencoder (DAE), and experiment against acoustic features and their delta features in noise levels ranging from SNR 35 dB to 0 dB. The experiments compare and find the best model configuration for robust performance in noisy conditions. We observe that combining more expressive audio features with the use of DAEs improve accuracy, especially as noise increases. At 0 dB, the proposed model trained with the best feature set could achieve a lab test accuracy of 93.2% (averaged across all noise levels) and 88.6% inference accuracy on device. We also compress the neural network and deploy the inference model that is optimized for the app so that the average on-device CPU usage is reduced to 14% from 37%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Taiki Yamamoto|AUTHOR Taiki Yamamoto]]^^1^^, [[Ryota Nishimura|AUTHOR Ryota Nishimura]]^^1^^, [[Masayuki Misaki|AUTHOR Masayuki Misaki]]^^2^^, [[Norihide Kitaoka|AUTHOR Norihide Kitaoka]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tokushima University, Japan; ^^2^^Panasonic, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2035–2039&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The number of consumer devices which can be operated by voice is increasing every year. Magic Word Detection (MWD), the detection of an activation keyword in continuous speech, has become an essential technology for the hands-free operation of such devices. Because MWD systems need to run constantly in order to detect Magic Words at any time, many studies have focused on the development of a small-footprint system. In this paper, we propose a novel, small-footprint MWD method which uses a convolutional Long Short-Term Memory (LSTM) neural network to capture frequency and time domain features over time. As a result, the proposed method outperforms the baseline method while reducing the number of parameters by more than 80%. An experiment on a small-scale device demonstrates that our model is efficient enough to function in real time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuxiang Zou|AUTHOR Yuxiang Zou]], [[Linhao Dong|AUTHOR Linhao Dong]], [[Bo Xu|AUTHOR Bo Xu]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2055–2059&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent character-based end-to-end text-to-speech (TTS) systems have shown promising performance in natural speech generation, especially for English. However, for Chinese TTS, the character-based model is easy to generate speech with wrong pronunciation due to the label sparsity issue. To address this issue, we introduce an additional learning task of character-to-pinyin mapping to boost the pronunciation learning of characters, and leverage a pre-trained dictionary network to correct the pronunciation mistake through joint training. Specifically, our model predicts pinyin labels as an auxiliary task to assist learning better hidden representations of Chinese characters, where pinyin is a standard phonetic representation for Chinese characters. The dictionary network plays a role as a tutor to further help hidden representation learning. Experiments demonstrate that employing the pinyin auxiliary task and an external dictionary network clearly enhances the naturalness and intelligibility of the synthetic speech directly from the Chinese character sequences.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Harry Bleyan|AUTHOR Harry Bleyan]]^^1^^, [[Sandy Ritchie|AUTHOR Sandy Ritchie]]^^2^^, [[Jonas Fromseier Mortensen|AUTHOR Jonas Fromseier Mortensen]]^^2^^, [[Daan van Esch|AUTHOR Daan van Esch]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Google, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2100–2104&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We discuss two methods that let us easily create grapheme-to-phoneme (G2P) conversion systems for languages without any human-curated pronunciation lexicons, as long as we know the phoneme inventory of the target language and as long as we have some pronunciation lexicons for other languages written in the same script. We use these resources to infer what grapheme-to-phoneme correspondences we would expect, and predict pronunciations for words in the target language with minimal or no language-specific human work. Our first approach uses finite-state transducers, while our second approach uses a sequence-to-sequence neural network. Our G2P models reach high degrees of accuracy, and can be used for various applications, e.g. in developing an automatic speech recognition system. Our methods greatly simplify a task that has historically required extensive manual labor.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mengnan Chen|AUTHOR Mengnan Chen]]^^1^^, [[Minchuan Chen|AUTHOR Minchuan Chen]]^^2^^, [[Shuang Liang|AUTHOR Shuang Liang]]^^2^^, [[Jun Ma|AUTHOR Jun Ma]]^^2^^, [[Lei Chen|AUTHOR Lei Chen]]^^1^^, [[Shaojun Wang|AUTHOR Shaojun Wang]]^^2^^, [[Jing Xiao|AUTHOR Jing Xiao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^East China Normal University, China; ^^2^^Ping An Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2105–2109&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural network-based model for text-to-speech (TTS) synthesis has made significant progress in recent years. In this paper, we present a cross-lingual, multi-speaker neural end-to-end TTS framework which can model speaker characteristics and synthesize speech in different languages. We implement the model by introducing a separately trained neural speaker embedding network, which can represent the latent structure of different speakers and language pronunciations. We train the speech synthesis network bilingually and prove the possibility of synthesizing Chinese speaker’s English speech and vice versa. We explore different methods to fit a new speaker using only a few speech samples. The experimental results show that, with only several minutes of audio from a new speaker, the proposed model can synthesize speech bilingually and acquire decent naturalness and similarity for both languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zexin Cai|AUTHOR Zexin Cai]], [[Yaogen Yang|AUTHOR Yaogen Yang]], [[Chuxiong Zhang|AUTHOR Chuxiong Zhang]], [[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2110–2114&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes a conditional neural network architecture for Mandarin Chinese polyphone disambiguation. The system is composed of a bidirectional recurrent neural network component acting as a sentence encoder to accumulate the context correlations, followed by a prediction network that maps the polyphonic character embeddings along with the conditions to corresponding pronunciations. We obtain the word-level condition from a pre-trained word-to-vector lookup table. One goal of polyphone disambiguation is to address the homograph problem existing in the front-end processing of Mandarin Chinese text-to-speech system. Our system achieves an accuracy of 94.69% on a publicly available polyphonic character dataset. To further validate our choices on the conditional feature, we investigate polyphone disambiguation systems with multi-level conditions respectively. The experimental results show that both the sentence-level and the word-level conditional embedding features are able to attain good performance for Mandarin Chinese polyphone disambiguation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hao Sun|AUTHOR Hao Sun]]^^1^^, [[Xu Tan|AUTHOR Xu Tan]]^^2^^, [[Jun-Wei Gan|AUTHOR Jun-Wei Gan]]^^2^^, [[Hongzhi Liu|AUTHOR Hongzhi Liu]]^^1^^, [[Sheng Zhao|AUTHOR Sheng Zhao]]^^2^^, [[Tao Qin|AUTHOR Tao Qin]]^^2^^, [[Tie-Yan Liu|AUTHOR Tie-Yan Liu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Peking University, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2115–2119&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Grapheme-to-phoneme (G2P) conversion is an important task in automatic speech recognition and text-to-speech systems. Recently, G2P conversion is viewed as a sequence to sequence task and modeled by RNN or CNN based encoder-decoder framework. However, previous works do not consider the practical issues when deploying G2P model in the production system, such as how to leverage additional unlabeled data to boost the accuracy, as well as reduce model size for online deployment. In this work, we propose token-level ensemble distillation for G2P conversion, which can (1) boost the accuracy by distilling the knowledge from additional unlabeled data, and (2) reduce the model size but maintain the high accuracy, both of which are very practical and helpful in the online production system. We use token-level knowledge distillation, which results in better accuracy than the sequence-level counterpart. What is more, we adopt the Transformer instead of RNN or CNN based models to further boost the accuracy of G2P conversion. Experiments on the publicly available CMUDict dataset and an internal English dataset demonstrate the effectiveness of our proposed method. Particularly, our method achieves 19.88% WER on CMUDict dataset, outperforming the previous works by more than 4.22% WER, and setting the new state-of-the-art results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liumeng Xue|AUTHOR Liumeng Xue]]^^1^^, [[Wei Song|AUTHOR Wei Song]]^^2^^, [[Guanghui Xu|AUTHOR Guanghui Xu]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^1^^, [[Zhizheng Wu|AUTHOR Zhizheng Wu]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Northwestern Polytechnical University, China; ^^2^^JD.com, China; ^^3^^JD.com, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2060–2064&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When deploying a Chinese neural Text-to-Speech (TTS) system, one of the challenges is to synthesize Chinese utterances with English phrases or words embedded. This paper looks into the problem in the encoder-decoder framework when only monolingual data from a target speaker is available. Specifically, we view the problem from two aspects: speaker consistency within an utterance and naturalness. We start the investigation with an average voice model which is built from multi-speaker monolingual data, i.e., Mandarin and English data. On the basis of that, we look into speaker embedding for speaker consistency within an utterance and phoneme embedding for naturalness and intelligibility, and study the choice of data for model training. We report the findings and discuss the challenges to build a mixed-lingual TTS system with only monolingual data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alex Sokolov|AUTHOR Alex Sokolov]], [[Tracy Rohlin|AUTHOR Tracy Rohlin]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2065–2069&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Grapheme-to-phoneme (G2P) models are a key component in Automatic Speech Recognition (ASR) systems, such as the ASR system in Alexa, as they are used to generate pronunciations for out-of-vocabulary words that do not exist in the pronunciation lexicons (mappings like  “e c h o” →  “E k oU”).

Most G2P systems are monolingual and based on traditional joint-sequence based n-gram models [1, 2]. As an alternative, we present a single end-to-end trained neural G2P model that shares the same encoder and decoder across multiple languages. This allows the model to utilize a combination of universal symbol inventories of Latin-like alphabets and cross-linguistically shared feature representations. Such a model is especially useful in scenarios involving low-resource languages and code switching/foreign words, where the pronunciations in one language need to be adapted to other locales or accents. We further experiment with a word language distribution vector as an additional training target in order to improve system performance by helping the model decouple pronunciations across a variety of languages in the parameter space. We show a 7.2% average improvement in phoneme error rate for low-resource languages and no degradation for high-resource ones compared to monolingual baselines.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jason Taylor|AUTHOR Jason Taylor]], [[Korin Richmond|AUTHOR Korin Richmond]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2070–2074&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Ensuring correct pronunciation for the widest possible variety of text input is vital for deployed text-to-speech (TTS) systems. For languages such as English that do not have trivial spelling, systems have always relied heavily upon a lexicon, both for pronunciation lookup and for training letter-to-sound (LTS) models as a fall-back to handle out-of-vocabulary words (OOVs). In contrast, recently proposed models that are trained “end-to-end” (E2E) aim to avoid linguistic text analysis and any explicit phone representation, instead learning pronunciation implicitly as part of a direct mapping from input characters to speech audio. This might be termed  implicit LTS. In this paper, we explore the nature of this approach by training  explicit LTS models with datasets commonly used to build E2E systems. We compare their performance with LTS models trained on a high quality English lexicon. We find that LTS errors for words with ambiguous or unpredictable pronunciations are mirrored as mispronunciations by an E2E model. Overall, our analysis suggests that limited and unbalanced lexical coverage in E2E training data may pose significant confounding factors that complicate learning accurate pronunciations in a purely E2E system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuan-Jui Chen|AUTHOR Yuan-Jui Chen]], [[Tao Tu|AUTHOR Tao Tu]], [[Cheng-chieh Yeh|AUTHOR Cheng-chieh Yeh]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2075–2079&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end text-to-speech (TTS) has shown great success on large quantities of paired text plus speech data. However, laborious data collection remains difficult for at least 95% of the languages over the world, which hinders the development of TTS in different languages. In this paper, we aim to build TTS systems for such low-resource (target) languages where only very limited paired data are available. We show such TTS can be effectively constructed by transferring knowledge from a high-resource (source) language. Since the model trained on source language cannot be directly applied to target language due to input space mismatch, we propose a method to learn a mapping between source and target linguistic symbols. Benefiting from this learned mapping, pronunciation information can be preserved throughout the transferring procedure. Preliminary experiments show that we only need around 15 minutes of paired data to obtain a relatively good TTS system. Furthermore, analytic studies demonstrated that the automatically discovered mapping correlate well with the phonetic expertise.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu Zhang|AUTHOR Yu Zhang]]^^1^^, [[Ron J. Weiss|AUTHOR Ron J. Weiss]]^^1^^, [[Heiga Zen|AUTHOR Heiga Zen]]^^2^^, [[Yonghui Wu|AUTHOR Yonghui Wu]]^^1^^, [[Zhifeng Chen|AUTHOR Zhifeng Chen]]^^1^^, [[R.J. Skerry-Ryan|AUTHOR R.J. Skerry-Ryan]]^^1^^, [[Ye Jia|AUTHOR Ye Jia]]^^1^^, [[Andrew Rosenberg|AUTHOR Andrew Rosenberg]]^^1^^, [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Google, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2080–2084&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a multispeaker, multilingual text-to-speech (TTS) synthesis model based on Tacotron that is able to produce high quality speech in multiple languages. Moreover, the model is able to transfer voices across languages, e.g. synthesize fluent Spanish speech using an English speaker’s voice, without training on any bilingual or parallel examples. Such transfer works across distantly related languages, e.g. English and Mandarin.

Critical to achieving this result are: 1. using a phonemic input representation to encourage sharing of model capacity across languages, and 2. incorporating an adversarial loss term to encourage the model to disentangle its representation of speaker identity (which is perfectly correlated with language in the training data) from the speech content. Further scaling up the model by training on multiple speakers of each language, and incorporating an autoencoding input to help stabilize attention during training, results in a model which can be used to consistently synthesize intelligible speech for training speakers in all languages seen during training, and in native or foreign accents.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Markéta Jůzová|AUTHOR Markéta Jůzová]], [[Daniel Tihelka|AUTHOR Daniel Tihelka]], [[Jakub Vít|AUTHOR Jakub Vít]]
</p><p class="cpabstractcardaffiliationlist">University of West Bohemia, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2085–2089&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce a unified Grapheme-to-phoneme conversion framework based on the composition of deep neural networks. In contrary to the usual approaches building the G2P frameworks from the dictionary, we use whole phrases, which allows us to capture various language properties, e.g. cross-word assimilation, without the need for any special care or topology adjustments. The evaluation is carried out on three different languages — English, Czech and Russian. Each requires dealing with specific properties, stressing the proposed framework in various ways. The very first results show promising performance of the proposed framework, dealing with all the phenomena specific to the tested languages. Thus, we consider the framework to be language-independent for a wide range of languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dongyang Dai|AUTHOR Dongyang Dai]]^^1^^, [[Zhiyong Wu|AUTHOR Zhiyong Wu]]^^1^^, [[Shiyin Kang|AUTHOR Shiyin Kang]]^^2^^, [[Xixin Wu|AUTHOR Xixin Wu]]^^3^^, [[Jia Jia|AUTHOR Jia Jia]]^^1^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^4^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tsinghua University, China; ^^2^^Tencent, China; ^^3^^CUHK, China; ^^4^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2090–2094&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Grapheme-to-phoneme (G2P) conversion serves as an essential component in Chinese Mandarin text-to-speech (TTS) system, where polyphone disambiguation is the core issue. In this paper, we propose an end-to-end framework to predict the pronunciation of polyphonic character, which accepts sentence containing polyphonic character as input in the form of Chinese character sequence without the necessity of any preprocessing. The proposed method consists of a pre-trained bidirectional encoder representations from Transformers (BERT) model and a neural network (NN) based classifier. The pre-trained BERT model extracts semantic features from raw Chinese character sequence and the NN based classifier predicts the polyphonic character’s pronunciation according to BERT output. To explore the impact of contextual information on polyphone disambiguation, three different classifiers are investigated: a fully-connected network based classifier, a long short-term memory (LSTM) network based classifier and a Transformer block based classifier. Experimental results demonstrate the effectiveness of the proposed end-to-end framework for polyphone disambiguation and the semantic features extracted by BERT can greatly enhance the performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sevinj Yolchuyeva|AUTHOR Sevinj Yolchuyeva]], [[Géza Németh|AUTHOR Géza Németh]], [[Bálint Gyires-Tóth|AUTHOR Bálint Gyires-Tóth]]
</p><p class="cpabstractcardaffiliationlist">BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2095–2099&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention mechanism is one of the most successful techniques in deep learning based Natural Language Processing (NLP). The transformer network architecture is completely based on attention mechanisms, and it outperforms sequence-to-sequence models in neural machine translation without recurrent and convolutional layers. Grapheme-to-phoneme (G2P) conversion is a task of converting letters (grapheme sequence) to their pronunciations (phoneme sequence). It plays a significant role in text-to-speech (TTS) and automatic speech recognition (ASR) systems. In this paper, we investigate the application of transformer architecture to G2P conversion and compare its performance with recurrent and convolutional neural network based approaches. Phoneme and word error rates are evaluated on the CMUDict dataset for US English and the NetTalk dataset. The results show that transformer based G2P outperforms the convolutional-based approach in terms of word error rate and our results significantly exceeded previous recurrent approaches (without attention) regarding word and phoneme error rates on both datasets. Furthermore, the size of the proposed model is much smaller than the size of the previous approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinjian Li|AUTHOR Xinjian Li]], [[Siddharth Dalmia|AUTHOR Siddharth Dalmia]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Florian Metze|AUTHOR Florian Metze]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2120–2124&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multilingual acoustic models have been successfully applied to low-resource speech recognition. Most existing works have combined many small corpora together, and pretrained a multilingual model by sampling from each corpus uniformly. The model is eventually fine-tuned on each target corpus. This approach, however, fails to exploit the relatedness and similarity among corpora in the training set. For example, the target corpus might benefit more from a corpus in the same domain or a corpus from a close language. In this work, we propose a simple but useful sampling strategy to take advantage of this relatedness. We first compute the corpus-level embeddings and estimate the similarity between each corpus. Next we start training the multilingual model with uniform-sampling from each corpus at first, then we gradually increase the probability to sample from related corpora based on its similarity with the target corpus. Finally the model would be fine-tuned automatically on the target corpus. Our sampling strategy outperforms the baseline multilingual model on 16 low-resource tasks. Additionally, we demonstrate that our corpus embeddings capture the language and domain information of each corpus.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhiping Zeng|AUTHOR Zhiping Zeng]]^^1^^, [[Yerbolat Khassanov|AUTHOR Yerbolat Khassanov]]^^2^^, [[Van Tung Pham|AUTHOR Van Tung Pham]]^^1^^, [[Haihua Xu|AUTHOR Haihua Xu]]^^1^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Temasek Laboratories @ NTU, Singapore; ^^2^^NTU, Singapore; ^^3^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2165–2169&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching (CS) refers to a linguistic phenomenon where a speaker uses different languages in an utterance or between alternating utterances. In this work, we study end-to-end (E2E) approaches to the Mandarin-English code-switching speech recognition task. We first examine the effectiveness of using data augmentation and byte-pair encoding (BPE) subword units. More importantly, we propose a multitask learning recipe, where a language identification task is explicitly learned in addition to the E2E speech recognition task. Furthermore, we introduce an efficient word vocabulary expansion method for language modeling to alleviate data sparsity issues under the code-switching scenario. Experimental results on the SEAME data, a Mandarin-English code-switching corpus, demonstrate the effectiveness of the proposed methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shiliang Zhang|AUTHOR Shiliang Zhang]]^^1^^, [[Yuan Liu|AUTHOR Yuan Liu]]^^1^^, [[Ming Lei|AUTHOR Ming Lei]]^^1^^, [[Bin Ma|AUTHOR Bin Ma]]^^2^^, [[Lei Xie|AUTHOR Lei Xie]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Alibaba Group, China; ^^2^^Alibaba Group, Singapore; ^^3^^Northwestern Polytechnical University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2170–2174&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multilingual and code-switching speech recognition are two challenging tasks that are studied separately in many previous works. In this work, we jointly study multilingual and code-switching problems, and present a language-universal bilingual system for Mandarin-English speech recognition. Specifically, we propose a novel bilingual acoustic model, which consists of two monolingual system initialized subnets and a shared output layer corresponding to the  Character-Subword acoustic modeling units. The bilingual acoustic model is trained using a large Mandarin-English corpus with CTC and sMBR criteria. We find that this model, which is not given any information about language identity, can achieve comparable performance in monolingual Mandarin and English test sets compared to the well-trained language-specific Mandarin and English ASR systems, respectively. More importantly, the proposed bilingual model can automatically learn the language switching. Experimental results on a Mandarin-English code-switching test set show that it can achieve 11.8% and 17.9% relative error reduction on Mandarin and English parts, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Harish Arsikere|AUTHOR Harish Arsikere]], [[Ashtosh Sapru|AUTHOR Ashtosh Sapru]], [[Sri Garimella|AUTHOR Sri Garimella]]
</p><p class="cpabstractcardaffiliationlist">Amazon, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2125–2129&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a simple phone mapping approach to multi-dialect acoustic modeling. In contrast to the widely used shared hidden layer (SHL) training approach (hidden layers are shared across dialects whereas output layers are kept separate), phone mapping simplifies model training and maintenance by allowing all the network parameters to be shared; it also simplifies online adaptation via HMM-based i-vectors by allowing the same T-matrix to be used for all the dialects. Using the LSTM-HMM framework, we compare phone mapping with transfer learning and SHL training, and we also compare the efficacy of online i-vectors with that of one-hot dialect encoding. Experiments with a 2K hour dataset comprising four English dialects show that (1) phone mapping yields significant WER reductions over dialect-specific training (14%, on average) and transfer learning (5%, on average); (2) SHL training is only slightly better than phone mapping; and (3) i-vectors provide useful additional reductions (3%, on average) while one-hot encoding has little effect. Even with a large 40K hour dataset (comprising the same four English dialects) and fully optimized sequence discriminative training, we show that phone mapping provides healthy WER reductions over dialect-specific models (10%, on average).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anjuli Kannan|AUTHOR Anjuli Kannan]], [[Arindrima Datta|AUTHOR Arindrima Datta]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Eugene Weinstein|AUTHOR Eugene Weinstein]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Ankur Bapna|AUTHOR Ankur Bapna]], [[Zhifeng Chen|AUTHOR Zhifeng Chen]], [[Seungji Lee|AUTHOR Seungji Lee]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2130–2134&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multilingual end-to-end (E2E) models have shown great promise in expansion of automatic speech recognition (ASR) coverage of the world’s languages. They have shown improvement over monolingual systems, and have simplified training and serving by eliminating language-specific acoustic, pronunciation, and language models. This work presents an E2E multilingual system which is equipped to operate in low-latency interactive applications, as well as handle a key challenge of real world data: the imbalance in training data across languages. Using nine Indic languages, we compare a variety of techniques, and find that a combination of conditioning on a language vector and training language-specific adapter layers produces the best model. The resulting E2E multilingual model achieves a lower word error rate (WER) than both monolingual E2E models (eight of nine languages) and monolingual conventional systems (all nine languages).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Carlos Mendes|AUTHOR Carlos Mendes]]^^1^^, [[Alberto Abad|AUTHOR Alberto Abad]]^^2^^, [[João Paulo Neto|AUTHOR João Paulo Neto]]^^1^^, [[Isabel Trancoso|AUTHOR Isabel Trancoso]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^VoiceInteraction, Portugal; ^^2^^INESC-ID, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2135–2139&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the broadcast news domain, national wide newscasters typically interact with communities with a diverse set of accents. One of the challenges in speech recognition is the performance degradation in the presence of these diverse conditions. Performance further aggravates when the accents are from other countries that share the same language. Extensive work has been conducted in this topic for languages such as English and Mandarin. Recently, TDNN based multi-task learning has received some attention in this area, with interesting results, typically using models trained with a variety of different accented corpora from a particular language. In this work, we look at the case of LATAM (Latin American) Spanish for its unique and distinctive accent variations. Because LATAM Spanish has historically been influenced by non-Spanish European migrations, we anticipated that LATAM based speech recognition performance can be further improved by including these influential languages, during a TDNN based multi-task training. Experiments show that including such languages in the training setup outperforms the single task acoustic model baseline. We also propose an automatic per-language weight selection strategy to regularize each language contribution during multi-task training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thibault Viglino|AUTHOR Thibault Viglino]]^^1^^, [[Petr Motlicek|AUTHOR Petr Motlicek]]^^2^^, [[Milos Cernak|AUTHOR Milos Cernak]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EPFL, Switzerland; ^^2^^Idiap Research Institute, Switzerland; ^^3^^Logitech, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2140–2144&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Correct pronunciation is known to be the most difficult part to acquire for (native or non-native) language learners. The accented speech is thus more variable, and standard Automatic Speech Recognition (ASR) training approaches that rely on intermediate phone alignment might introduce errors during the ASR training. With end-to-end training we could alleviate this problem. In this work, we explore the use of multi-task training and accent embedding in the context of end-to-end ASR trained with the connectionist temporal classification loss. Comparing to the baseline developed using conventional ASR framework exploiting time-delay neural networks trained on accented English, we show significant relative improvement of about 25% in word error rate. Additional evaluation on unseen accent data yields relative improvements of of 31% and 2% for New Zealand English and Indian English, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sheng Li|AUTHOR Sheng Li]], [[Chenchen Ding|AUTHOR Chenchen Ding]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Peng Shen|AUTHOR Peng Shen]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]
</p><p class="cpabstractcardaffiliationlist">NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2145–2149&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The end-to-end (E2E) model allows for training of automatic speech recognition (ASR) systems without the hand-designed language-specific pronunciation lexicons. However, constructing the multilingual low-resource E2E ASR system is still challenging due to the vast number of symbols (e.g., words and characters). In this paper, we investigate an efficient method of encoding multilingual transcriptions for training E2E ASR systems. We directly encode the symbols of multilingual writing systems to universal articulatory representations, which is much more compact than characters and words. Compared with traditional multilingual modeling methods, we directly build a single acoustic-articulatory within recent transformer-based E2E framework for ASR tasks. The speech recognition results of our proposed method significantly outperform the conventional word-based and character-based E2E models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Karan Taneja|AUTHOR Karan Taneja]]^^1^^, [[Satarupa Guha|AUTHOR Satarupa Guha]]^^2^^, [[Preethi Jyothi|AUTHOR Preethi Jyothi]]^^1^^, [[Basil Abraham|AUTHOR Basil Abraham]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIT Bombay, India; ^^2^^Microsoft, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2150–2154&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One of the main challenges in building code-mixed ASR systems is the lack of annotated speech data. Often, however, monolingual speech corpora are available in abundance for the languages in the code-mixed speech. In this paper, we explore different techniques that use monolingual speech to create synthetic code-mixed speech and examine their effect on training models for code-mixed ASR. We assume access to a small amount of real code-mixed text, from which we extract probability distributions that govern the transition of phones across languages at code-switch boundaries and the span lengths corresponding to a particular language. We extract segments from monolingual data and concatenate them to form code-mixed utterances such that these probability distributions are preserved. Using this synthetic speech, we show significant improvements in Hindi-English code-mixed ASR performance compared to using synthetic speech naively constructed from complete utterances in different languages. We also present language modelling experiments that use synthetically constructed code-mixed text and discuss their benefits.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ke Hu|AUTHOR Ke Hu]], [[Antoine Bruguier|AUTHOR Antoine Bruguier]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]], [[Golan Pundak|AUTHOR Golan Pundak]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2155–2159&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Contextual automatic speech recognition, i.e., biasing recognition towards a given context (e.g. user’s playlists, or contacts), is challenging in end-to-end (E2E) models. Such models maintain a limited number of candidates during beam-search decoding, and have been found to recognize rare named entities poorly. The problem is exacerbated when biasing towards proper nouns in foreign languages, e.g., geographic location names, which are virtually unseen in training and are thus out-of-vocabulary (OOV). While grapheme or wordpiece E2E models might have a difficult time spelling OOV words, phonemes are more acoustically salient and past work has shown that E2E phoneme models can better predict such words. In this work, we propose an E2E model containing both English wordpieces and phonemes in the modeling space, and perform contextual biasing of foreign words at the phoneme level by mapping pronunciations of foreign words into similar English phonemes. In experimental evaluations, we find that the proposed approach performs 16% better than a grapheme-only biasing model, and 8% better than a wordpiece-only biasing model on a foreign place name recognition task, with only slight degradation on regular English tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yerbolat Khassanov|AUTHOR Yerbolat Khassanov]]^^1^^, [[Haihua Xu|AUTHOR Haihua Xu]]^^2^^, [[Van Tung Pham|AUTHOR Van Tung Pham]]^^1^^, [[Zhiping Zeng|AUTHOR Zhiping Zeng]]^^2^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^, [[Chongjia Ni|AUTHOR Chongjia Ni]]^^3^^, [[Bin Ma|AUTHOR Bin Ma]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^Temasek Laboratories @ NTU, Singapore; ^^3^^Alibaba Group, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2160–2164&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The lack of code-switch training data is one of the major concerns in the development of end-to-end code-switching automatic speech recognition (ASR) models. In this work, we propose a method to train an improved end-to-end code-switching ASR using only monolingual data. Our method encourages the distributions of output token embeddings of monolingual languages to be similar, and hence, promotes the ASR model to easily code-switch between languages. Specifically, we propose to use Jensen-Shannon divergence and cosine distance based constraints. The former will enforce output embeddings of monolingual languages to possess similar distributions, while the later simply brings the centroids of two distributions to be close to each other. Experimental results demonstrate high effectiveness of the proposed method, yielding up to 4.5% absolute mixed error rate improvement on Mandarin-English code-switching ASR task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prakhar Swarup|AUTHOR Prakhar Swarup]]^^1^^, [[Roland Maas|AUTHOR Roland Maas]]^^2^^, [[Sri Garimella|AUTHOR Sri Garimella]]^^1^^, [[Sri Harish Mallidi|AUTHOR Sri Harish Mallidi]]^^2^^, [[Björn Hoffmeister|AUTHOR Björn Hoffmeister]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, India; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2175–2179&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In automatic speech recognition, confidence measures provide a quantitative representation used to assess whether a generated hypothesis text is correct or not. For personal assistant devices like Alexa, automatic speech recognition (ASR) errors are inevitable due to the imperfection of today’s speech recognition technology. Hence, confidence scores provide an important metric to gauge the correctness of ASR hypothesis text and enable downstream consumers to subsequently initiate appropriate actions. In this work, our aim is to improve the correctness of our confidence scores by enhancing our baseline model architecture with learned features, namely acoustic and 1-best hypothesis embeddings. These embeddings are obtained by training separate networks on acoustic features and ASR 1-best hypothesis respectively. We present an experimental evaluation on a large US English data set showing a 6% relative equal error rate reduction and 13% relative normalized cross-entropy improvement over our baseline system by incorporating these embeddings. We also present a deeper analysis of the embeddings revealing that the acoustic embedding results in a better prediction of insertion errors whereas the 1-best hypothesis embedding helps to better predict substitution errors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Martin Karafiát|AUTHOR Martin Karafiát]]^^1^^, [[Murali Karthick Baskar|AUTHOR Murali Karthick Baskar]]^^1^^, [[Shinji Watanabe|AUTHOR Shinji Watanabe]]^^2^^, [[Takaaki Hori|AUTHOR Takaaki Hori]]^^3^^, [[Matthew Wiesner|AUTHOR Matthew Wiesner]]^^2^^, [[Jan Černocký|AUTHOR Jan Černocký]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Brno University of Technology, Czech Republic; ^^2^^Johns Hopkins University, USA; ^^3^^MERL, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2220–2224&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper investigates the applications of various multilingual approaches developed in conventional deep neural network - hidden Markov model (DNN-HMM) systems to sequence-to-sequence (seq2seq) automatic speech recognition (ASR). We employ a joint connectionist temporal classification-attention network as our base model. Our main contribution is separated into two parts. First, we investigate the effectiveness of the seq2seq model with stacked multilingual bottle-neck features obtained from a conventional DNN-HMM system on the Babel multilingual speech corpus. Second, we investigate the effectiveness of transfer learning from a pre-trained multilingual seq2seq model with and without the target language included in the original multilingual training data. In this experiment, we also explore various architectures and training strategies of the multilingual seq2seq model by making use of knowledge obtained in the DNN-HMM based transfer-learning. Although both approaches significantly improved the performance from a monolingual seq2seq baseline, interestingly, we found the multilingual bottle-neck features to be superior to multilingual models with transfer learning. This finding suggests that we can efficiently combine the benefits of the DNN-HMM system with the seq2seq system through multilingual bottle-neck feature techniques.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michał Zapotoczny|AUTHOR Michał Zapotoczny]], [[Piotr Pietrzak|AUTHOR Piotr Pietrzak]], [[Adrian Łańcucki|AUTHOR Adrian Łańcucki]], [[Jan Chorowski|AUTHOR Jan Chorowski]]
</p><p class="cpabstractcardaffiliationlist">University of Wrocław, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2225–2229&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Attention-based neural speech recognition models are frequently decoded with beam search, which produces a tree of hypotheses. In many cases, such as when using external language models, numerous decoding hypotheses need to be considered, requiring large beam sizes during decoding. We demonstrate that it is possible to merge certain nodes in a tree of hypotheses, in order to obtain a decoding lattice, which increases the number of decoding hypotheses without increasing the number of candidates that are scored by the neural network. We propose a convolutional architecture, which facilitates comparing states of the model at different pi The experiments are carried on the Wall Street Journal dataset, where the lattice decoder obtains lower word error rates with smaller beam sizes, than an otherwise similar architecture with regular beam search.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Martin Jansche|AUTHOR Martin Jansche]], [[Alexander Gutkin|AUTHOR Alexander Gutkin]]
</p><p class="cpabstractcardaffiliationlist">Google, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2230–2234&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Stochastic finite automata arise naturally in many language and speech processing tasks. They include stochastic acceptors, which represent certain probability distributions over random strings. We consider the problem of efficient sampling: drawing random string variates from the probability distribution represented by stochastic automata and transformations of those. We show that path-sampling is effective and can be efficient if the epsilon-graph of a finite automaton is acyclic. We provide an algorithm that ensures this by conflating epsilon-cycles within strongly connected components. Sampling is also effective in the presence of non-injective transformations of strings. We illustrate this in the context of decoding for Connectionist Temporal Classification (CTC), where the predictive probabilities yield auxiliary sequences which are transformed into shorter labeling strings. We can sample efficiently from the transformed labeling distribution and use this in two different strategies for finding the most probable CTC labeling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Łukasz Dudziak|AUTHOR Łukasz Dudziak]], [[Mohamed S. Abdelfattah|AUTHOR Mohamed S. Abdelfattah]], [[Ravichander Vipperla|AUTHOR Ravichander Vipperla]], [[Stefanos Laskaridis|AUTHOR Stefanos Laskaridis]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]
</p><p class="cpabstractcardaffiliationlist">Samsung, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2235–2239&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end automatic speech recognition (ASR) models are increasingly large and complex to achieve the best possible accuracy. In this paper, we build an AutoML system that uses reinforcement learning (RL) to optimize the per-layer compression ratios when applied to a state-of-the-art attention based end-to-end ASR model composed of several LSTM layers. We use singular value decomposition (SVD) low-rank matrix factorization as the compression method. For our RL-based AutoML system, we focus on practical considerations such as the choice of the reward/punishment functions, the formation of an effective search space, and the creation of a representative but small data set for quick evaluation between search steps. Finally, we present accuracy results on LibriSpeech of the model compressed by our AutoML system, and we compare it to manually-compressed models. Our results show that in the absence of retraining our RL-based search is an effective and practical method to compress a production-grade ASR system. When retraining is possible, we show that our AutoML system can select better highly-compressed seed models compared to manually hand-crafted rank selection, thus allowing for more compression than previously possible.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Yifan Gong|AUTHOR Yifan Gong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2240–2244&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Directly emitting words and sub-words from speech spectrogram has been shown to produce good results using end-to-end (E2E) trained models. Connectionist Temporal Classification (CTC) and Sequence-to-Sequence attention (Seq2Seq) models have both shown better success when directly targeting words or sub-words. In this work, we ask the question: Can an E2E model go beyond words and transcribe directly to phrases (i.e., a group of words)? Directly modeling frequent phrases might be better than modeling its constituent words. Also, emitting multiple words together might speed up inference in models like Seq2Seq where decoding is inherently sequential. To answer this, we undertake a study on a 3400-hour Microsoft Cortana voice assistant task. We present a side-by-side comparison for CTC and Seq2Seq models that have been trained to target a variety of tokens including letters, sub-words, words and phrases. We show that an E2E model can indeed transcribe directly to phrases. We see that while CTC has difficulty in accurately modeling phrases, a more powerful model like Seq2Seq can effortlessly target phrases that are up to 4 words long, with only a reasonable degradation in the final word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ruizhi Li|AUTHOR Ruizhi Li]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2245–2249&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Measuring performance of an automatic speech recognition (ASR) system without ground-truth could be beneficial in many scenarios, especially with data from unseen domains, where performance can be highly inconsistent. In conventional ASR systems, several performance monitoring (PM) techniques have been well-developed to monitor performance by looking at tri-phone posteriors or pre-softmax activations from neural network acoustic modeling. However, strategies for monitoring more recently developed end-to-end ASR systems have not yet been explored, and so that is the focus of this paper. We adapt previous PM measures (Entropy, M-measure and Autoencoder) and apply our proposed RNN predictor in the end-to-end setting. These measures utilize the decoder output layer and attention probability vectors, and their predictive power is measured with simple linear models. Our findings suggest that decoder-level features are more feasible and informative than attention-level probabilities for PM measures, and that M-measure on the decoder posteriors achieves the best overall predictive performance with an average prediction error 8.8%. Entropy measures and RNN-based prediction also show competitive predictability, especially for unseen conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Ming Lei|AUTHOR Ming Lei]], [[Zhijie Yan|AUTHOR Zhijie Yan]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2180–2184&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Connectionist Temporal Classification (CTC) based end-to-end speech recognition system usually need to incorporate an external language model by using WFST-based decoding in order to achieve promising results. This is more essential to Mandarin speech recognition since it owns a special phenomenon, namely  homophone, which causes a lot of substitution errors. The linguistic information introduced by language model is somehow helpful to distinguish these substitution errors. In this work, we propose a transformer based spelling correction model to automatically correct errors, especially the substitution errors, made by CTC-based Mandarin speech recognition system. Specifically, we investigate to use the recognition results generated by CTC-based systems as input and the ground-truth transcriptions as output to train a transformer with encoder-decoder architecture, which is much similar to machine translation. Experimental results in a 20,000 hours Mandarin speech recognition task show that the proposed spelling correction model can achieve a CER of 3.41%, which results in 22.9% and 53.2% relative improvement compared to the baseline CTC-based systems decoded with and without language model, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cal Peyser|AUTHOR Cal Peyser]], [[Hao Zhang|AUTHOR Hao Zhang]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Zelin Wu|AUTHOR Zelin Wu]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2185–2189&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recognizing written domain numeric utterances (e.g., I need 1.25.) can be challenging for ASR systems, particularly when numeric sequences are not seen during training. This out-of-vocabulary (OOV) issue is addressed in conventional ASR systems by training part of the model on spoken domain utterances (e.g., I need one dollar and twenty five cents.), for which numeric sequences are composed of in-vocabulary numbers, and then using an FST verbalizer to denormalize the result. Unfortunately, conventional ASR models are not suitable for the low memory setting of on-device speech recognition. E2E models such as RNN-T are attractive for on-device ASR, as they fold the AM, PM and LM of a conventional model into one neural network. However, in the on-device setting the large memory footprint of an FST denormer makes spoken domain training more difficult. In this paper, we investigate techniques to improve E2E model performance on numeric data. We find that using a text-to-speech system to generate additional numeric training data, as well as using a small-footprint neural network to perform spoken-to-written domain denorming, yields improvement in several numeric classes. In the case of the longest numeric sequences, we see reduction of WER by up to a factor of 8.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ye Bai|AUTHOR Ye Bai]]^^1^^, [[Jiangyan Yi|AUTHOR Jiangyan Yi]]^^1^^, [[Jianhua Tao|AUTHOR Jianhua Tao]]^^1^^, [[Zhengqi Wen|AUTHOR Zhengqi Wen]]^^1^^, [[Zhengkun Tian|AUTHOR Zhengkun Tian]]^^1^^, [[Chenghao Zhao|AUTHOR Chenghao Zhao]]^^2^^, [[Cunhang Fan|AUTHOR Cunhang Fan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Jiangsu Normal University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2190–2194&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword spotting requires a small memory footprint to run on mobile devices. However, previous works still use several hundred thousand parameters to achieve good performance. To address this issue, we propose a time delay neural network with shared weight self-attention for small-footprint keyword spotting. By sharing weights, the parameters of self-attention are reduced but without performance reduction. The publicly available Google Speech Commands dataset is used to evaluate the models. The number of parameters (12K) of our model is 1/20 of state-of-the-art ResNet model (239K). The proposed model achieves an error rate of 4.19% , which is comparable to the ResNet model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Ming Sun|AUTHOR Ming Sun]], [[Yixin Gao|AUTHOR Yixin Gao]], [[Shiv Vitaladevuni|AUTHOR Shiv Vitaladevuni]], [[Chao Wang|AUTHOR Chao Wang]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2195–2199&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper proposes a Sub-band Convolutional Neural Network for spoken term classification. Convolutional neural networks (CNNs) have proven to be very effective in acoustic applications such as spoken term classification, keyword spotting, speaker identification, acoustic event detection, etc. Unlike applications in computer vision, the spatial invariance property of 2D convolutional kernels does not fit acoustic applications well since the meaning of a specific 2D kernel varies a lot along the feature axis in an input feature map. We propose a sub-band CNN architecture to apply different convolutional kernels on each feature sub-band, which makes the overall computation more efficient. Experimental results show that the computational efficiency brought by sub-band CNN is more beneficial for small-footprint models. Compared to a baseline full band CNN for spoken term classification on a publicly available Speech Commands dataset, the proposed sub-band CNN architecture reduces the computation by 39.7% on commands classification, and 49.3% on digits classification with accuracy maintained.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sheng Li|AUTHOR Sheng Li]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Chenchen Ding|AUTHOR Chenchen Ding]], [[Peng Shen|AUTHOR Peng Shen]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]
</p><p class="cpabstractcardaffiliationlist">NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2200–2204&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Training automatic speech recognition (ASR) systems for East Asian languages (e.g., Chinese and Japanese) is tough work because of the characters existing in the writing systems of these languages. Traditionally, we first need to get the pronunciation of these characters by morphological analysis. The end-to-end (E2E) model allows for directly using characters or words as the modeling unit. However, since different groups of people (e.g., residents in Chinese mainland, Hong Kong, Taiwan, and Japan) adopts different writing forms for a character, this also leads to a large increase in the number of vocabulary, especially when building ASR systems across languages or dialects. In this paper, we propose a new E2E ASR modeling method by decomposing the characters into a set of radicals. Our experiments demonstrate that it is possible to effectively reduce the vocabulary size by sharing the basic radicals across different dialect of Chinese. Moreover, we also demonstrate this method could also be used to construct a Japanese E2E ASR system. The system modeled with radicals and kana achieved similar performance compared to state-of-the-art E2E system built with word-piece units.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiaqi Guo|AUTHOR Jiaqi Guo]]^^1^^, [[Yongbin You|AUTHOR Yongbin You]]^^2^^, [[Yanmin Qian|AUTHOR Yanmin Qian]]^^1^^, [[Kai Yu|AUTHOR Kai Yu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shanghai Jiao Tong University, China; ^^2^^AISpeech, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2205–2209&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Connectionist temporal classification (CTC) has been successfully used in speech recognition. It learns the alignments between speech frames and label sequences automatically without explicit pre-generated frame-level labels. While this property is convenient for shortening the training pipeline, it may become a potential disadvantage for the frame-level system combination due to inaccurate alignments. In this paper, a novel Dynamic Time Warping (DTW) based position calibration algorithm is proposed for joint decoding on two CTC based acoustic models. Furthermore, joint decoding for CTC and conventional hybrid NN-HMM models is also explored. Experiments on a large vocabulary Mandarin speech recognition task show that the proposed joint decoding of both CTC based and CTC-Hybrid based systems can achieve a significant and consistent character error rate reduction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Takanobu Oba|AUTHOR Takanobu Oba]], [[Yushi Aono|AUTHOR Yushi Aono]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2210–2214&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents joint end-to-end and deep neural network-hidden Markov model (DNN-HMM) hybrid automatic speech recognition (ASR) systems that share network components. End-to-end ASR systems have been shown competitive performance compared with the DNN-HMM hybrid ASR systems in recent studies. These systems have different advantages, which are an estimation ability based on the totally optimized model of the end-to-end ASR system and a stable processing based on a frame-by-frame manner of the DNN-HMM hybrid ASR system. In our previous study, we proposed a method to utilize an end-to-end ASR system for rescoring hypotheses generated from a DNN-HMM hybrid ASR system. However, the conventional method cannot efficiently leverage the advantages since network components are independently modeled. In order to tackle this problem, we propose a joint end-to-end and DNN-HMM hybrid ASR systems that share the network to transfer knowledge of the systems. In the proposed method, end-to-end ASR systems utilize the information from an output of an internal layer in a DNN acoustic model in the DNN-HMM hybrid ASR system for enhancing the end-to-end ASR system. This enables us to efficiently leverage sharable information for improving the joint ASR system. Experimental results show that the proposed method outperforms the conventional method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Karan Malhotra|AUTHOR Karan Malhotra]], [[Shubham Bansal|AUTHOR Shubham Bansal]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2215–2219&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently developed end-to-end (E2E) automatic speech recognition (ASR) systems demand abundance of transcribed speech data, there are several scenarios where the labeling of speech data is cumbersome and expensive. For a fixed annotation cost, active learning for speech recognition allows to efficiently train the ASR model. In this work, we advance the most common approach for active learning methods which relies on uncertainty sampling technique. In particular, we explore the use of path probability of the decoded sequence as a confidence measure and select the samples with the least confidence for active learning. In order to reduce the sampling bias in active learning, we propose a regularized uncertainty sampling approach that incorporates an i-vector diversity measure. Thus, the active learning in the proposed framework uses a joint score of uncertainty and i-vector diversity. The benefits of the proposed approach are illustrated for an E2E ASR task performed on CSJ and Librispeech datasets. In these experiments, we show that the proposed approach yields considerable improvements over the baseline model using random sampling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]], [[Santiago Barreda|AUTHOR Santiago Barreda]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2250–2254&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study examines the role of musical experience on listeners’ phoneme judgments across noise conditions. Individuals with 10+ years of musical training were compared with nonmusicians in their use of three acoustic cues in categorizing post-vocalic obstruent voicing: fundamental frequency, vowel duration, and spectral composition in two listening conditions (silence and multitalker babble, MTB). Results demonstrate that musicians display steeper phonemic categorization for coda /t/ and /d/ on the basis of all three cues of interest. Additionally, musicians and nonmusicians show different cue weighting patterns in MTB than in silence. The findings are discussed with reference to their implications for theories of experience-driven plasticity and individual differences in the perceptual organization of phonemes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Julien Meyer|AUTHOR Julien Meyer]]^^1^^, [[Laure Dentel|AUTHOR Laure Dentel]]^^2^^, [[Silvain Gerber|AUTHOR Silvain Gerber]]^^1^^, [[Rachid Ridouane|AUTHOR Rachid Ridouane]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^GIPSA-lab (UMR 5216), France; ^^2^^World Whistles Research Association, France; ^^3^^LPP (UMR 7018), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2295–2299&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present study compares the perceptual categorization of four CV syllables /ta, da, ka, ga/ in two different speech registers — modal speech and whistled speech — of Tashlhiyt Berber used in the Moroccan High Atlas. Whistled speech in a non-tonal language such as Tashlhiyt is a special speech register used for long distance dialogues that consists of the natural production of vocalic and consonantal qualities in a simple modulated whistled signal. The technique of whistling imposes various restrictions on speech articulation, which result in a simplification of the phonetics of spoken speech into a ‘whistled formant’. Here, we describe this simplification for Tashlhiyt syllables /ta, da, ka, ga/ and use them as stimuli in a behavioral experiment. We analyze and compare the perceptual categorization obtained from native Tashlhiyt listeners (trained since childhood in whistled speech) for both speech registers on these 4 syllable types. Results show that whistled stimuli were fairly well identified (~42%) above chance (25%), though less well than spoken ones (~84%). The detailed analysis of confusions between CVs enabled us to understand better how whistled consonants are perceived, highlighting the phonological contrasts that are best perceived and retained from spoken to whistled speech in this language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Han-Chi Hsieh|AUTHOR Han-Chi Hsieh]], [[Wei-Zhong Zheng|AUTHOR Wei-Zhong Zheng]], [[Ko-Chiang Chen|AUTHOR Ko-Chiang Chen]], [[Ying-Hui Lai|AUTHOR Ying-Hui Lai]]
</p><p class="cpabstractcardaffiliationlist">National Yang-Ming University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2300–2304&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The consonant is an important element in Mandarin, and various categories of consonant generation effectuate various facial expressions. Specifically, there are changes in facial muscles when speaking, and these changes are closely related to pronunciation; the facial muscles are associated with these hidden articulators, and the effects on the facial changes can be seen as 3D changes. However, in most studies, 2D images are used to analyze facial features when people talk. The 2D images serve to provide information in two dimensions (x- and y-axis); however, subtle deep motions (z-axis changes) of facial muscles when speaking can be difficult to detect accurately. Hence, the depth feature of the face (the point cloud feature in this study) was used to investigate the potential for consonant recognition, recorded by a time-of-flight 3D camera. In this study, we propose an algorithm to recognize the seven categories of Mandarin consonants using the depth features of the speaker’s face. The proposed system yielded suitable classification accuracy for the recognition of seven categories of Mandarin consonants. This result implies that depth features can be used for speech-processing applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shiri Lev-Ari|AUTHOR Shiri Lev-Ari]]^^1^^, [[Robin Dodsworth|AUTHOR Robin Dodsworth]]^^2^^, [[Jeff Mielke|AUTHOR Jeff Mielke]]^^2^^, [[Sharon Peperkamp|AUTHOR Sharon Peperkamp]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^RHUL, UK; ^^2^^North Carolina State University, USA; ^^3^^LSCP (UMR 8554), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2305–2309&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The way people speak reflects their demographic background. Listeners exploit this contingent variation and make use of information about speakers’ background to process their speech. Evidence for this comes from both phonetic and lexical tasks, and the two are assumed to tap into the same mechanism and provide equivalent results. Curiously, this assumption has never been tested. Additionally, while it has been established that expectations can influence language processing in general, the role of individual differences in susceptibility to this influence is relatively unexplored. We investigate these two questions in the context of Southern and General American speech varieties in the USA. We show that phonetic and lexical tasks are not equivalent, and furthermore, that the two are driven by mechanisms that are sensitive to different individual variables: while performance at the lexical level is influenced by implicit bias, performance at the phonetic level is influenced by working memory. These results thus change our understanding of how expectations influence processing, and have implications for how to conduct and interpret studies on the topic.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bruno Ferenc Segedin|AUTHOR Bruno Ferenc Segedin]], [[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]
</p><p class="cpabstractcardaffiliationlist">University of California at Davis, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2310–2314&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice-activated artificially-intelligent digital devices are a new type of interlocutor. Like for human talkers, they have idiosyncratic speech patterns that require listeners to perceptually adapt to during language comprehension. One question is how perceptual adaptation to a novel accent in speech produced by a digital device voice compares to adaptation to human voices. Furthermore, adaptation to one talker can  generalize to novel voices. Hence, we also tested whether perceptual adaptation to accented device voices generalizes to novel human voices, and vice versa. In this study, listeners were first exposed to words with a shifted phoneme realization in either a device or human voice. Later, participants were tested on whether they shifted their identification of words in the shifted talker. Additionally, we tested whether listeners applied the shift to novel device and human voices not heard in exposure. Results reveal talker-specific learning for both device and human voices. Yet, the size of the shift was larger for the device voices. Furthermore, listeners exposed to the shift in device voices showed generalization to novel human voices, and vice versa. These patterns of adaptation and generalization for device and human talkers have implications for models of speech perception models and human-computer interaction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Katerina Papadimitriou|AUTHOR Katerina Papadimitriou]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]
</p><p class="cpabstractcardaffiliationlist">University of Thessaly, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2315–2319&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Although fingerspelling is an often overlooked component of sign languages, it has great practical value in the communication of important context words that lack dedicated signs. In this paper we consider the problem of fingerspelling recognition in videos, introducing an end-to-end lexicon-free model that consists of a deep auto-encoder image feature learner followed by an attention-based encoder-decoder for prediction. The feature extractor is a vanilla auto-encoder variant, employing a quadratic activation function. The learned features are subsequently fed into the attention-based encoder-decoder. The latter deviates from traditional recurrent neural network architectures, being a fully convolutional attention-based encoder-decoder that is equipped with a multi-step attention mechanism relying on a quadratic alignment function and gated linear units over the convolution output. The introduced model is evaluated on the TTIC/UChicago fingerspelling video dataset, where it outperforms previous approaches in letter accuracy under all three, signer-dependent, -adapted, and -independent, experimental paradigms.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Natalie Lewandowski|AUTHOR Natalie Lewandowski]]^^1^^, [[Daniel Duran|AUTHOR Daniel Duran]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Independent Scientist, Germany; ^^2^^Albert-Ludwigs-Universität Freiburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2255–2259&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a study on the interactions between implicit attention to acoustic-phonetic detail in speech and individual differences (IDs). Attention to phonetic detail was assessed with acoustically manipulated speech stimuli within a computer game, an alternative to regular highly-controlled categorization tests. Twenty-two native German speakers (11f) completed the game and further tests including individual attention test measures (e.g. Simon Test), the BFI-10 (short version of the Big Five Inventory), and a Self-monitoring Test (need for social approval). With this study, we contribute to the understanding of the processes underlying human speech perception and the impact of cognitive and personality features on the attention to phonetic detail. Our results show that the general (non-verbal) attention capacity (mental flexibility, inhibition), interacts with implicit attention to phonetic detail. Furthermore, IDs in personality, such as  sensitivity to social cues or  conscientiousness significantly add to the effects. Understanding these interactions, especially arising in an intuitive and non-explicit study design, is an important step on the way towards explaining not only the influence of IDs on attention to phonetic detail, but also the dynamics of speech interaction (e.g. phonetic convergence).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kaylah Lalonde|AUTHOR Kaylah Lalonde]]
</p><p class="cpabstractcardaffiliationlist">Boys Town National Research Hospital, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2260–2264&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In audiovisual (AV) speech, correlations over time between visible mouth movements and the amplitude envelope of auditory speech help to reduce uncertainty as to when peaks in the auditory signal will occur. Previous studies demonstrated greater AV benefit to speech detection in noise for sentences with higher cross-modal correlations than sentences with lower cross-modal correlations.

This study examined whether the mechanisms that underlie AV detection benefits have downstream effects on speech recognition in noise. Participants were presented with 72 sentences in noise, in auditory-only and AV conditions, at either their 50% auditory speech recognition threshold in noise (SRT-50) or at a signal-to-noise ratio (SNR) 6 dB poorer than their SRT-50. They were asked to repeat each sentence. Mean AV benefit across subjects was calculated for each sentence. Pearson correlations and mixed modeling were used to examine whether variability in AV benefit across sentences was related to natural variation in the degree of cross-modal correlation across sentences.

In the more difficult listening condition, higher cross-modal correlations were associated with higher AV sentence recognition benefit. The relationship was strongest in the 0.8–2.2 kHz and 0.8–6 kHz frequency regions. These results demonstrate that cross-modal correlations contribute to variability in AV speech recognition in noise.</p></div>
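As an illustration of the cross-modal correlation this abstract refers to, the following is a minimal sketch (not the authors' code): it correlates the band-limited auditory amplitude envelope with a visual mouth-aperture track, assuming both are available as arrays; the band edges echo the 0.8–2.2 kHz region mentioned above.

```python
# Hedged sketch: Pearson correlation between a band-limited audio envelope and a
# mouth-aperture track (array names and the simple envelope method are assumptions).
import numpy as np
from scipy.signal import butter, filtfilt, hilbert, resample

def cross_modal_correlation(audio, sr, mouth_aperture, lo_hz=800.0, hi_hz=2200.0):
    """audio: 1-D waveform at rate sr; mouth_aperture: 1-D visual track, one value per video frame."""
    b, a = butter(4, [lo_hz / (sr / 2), hi_hz / (sr / 2)], btype="band")
    band = filtfilt(b, a, audio)                          # band-limit the speech signal
    envelope = np.abs(hilbert(band))                      # amplitude envelope
    envelope = resample(envelope, len(mouth_aperture))    # align lengths with the video track
    return np.corrcoef(envelope, mouth_aperture)[0, 1]
```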
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[M. Bentum|AUTHOR M. Bentum]], [[L. ten Bosch|AUTHOR L. ten Bosch]], [[A. van den Bosch|AUTHOR A. van den Bosch]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2265–2269&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The event-related potential (ERP) component named  phonological mismatch negativity (PMN) arises when listeners hear an unexpected word form in a spoken sentence [1]. The PMN is thought to reflect the mismatch between expected and perceived auditory speech input. In this paper, we use the PMN to test a central premise in the predictive coding framework [2], namely that the mismatch between prior expectations and sensory input is an important mechanism of perception. We test this with natural speech materials containing approximately 50,000 word tokens. The corresponding EEG-signal was recorded while participants (n = 48) listened to these materials. Following [3], we quantify the mismatch with two word probability distributions (WPD): a WPD based on preceding context, and a WPD that is additionally updated based on the incoming audio of the current word. We use the between-WPD cross entropy for each word in the utterances and show that a higher cross entropy correlates with a more negative PMN. Our results show that listeners anticipate auditory input while processing each word in naturalistic speech. Moreover, complementing previous research, we show that predictive language processing occurs across the whole probability spectrum.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[M. Bentum|AUTHOR M. Bentum]], [[L. ten Bosch|AUTHOR L. ten Bosch]], [[A. van den Bosch|AUTHOR A. van den Bosch]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2270–2274&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The mismatch between top-down predicted and bottom-up perceptual input is an important mechanism of perception according to the predictive coding framework (Friston, [1]). In this paper we develop and validate a new information-theoretic measure that quantifies the mismatch between expected and observed auditory input during speech processing. We argue that such a mismatch measure is useful for the study of speech processing. To compute the mismatch measure, we use naturalistic speech materials containing approximately 50,000 word tokens. For each word token we first estimate the prior word probability distribution with the aid of statistical language modelling, and next use automatic speech recognition to update this word probability distribution based on the unfolding speech signal. We validate the mismatch measure with multiple analyses, and show that the auditory-based update improves the probability of the correct word and lowers the uncertainty of the word probability distribution. Based on these results, we argue that it is possible to explicitly estimate the mismatch between predicted and perceived speech input with the cross entropy between word expectations computed before and after an auditory update.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel R. Turner|AUTHOR Daniel R. Turner]], [[Ann R. Bradlow|AUTHOR Ann R. Bradlow]], [[Jennifer S. Cole|AUTHOR Jennifer S. Cole]]
</p><p class="cpabstractcardaffiliationlist">Northwestern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2275–2279&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The pitch perception literature has been largely built on experimental data collected using nonspeech stimuli, which has then been generalized to speech. In the present study, we compare the perceptibility of identical pitch movements in speech and nonspeech that vary in duration and in pitch range. Our nonspeech results closely replicate earlier findings and we show that speech is a significantly more difficult medium for pitch discrimination. Pitch movements in speech have to be larger and longer to achieve the salience of the most common speech analog, pulse trains. The direction of pitch movement also affects one’s ability to discern pitch; in particular falling excursions are the most difficult. We found that the perceptual threshold for falling pitch in speech was more than 100 times that of previous estimates with nonspeech stimuli. Our findings show that the perceptual response to nonspeech does not adequately map onto speech, and future work in speech research and its applications should use speech-like stimuli, rather than convenient substitutes like pulse trains, pure tones, or isolated vowels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[L. ten Bosch|AUTHOR L. ten Bosch]], [[L. Boves|AUTHOR L. Boves]], [[K. Mulder|AUTHOR K. Mulder]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2280–2284&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Reaction times (RTs) are used widely in psychological and psycholinguistic research as inexpensive measures of underlying cognitive processes. However, inferring cognitive processes from RTs is hampered by the fact that actual responses are the result of multiple factors, many of which may not be related to the process of interest. In lexical decision experiments, the use of RTs is further complicated by the fact that the response to some stimuli is missing, and the fact that part of the responses are ‘incorrect’.

In this paper we investigate the distribution of missing and incorrect responses in the RT sequences of two large lexical decision experiments. It appears that a substantial part of incorrect responses cluster together. Then, we investigate the effect of clusters of incorrect responses on surrounding RTs.

Also, we extend previous research on methods for discovering and removing so-called local speed effects from RT sequences. For this purpose, we show that a recently introduced graph-based RT analysis method can help to better understand and analyze RT sequences.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Li Liu|AUTHOR Li Liu]]^^1^^, [[Jianze Li|AUTHOR Jianze Li]]^^2^^, [[Gang Feng|AUTHOR Gang Feng]]^^3^^, [[Xiao-Ping Zhang|AUTHOR Xiao-Ping Zhang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ryerson University, Canada; ^^2^^CUHK, China; ^^3^^GIPSA-lab (UMR 5216), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2285–2289&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Cued Speech (CS) is a multi-modal system, which complements the lip reading with manual hand cues in the phonetic level to make the spoken language visible. It has been found that lip and hand movements are asynchronous in CS, and thus the study of hand temporal organization is very important for the multi-modal CS feature fusion. In this work, we propose a novel diphthong-hand preceding model (D-HPM) by investigating the relationship between hand preceding time (HPT) and diphthong time instants in sentences for British English CS. Besides, we demonstrate that HPT of the first and second parts of diphthongs has a very strong correlation. Combining the monophthong-HPM (M-HPM) and D-HPM, we present a hybrid temporal segmentation detection algorithm (HTSDA) for the hand movement in CS. The evaluation of the proposed algorithm is carried out by a hand position recognition experiment using the multi-Gaussian classifier as well as the long-short term memory (LSTM). The results show that the HTSDA significantly improves the recognition performance compared with the baseline (i.e., audio-based segmentation) and the state-of-the-art M-HPM. To the best of our knowledge, this is the first work to study the temporal organization of hand movements in British English CS.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuriko Yokoe|AUTHOR Yuriko Yokoe]]
</p><p class="cpabstractcardaffiliationlist">Sophia University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2290–2294&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>A perception experiment with Japanese listeners is conducted to investigate the nature of place shift phenomenon that was previously found with French and English listeners. Hallé et al. [1] showed that unattested consonant sequences /tl, dl/ are perceptually repaired to form grammatically acceptable consonant clusters /kl, gl/ in the listeners’ native language.

In this study, a similar experiment is conducted with Japanese listeners, whose mother tongue lacks the onset clusters altogether. The result explicitly shows that the place shift phenomenon ought not to be interpreted in relation to top-down phonotactic feedback. Rather, I will argue that both labial and velar shift reflect an autonomous, signal-driven process. As such, language specificity in speech perception must reside in the listeners’ cue weighting, rather than in encoded linguistic knowledge.
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Krishna Somandepalli|AUTHOR Krishna Somandepalli]], [[Naveen Kumar|AUTHOR Naveen Kumar]], [[Arindam Jati|AUTHOR Arindam Jati]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2320–2324&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In many speech processing applications, the objective is to model different modes of variability to obtain robust speech features. In this paper, we learn speech representations in a multiview paradigm by constraining the views to known modes of variability such as speakers or spoken words. We use deep multiset canonical correlation (dMCCA) because it can model more than two views in parallel to learn a shared subspace across them. In order to model thousands of views (e.g., speakers), we demonstrate that stochastically sampling a small number of views generalizes dMCCA to the larger set of views. To evaluate our approach, we study two different aspects of the Speech Commands Dataset: variability among the speakers and speech commands. We show that, by treating observations from one mode of variability as multiple parallel views, we can learn representations that are discriminative to the other mode. We first consider different speakers as views of the same word to learn their shared subspace to represent an utterance. We then constrain the different words spoken by the same person as multiple views to learn speaker representations. Using classification and unsupervised clustering, we evaluate the efficacy of multiview representations to identify speech commands and speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chelzy Belitz|AUTHOR Chelzy Belitz]], [[Hussnain Ali|AUTHOR Hussnain Ali]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2325–2329&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Of the nearly 35 million people in the USA who are hearing impaired, only an estimated 25% use hearing aids (HA). A good number of HAs are prescribed but not used partially because of the time to convergence for best operation between the audiologist and user. To improve HA retention, it is suggested that a machine learning (ML) protocol could be established which improves initial HA configurations given a user’s pure-tone audiogram. This study examines a ML clustering method to predict the best initial HA fitting from a corpus of over 90,000 audiogram-fitting pairs collected from hearing centers throughout the USA. We first examine the final HA comfort targets to determine a limited number of preset configurations using several multi-dimensional clustering methods (Birch, Ward, and k-means). The goal is to reduce the amount of adjustments between the centroid, selected as a fitting configuration to represent the cluster, and the final HA configurations. This may be used to reduce the adjustment cycles for HAs or as preset starting configurations for personal sound amplification products (PSAPs). Using various classification methods, audiograms are mapped to a limited number of potential preset configurations. Finally, the average adjustment between the preset fitting targets and the final fitting targets is examined.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Truc Nguyen|AUTHOR Truc Nguyen]], [[Franz Pernkopf|AUTHOR Franz Pernkopf]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Graz, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2330–2334&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep learning (DL) is key for the recent boost of acoustic scene classification (ASC) performance. Especially, convolutional neural networks (CNNs) are widely adopted with affirmed success. However, models are large and cumbersome, i.e. they have many layers, parallel branches or large ensemble of individual models. In this paper, we propose a resource-efficient model using CliqueNets for feature learning and a mixture-of-experts (MoEs) layer. CliqueNets are a recurrent feedback structure enabling feature refinement by the alternate propagation between constructed loop layers. In addition, we use mixup data augmentation to construct adversarial training examples. It is used for balancing the dataset of DCASE 2018 task 1B over the recordings of the mismatched devices A, B and C. This prevents over-fitting on the dataset of Device A, caused by the gap of data amount between the different recording devices. Experimental results show that the proposed model achieves 64.7% average classification accuracy for Device C and B, and 70.0% for Device A with less than one million of parameters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohsin Y. Ahmed|AUTHOR Mohsin Y. Ahmed]]^^1^^, [[Md. Mahbubur Rahman|AUTHOR Md. Mahbubur Rahman]]^^2^^, [[Jilong Kuang|AUTHOR Jilong Kuang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Virginia, USA; ^^2^^Samsung, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2335–2339&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>DeepLung is an end-to-end deep learning based audio sensing and classification framework for lung anomaly (e.g. cough, wheeze) detection for pulmonary patients from streaming audio and inertial sensor data from a chest-held smartphone. We design and develop 1-D and 2-D convolutional neural networks for DeepLung, and train them using the Interspeech 2010 Paralinguistic Challenge features. Two different audio windowing schemes: i) real-time respiration cycle based natural windowing, and ii) static length windowing are compared and experimented with. Classifiers are developed considering 2 different system architectures: i) mobile-cloud hybrid architecture, and ii) mobile in-situ architecture. Patient privacy is preserved in the phone by filtering speech with a shallow classifier. To evaluate DeepLung, a novel and rigorous lung activity dataset is made by collecting audio and inertial sensor data from more than 131 real pulmonary patients and healthy subjects and annotated accurately by professional crowdsourcing. Experimental results show that the best combination of DeepLung convolutional neural network is 15–27% more accurate when compared to a state-of-the-art smartphone based body sound detection system, with a best F1 score of 98%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Roger K. Moore|AUTHOR Roger K. Moore]], [[Lucy Skidmore|AUTHOR Lucy Skidmore]]
</p><p class="cpabstractcardaffiliationlist">University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2340–2344&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The term ‘phoneme’ lies at the heart of speech science and technology, and yet it is not clear that the research community fully appreciates its meaning and implications. In particular, it is suspected that many researchers use the term in a casual sense to refer to the sounds of speech, rather than as a well defined abstract concept. If true, this means that some sections of the community may be missing an opportunity to understand and exploit the implications of this important psychological phenomenon. Here we review the correct meaning of the term ‘phoneme’ and report the results of an investigation into its use/misuse in the accepted papers at INTERSPEECH-2018. It is confirmed that a significant proportion of the community (i) may not be aware of the critical difference between ‘phonetic’ and ‘phonemic’ levels of description, (ii) may not fully understand the significance of ‘phonemic contrast’, and as a consequence, (iii) consistently misuse the term ‘phoneme’. These findings are discussed, and recommendations are made as to how this situation might be mitigated.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hannah Muckenhirn|AUTHOR Hannah Muckenhirn]]^^1^^, [[Vinayak Abrol|AUTHOR Vinayak Abrol]]^^2^^, [[Mathew Magimai-Doss|AUTHOR Mathew Magimai-Doss]]^^1^^, [[Sébastien Marcel|AUTHOR Sébastien Marcel]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Idiap Research Institute, Switzerland; ^^2^^University of Oxford, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2345–2349&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modeling directly raw waveforms through neural networks for speech processing is gaining more and more attention. Despite its varied success, a question that remains is: what kind of information are such neural networks capturing or learning for different tasks from the speech signal? Such an insight is not only interesting for advancing those techniques but also for understanding better speech signal characteristics. This paper takes a step in that direction, where we develop a gradient based approach to estimate the relevance of each speech sample input on the output score. We show that analysis of the resulting “relevance signal” through conventional speech signal processing techniques can reveal the information modeled by the whole network. We demonstrate the potential of the proposed approach by analyzing raw waveform CNN-based phone recognition and speaker identification systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kevin Kilgour|AUTHOR Kevin Kilgour]], [[Mauricio Zuluaga|AUTHOR Mauricio Zuluaga]], [[Dominik Roblek|AUTHOR Dominik Roblek]], [[Matthew Sharifi|AUTHOR Matthew Sharifi]]
</p><p class="cpabstractcardaffiliationlist">Google, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2350–2354&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose the Fréchet Audio Distance (FAD), a novel, reference-free evaluation metric for music enhancement algorithms. We demonstrate how typical evaluation metrics for speech enhancement and blind source separation can fail to accurately measure the perceived effect of a wide variety of distortions. As an alternative, we propose adapting the Fréchet Inception Distance (FID) metric used to evaluate generative image models to the audio domain. FAD is validated using a wide variety of artificial distortions and is compared to the signal based metrics signal to distortion ratio (SDR), cosine distance, and magnitude L2 distance. We show that, with a correlation coefficient of 0.52, FAD correlates more closely with human perception than either SDR, cosine distance or magnitude L2 distance, with correlation coefficients of 0.39, -0.15 and -0.01 respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuan Gong|AUTHOR Yuan Gong]], [[Jian Yang|AUTHOR Jian Yang]], [[Jacob Huber|AUTHOR Jacob Huber]], [[Mitchell MacKnight|AUTHOR Mitchell MacKnight]], [[Christian Poellabauer|AUTHOR Christian Poellabauer]]
</p><p class="cpabstractcardaffiliationlist">University of Notre Dame, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2355–2359&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/1541" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a new database of voice recordings with the goal of supporting research on vulnerabilities and protection of voice-controlled systems (VCSs). In contrast to prior efforts, the proposed database contains both genuine voice commands and replayed recordings of such commands, collected in ’’realistic VCSs usage scenarios’’ and using ’’modern voice assistant development kits’’. Specifically, the database contains recordings from four systems (each with a different microphone array) in a variety of environmental conditions with different forms of background noise and relative positions between speaker and device. To the best of our knowledge, this is the first publicly available database1 that has been specifically designed for the protection of state-of-the-art voice-controlled systems against various replay attacks in various conditions and environments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Balamurali B.T.|AUTHOR Balamurali B.T.]], [[Jer-Ming Chen|AUTHOR Jer-Ming Chen]]
</p><p class="cpabstractcardaffiliationlist">SUTD, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2360–2363&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In an earlier study [1], we have successfully classified a vowel-gesture parameter, gamma γ(f) (relative vocal tract impedance spectrum measured using broadband signal excitation applied at the speaker’s mouth during vowel phonation), via ensemble classification yielding accuracy exceeding 80% for six nominal regions of the vowel plane. In this follow-up investigation, we analyze gamma using t-SNE, a dimension reduction technique to allow visualizing gamma in low dimensional space, at two levels: inter-speaker and intra-speaker. Examining the same gamma dataset from [1], t-SNE yielded good spatial clustering in identifying the 6 different speakers with an accuracy exceeding 90%, attributable to the inter-speaker variation. Next, we further evaluated gamma of measurements only from a particular speaker in the lower dimension, which indicates intra-speaker distribution which may be associated with different measurement sessions. Using gamma may be seen as a meaningful parameter deserving further study, because it is inherently a function of the calibration load — unique for every speaker and measurement session. Because the calibration is made with the subject’s mouth closed, so the measurement field during calibration is loaded solely by the impedance of the radiation field as seen at the subject’s lips and baffled by the subject’s face (geometrical information).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Geon Woo Lee|AUTHOR Geon Woo Lee]], [[Jung Hyuk Lee|AUTHOR Jung Hyuk Lee]], [[Seong Ju Kim|AUTHOR Seong Ju Kim]], [[Hong Kook Kim|AUTHOR Hong Kook Kim]]
</p><p class="cpabstractcardaffiliationlist">GIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2364–2365&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Multi-channel speech/audio separation and enhancement methods are popularly used for many speech/audio related applications. However, these methods may cause a loss of spatial cues, including the interaural time difference and interaural level difference, for further processing of monoaural signals. Thus, listeners may encounter difficulties in understanding the direction of the source signal. We present a directional audio renderer using a personalized HRTF, which is estimated by a neural network that combines DNN and CNN with anthropometric parameters and ear images of the listener. This demonstrated directional audio renderer concept aims to help foster research on audio processing for virtual reality/augmented reality to improve the quality of service of such devices.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wikus Pienaar|AUTHOR Wikus Pienaar]], [[Daan Wissing|AUTHOR Daan Wissing]]
</p><p class="cpabstractcardaffiliationlist">North-West University, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2366–2367&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Proper phonological analyses, descriptions and explanations as well as gaining insight into language variation and change rely heavily upon ample and trustworthy phonetic data. Our ’’Online Speech Processing and Analysis Suite’’ is a positive development in just this direction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dieter Maurer|AUTHOR Dieter Maurer]]^^1^^, [[Heidy Suter|AUTHOR Heidy Suter]]^^1^^, [[Christian d’Hereuse|AUTHOR Christian d’Hereuse]]^^1^^, [[Volker Dellwo|AUTHOR Volker Dellwo]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Zürcher Hochschule der Künste, Switzerland; ^^2^^Universität Zürich, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2368–2369&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the specialist literature on vowel acoustics, there is an extensive and often controversial debate on whether the primary acoustic cues of vowel quality are contained in the formant patterns or, alternatively, in the spectral shape. Yet, recent studies have shown that neither formant patterns nor spectral shapes are vowel quality-specific but that they are ambiguous because of a complex interaction between pitch and vowel-related spectral characteristics. In order to give insight into the phenomenon of formant pattern and spectral shape ambiguity of vowel sounds and its role for vowel acoustics, exemplary series of speech and of vowel sounds are presented in an online documentation, most of them selected from the Zurich Corpus. The presentation includes sound playbacks and results of an acoustic analysis (FFT spectra, LPC curves, spectrograms, f,,o,, contours, formant patterns) and of a vowel recognition test. A Klatt synthesiser is also included for resynthesis and synthesis purposes. The presentation intends (i) to support researchers in their evaluation of existing and future studies, questioning whether the actual variation and pitch-dependency of the vowel spectrum is taken into account when attempting to generalise experimental results, and (ii) to support students in their acquisition of state-of-the-art knowledge of vowel acoustics.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anton Noll|AUTHOR Anton Noll]], [[Jonathan Stuefer|AUTHOR Jonathan Stuefer]], [[Nicola Klingler|AUTHOR Nicola Klingler]], [[Hannah Leykum|AUTHOR Hannah Leykum]], [[Carina Lozo|AUTHOR Carina Lozo]], [[Jan Luttenberger|AUTHOR Jan Luttenberger]], [[Michael Pucher|AUTHOR Michael Pucher]], [[Carolin Schmid|AUTHOR Carolin Schmid]]
</p><p class="cpabstractcardaffiliationlist">Austrian Academy of Sciences, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2370–2371&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we introduce Sound Tools eXtended (STx) version 5.0, an acoustic speech and sound processing application. STx 5.0 contains an integrated, simplified and compact GUI, specifically designed for speech analysis for phoneticians, linguists, psychologists, and researchers in related fields. It features a well structured user interface, compatibility with established tools (TextGrid [1], MAUS [2]), and top-notch signal analysis tools. STx 5.0 enables researchers as well as students to conduct advanced analysis of audio files, especially of speech recordings. STx 5.0 implements a new interface for the already established profiles in STx 5.0, which helps customize settings according to the researcher’s needs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mohamed Eldesouki|AUTHOR Mohamed Eldesouki]], [[Naassih Gopee|AUTHOR Naassih Gopee]], [[Ahmed Ali|AUTHOR Ahmed Ali]], [[Kareem Darwish|AUTHOR Kareem Darwish]]
</p><p class="cpabstractcardaffiliationlist">HBKU, Qatar</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2372–2373&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents FarSpeech, QCRI’s combined Arabic speech recognition, natural language processing (NLP), and dialect identification pipeline. It features modern web technologies to capture live audio, transcribes Arabic audio, NLP processes the transcripts, and identifies the dialect of the speaker. For transcription, we use QATS, which is a Kaldi-based ASR system that uses Time Delay Neural Networks (TDNN). For NLP, we use a SOTA Arabic NLP toolkit that employs various deep neural network and SVM based models. Finally, our dialect identification system uses multi-modality from both acoustic and linguistic input. FarSpeech1 presents different screens to display the transcripts, text segmentation, part-of-speech tags, recognized named entities, diacritized text, and the identified dialect of the speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fasih Haider|AUTHOR Fasih Haider]], [[Saturnino Luz|AUTHOR Saturnino Luz]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2374–2375&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Ambient Assisted Living (AAL) technologies are being developed which could assist elderly people to live healthy and active lives. These technologies have been used to monitor people’s daily exercises, consumption of calories and sleeping patterns, and to provide coaching interventions to foster positive behaviour. Speech and audio processing can be used to complement such AAL technologies to inform interventions for healthy ageing by analyzing acoustic data captured in the user’s home. However, collection of data in home settings present a number of challenges. One of the most pressing challenges concerns how to manage privacy and data protection. To address this issue, we have developed a low-cost system which can extract audio features while protecting the actual spoken content upon detection of voice activity, and store audio features for further processing which offer privacy guarantees. These privacy preserving features are being tested in the context of a larger project which includes health and well-being monitoring and coaching.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chitralekha Gupta|AUTHOR Chitralekha Gupta]], [[Karthika Vijayan|AUTHOR Karthika Vijayan]], [[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Xiaoxue Gao|AUTHOR Xiaoxue Gao]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2376–2377&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Singing like a professional singer is extremely appealing to the general public. However, many individuals are not able to sing like a singer who has received formal training over several years. We develop a web platform, where users can perform personalized singing synthesis. A user has to read and record the lyrics of a song in our web platform, and enjoy good quality singing vocals synthesized in his/her own voice. We perform a template-based speech-to-singing voice conversion at the backend of the web interface, that uses the prosody characteristics of the song derived from good quality singing by a trained singer and retains the speaker characteristics from the respective user. We utilize an improved temporal alignment scheme between speech and singing signals using tandem features, and employ a deep-spectral map to incorporate singing spectral characteristics into user’s voice. The singing vocals are later synthesized by a vocoder. Using this web platform, we advocate that ‘everyone can sing as they desire’.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Neville Ryant|AUTHOR Neville Ryant]]^^1^^, [[Kenneth Church|AUTHOR Kenneth Church]]^^2^^, [[Christopher Cieri|AUTHOR Christopher Cieri]]^^1^^, [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]]^^3^^, [[Jun Du|AUTHOR Jun Du]]^^4^^, [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]^^5^^, [[Mark Liberman|AUTHOR Mark Liberman]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Pennsylvania, USA; ^^2^^Baidu Research, USA; ^^3^^LSCP (UMR 8554), France; ^^4^^USTC, China; ^^5^^Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 978–982&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces the second DIHARD challenge, the second in a series of speaker diarization challenges intended to improve the robustness of diarization systems to variation in recording equipment, noise conditions, and conversational domain. The challenge comprises four tracks evaluating diarization performance under two input conditions (single channel vs. multi-channel) and two segmentation conditions (diarization from a reference speech segmentation vs. diarization from scratch). In order to prevent participants from overtuning to a particular combination of recording conditions and conversational domain, recordings are drawn from a variety of sources ranging from read audiobooks to meeting speech, to child language acquisition recordings, to dinner parties, to web video. We describe the task and metrics, challenge design, datasets, and baseline systems for speech enhancement, speech activity detection, and diarization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Prachi Singh|AUTHOR Prachi Singh]]^^1^^, [[Harsha Vardhan M.A.|AUTHOR Harsha Vardhan M.A.]]^^1^^, [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]^^1^^, [[A. Kanagasundaram|AUTHOR A. Kanagasundaram]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indian Institute of Science, India; ^^2^^University of Jaffna, Sri Lanka</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 983–987&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the LEAP System, developed for the Second DIHARD diarization Challenge. The evaluation data in the challenge is composed of multi-talker speech in restaurants, doctor-patient conversations, child language acquisition recordings in home environments and audio extracted YouTube videos. The LEAP system is developed using two types of embeddings, one based on i-vector representations and the other one based on x-vector representations. The initial diarization output obtained using agglomerative hierarchical clustering (AHC) done on the probabilistic linear discriminant analysis (PLDA) scores is refined using the Variational-Bayes hidden Markov model (VB-HMM) model. We propose a modified VB-HMM model with posterior scaling which provides significant improvements in the final diarization error rate (DER). We also use a domain compensation on the i-vector features to reduce the mis-match between training and evaluation conditions. N(s)TN(s)TN(s)T Using the proposed approaches, we obtain relative improvements in DER of about 7.1% relative for the best individual system over the DIHARD baseline system and about 13.7% relative for the final system combination on evaluation set. An analysis performed using the proposed posterior scaling method shows that scaling results in improved discrimination among the HMM states in the VB-HMM.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ignacio Viñals|AUTHOR Ignacio Viñals]], [[Pablo Gimeno|AUTHOR Pablo Gimeno]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 988–992&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the latest improvements in Speaker Diarization obtained by ViVoLAB research group for the 2019 DIHARD Diarization Challenge. This evaluation seeks the improvement of the diarization task in adverse conditions. For this purpose, the audio recordings involve multiple scenarios with no restrictions in terms of speakers, overlapped speech nor quality of the audio. Our submission follows the traditional segmentation-clustering-resegmentation pipeline: Speaker embeddings are extracted from acoustic segments with a single speaker on them, later clustered by means of a PLDA. Our contribution in this work is focused on the clustering step. We present results with our Variational Bayes PLDA clustering and our tree-based clustering strategy, which sequentially assigns the different embeddings to its corresponding speaker according to a PLDA model. Both strategies compare multiple diarization hypotheses and choose their candidate one according to a generative criterion. We also analyze the impact of the different available embeddings in the state-of-the-art with both clustering approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zbyněk Zajíc|AUTHOR Zbyněk Zajíc]], [[Marie Kunešová|AUTHOR Marie Kunešová]], [[Marek Hrúz|AUTHOR Marek Hrúz]], [[Jan Vaněk|AUTHOR Jan Vaněk]]
</p><p class="cpabstractcardaffiliationlist">University of West Bohemia, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 993–997&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present our system developed by the team from the New Technologies for the Information Society (NTIS) research center of the University of West Bohemia in Pilsen, for the Second DIHARD Speech Diarization Challenge. The base of our system follows the currently-standard approach of segmentation, i/x-vector extraction, clustering, and resegmentation. The hyperparameters for each of the subsystems were selected according to the domain classifier trained on the development set of DIHARD II. We compared our system with results from the Kaldi diarization (with i/x-vectors) and combined these systems. At the time of writing of this abstract, our best submission achieved a DER of 23.47% and a JER of 48.99% on the evaluation set (in Track 1 using reference SAD).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tae Jin Park|AUTHOR Tae Jin Park]], [[Manoj Kumar|AUTHOR Manoj Kumar]], [[Nikolaos Flemotomos|AUTHOR Nikolaos Flemotomos]], [[Monisankha Pal|AUTHOR Monisankha Pal]], [[Raghuveer Peri|AUTHOR Raghuveer Peri]], [[Rimita Lahiri|AUTHOR Rimita Lahiri]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 998–1002&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we describe components that form a part of USC-SAIL team’s submissions to Track 1 and Track 2 of the second DIHARD speaker diarization challenge. We describe each module in our speaker diarization pipeline and explain the rationale behind our choice of algorithms for each module, while comparing the Diarization Error Rate (DER) against different module combinations. We propose a clustering scheme based on spectral clustering that yields competitive performance. Moreover, we introduce an overlap detection scheme and a re-segmentation system for speaker diarization and investigate their performances using controlled and in-the-wild conditions. In addition, we describe the additional components that will be integrated to our speaker diarization system. To pursue the best performance, we compare our system with the state-of-the-art methods that are presented in the previous challenge and literature. We include preliminary results of our speaker diarization system on the evaluation data from the second DIHARD challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sergey Novoselov|AUTHOR Sergey Novoselov]]^^1^^, [[Aleksei Gusev|AUTHOR Aleksei Gusev]]^^1^^, [[Artem Ivanov|AUTHOR Artem Ivanov]]^^2^^, [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]]^^2^^, [[Andrey Shulipa|AUTHOR Andrey Shulipa]]^^3^^, [[Anastasia Avdeeva|AUTHOR Anastasia Avdeeva]]^^3^^, [[Artem Gorlanov|AUTHOR Artem Gorlanov]]^^2^^, [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ITMO University, Russia; ^^2^^STC-innovations, Russia; ^^3^^ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1003–1007&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the ITMO University (DI-IT team) speaker diarization systems submitted to DIHARD Challenge II. As with DIHARD I, this challenge is focused on diarization task for microphone recordings in varying difficult conditions. According to the results of the previous DIHARD I Challenge state-of-the-art diarization systems are based on x-vector embeddings. Such embeddings are clustered using agglomerative hierarchical clustering (AHC) algorithm by means of PLDA scoring. Current research continues the investigation of deep speaker embedding efficiency for the speaker diarization task. This paper explores new types of embedding extractors with different deep neural network architectures and training strategies. We also used AHC to perform embeddings clustering. Alternatively to the PLDA scoring in our AHC procedure we used discriminatively trained cosine similarity metric learning (CSML) model for scoring. Moreover we focused on the optimal AHC threshold tuning according to the specific speech quality. Environment classifier was preliminary trained on development set to predict acoustic conditions for this purpose. We show that such threshold adaptation scheme allows to reduce diarization error rate compared to common AHC threshold for all conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cheng-I Lai|AUTHOR Cheng-I Lai]], [[Nanxin Chen|AUTHOR Nanxin Chen]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1013–1017&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present JHU’s system submission to the ASVspoof 2019 Challenge: Anti-Spoofing with Squeeze-Excitation and Residual neTworks (ASSERT). Anti-spoofing has gathered more and more attention since the inauguration of the ASVspoof Challenges, and ASVspoof 2019 dedicates to address attacks from all three major types: text-to-speech, voice conversion, and replay. Built upon previous research work on Deep Neural Network (DNN), ASSERT is a pipeline for DNN-based approach to anti-spoofing. ASSERT has four components: feature engineering, DNN models, network optimization and system combination, where the DNN models are variants of squeeze-excitation and residual networks. We conducted an ablation study of the effectiveness of each component on the ASVspoof 2019 corpus, and experimental results showed that ASSERT obtained more than 93% and 17% relative improvements over the baseline systems in the two sub-challenges in ASVspoof 2019, ranking ASSERT one of the top performing systems. Code and pretrained models are made publicly available.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Jichen Yang|AUTHOR Jichen Yang]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1058–1062&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker verification systems in practice are vulnerable to spoofing attacks. The high quality recording and playback devices make replay attack a real threat to speaker verification. Additionally, the furtherance in voice conversion and speech synthesis has produced perceptually natural sounding speech. The ASVspoof 2019 challenge is organized to study the robustness of countermeasures against such attacks, which cover two common modes of attacks, logical and physical access. The former deals with synthetic attacks arising from voice conversion and text-to-speech techniques, whereas the latter deals with replay attacks. In this work, we explore several novel countermeasures based on long range acoustic features that are found to be effective for spoofing attack detection. The long range features capture different aspects of long range information as they are computed from subbands and octave power spectrum in contrast to the conventional way from linear power spectrum. These novel features are combined with the other known features for improved detection of spoofing attacks. We obtain a tandem detection cost function of 0.1264 and 0.1381 (equal error rate 4.13% and 5.95%) for logical and physical access on the best combined system submitted to the challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Su-Yu Chang|AUTHOR Su-Yu Chang]], [[Kai-Cheng Wu|AUTHOR Kai-Cheng Wu]], [[Chia-Ping Chen|AUTHOR Chia-Ping Chen]]
</p><p class="cpabstractcardaffiliationlist">National Sun Yat-sen University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1063–1067&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we study a countermeasure module to detect spoofing attacks with converted or synthesized speech in tandem automatic speaker verification (ASV). Our approach integrates representation learning and transfer learning methods. For representation learning, good embedding network functions are learned from audio signals with the goal to distinguish different types of spoofing attacks. For transfer learning, the embedding network functions are used to initialize fine-tuning networks. We experiment well-known neural network architectures and front-end raw features to diversify and strengthen the information source for embedding. We participate in the 2019 Automatic Speaker Verification Spoofing and Countermeasures Challenge (ASVspoof 2019) and evaluate the proposed methods with the logical access condition tasks for detecting converted speech and synthesized speech. On the ASVspoof 2019 development set, our best single system achieves a minimum tandem decision cost function of nearly 0 during system development. On the ASVspoof 2019 evaluation set, our primary system achieves a minimum tandem decision cost of 0.1791, and an equal error rate (EER) of 9.08%. Our system does not have over-training issue as it achieves decent performance with unseen test data of the types presented in training, yet the generalization gap is not small with mismatched test data types.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alejandro Gomez-Alanis|AUTHOR Alejandro Gomez-Alanis]]^^1^^, [[Antonio M. Peinado|AUTHOR Antonio M. Peinado]]^^1^^, [[Jose A. Gonzalez|AUTHOR Jose A. Gonzalez]]^^2^^, [[Angel M. Gomez|AUTHOR Angel M. Gomez]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad de Granada, Spain; ^^2^^Universidad de Málaga, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1068–1072&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The aim of this work is to develop a single anti-spoofing system which can be applied to effectively detect all the types of spoofing attacks considered in the ASVspoof 2019 Challenge: text-to-speech, voice conversion and replay based attacks. To achieve this, we propose the use of a Light Convolutional Gated Recurrent Neural Network (LC-GRNN) as a deep feature extractor to robustly represent speech signals as utterance-level embeddings, which are later used by a back-end recognizer which performs the final genuine/spoofed classification. This novel architecture combines the ability of light convolutional layers for extracting discriminative features at frame level with the capacity of gated recurrent unit based RNNs for learning long-term dependencies of the subsequent deep features. The proposed system has been presented as a contribution to the ASVspoof 2019 Challenge, and the results show a significant improvement in comparison with the baseline systems. Moreover, experiments were also carried out on the ASVspoof 2015 and 2017 corpora, and the results indicate that our proposal clearly outperforms other popular methods recently proposed and other similar deep feature based systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hossein Zeinali|AUTHOR Hossein Zeinali]]^^1^^, [[Themos Stafylakis|AUTHOR Themos Stafylakis]]^^2^^, [[Georgia Athanasopoulou|AUTHOR Georgia Athanasopoulou]]^^2^^, [[Johan Rohdin|AUTHOR Johan Rohdin]]^^1^^, [[Ioannis Gkinis|AUTHOR Ioannis Gkinis]]^^2^^, [[Lukáš Burget|AUTHOR Lukáš Burget]]^^1^^, [[Jan Černocký|AUTHOR Jan Černocký]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Brno University of Technology, Czech Republic; ^^2^^Omilia, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1073–1077&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present the system description of the joint efforts of Brno University of Technology (BUT) and Omilia — Conversational Intelligence for the ASVSpoof2019 Spoofing and Countermeasures Challenge. The primary submission for Physical access (PA) is a fusion of two VGG networks, trained on single and two-channels features. For Logical access (LA), our primary system is a fusion of VGG and the recently introduced SincNet architecture. The results on PA show that the proposed networks yield very competitive performance in all conditions and achieved 86% relative improvement compared to the official baseline. On the other hand, the results on LA showed that although the proposed architecture and training strategy performs very well on certain spoofing attacks, it fails to generalize to certain attacks that are unseen during training.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Moustafa Alzantot|AUTHOR Moustafa Alzantot]], [[Ziqi Wang|AUTHOR Ziqi Wang]], [[Mani B. Srivastava|AUTHOR Mani B. Srivastava]]
</p><p class="cpabstractcardaffiliationlist">University of California at Los Angeles, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1078–1082&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The state-of-art models for speech synthesis and voice conversion are capable of generating synthetic speech that is perceptually indistinguishable from bonafide human speech. These methods represent a threat to the automatic speaker verification (ASV) systems. Additionally, replay attacks where the attacker uses a speaker to replay a previously recorded genuine human speech are also possible. In this paper, we present our solution for the ASVSpoof2019 competition, which aims to develop countermeasure systems that distinguish between spoofing attacks and genuine speeches. Our model is inspired by the success of residual convolutional networks in many classification tasks. We build three variants of a residual convolutional neural network that accept different feature representations (MFCC, log-magnitude STFT, and CQCC) of input. We compare the performance achieved by our model variants and the competition baseline models. In the logical access scenario, the fusion of our models has zero t-DCF cost and zero equal error rate (EER), as evaluated on the development set. On the evaluation set, our model fusion improves the t-DCF and EER by 25% compared to the baseline algorithms. Against physical access replay attacks, our model fusion improves the baseline algorithms t-DCF and EER scores by 71% and 75% on the evaluation set, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]
</p><p class="cpabstractcardaffiliationlist">University of Seoul, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1083–1087&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we concentrate on replacing the process of extracting hand-crafted acoustic feature with end-to-end DNN using complementary high-resolution spectrograms. As a result of advance in audio devices, typical characteristics of a replayed speech based on conventional knowledge alter or diminish in unknown replay configurations. Thus, it has become increasingly difficult to detect spoofed speech with a conventional knowledge-based approach. To detect unrevealed characteristics that reside in a replayed speech, we directly input spectrograms into an end-to-end DNN without knowledge-based intervention. Explorations dealt in this study that differentiates from existing spectrogram-based systems are twofold: complementary information and high-resolution. Spectrograms with different information are explored, and it is shown that additional information such as the phase information can be complementary. High-resolution spectrograms are employed with the assumption that the difference between a bona-fide and a replayed speech exists in the details. Additionally, to verify whether other features are complementary to spectrograms, we also examine raw waveform and an i-vector based system. Experiments conducted on the ASVspoof 2019 physical access challenge show promising results, where t-DCF and equal error rates are 0.0570 and 2.45% for the evaluation set, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]^^1^^, [[Xin Wang|AUTHOR Xin Wang]]^^2^^, [[Ville Vestman|AUTHOR Ville Vestman]]^^3^^, [[Md. Sahidullah|AUTHOR Md. Sahidullah]]^^4^^, [[Héctor Delgado|AUTHOR Héctor Delgado]]^^1^^, [[Andreas Nautsch|AUTHOR Andreas Nautsch]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^2^^, [[Nicholas Evans|AUTHOR Nicholas Evans]]^^1^^, [[Tomi H. Kinnunen|AUTHOR Tomi H. Kinnunen]]^^5^^, [[Kong Aik Lee|AUTHOR Kong Aik Lee]]^^6^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EURECOM, France; ^^2^^NII, Japan; ^^3^^University of Eastern Finland, Finland; ^^4^^Loria (UMR 7503), France; ^^5^^University of Eastern Finland, Finland; ^^6^^NEC, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1008–1012&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>ASVspoof, now in its third edition, is a series of community-led challenges which promote the development of countermeasures to protect automatic speaker verification (ASV) from the threat of spoofing. Advances in the 2019 edition include: (i) a consideration of both logical access (LA) and physical access (PA) scenarios and the three major forms of spoofing attack, namely synthetic, converted and replayed speech; (ii) spoofing attacks generated with state-of-the-art neural acoustic and waveform models; (iii) an improved, controlled simulation of replay attacks; (iv) use of the tandem detection cost function (t-DCF) that reflects the impact of both spoofing and countermeasures upon ASV reliability. Even if ASV remains the core focus, in retaining the equal error rate (EER) as a secondary metric, ASVspoof also embraces the growing importance of  fake audio detection. ASVspoof 2019 attracted the participation of 63 research teams, with more than half of these reporting systems that improve upon the performance of two baseline spoofing countermeasures. This paper describes the 2019 database, protocols and challenge results. It also outlines major findings which demonstrate the real progress made in protecting against the threat of spoofing and fake audio.</p></div>
\rules except wikilink

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bhusan Chettri|AUTHOR Bhusan Chettri]]^^1^^, [[Daniel Stoller|AUTHOR Daniel Stoller]]^^1^^, [[Veronica Morfi|AUTHOR Veronica Morfi]]^^1^^, [[Marco A. Martínez Ramírez|AUTHOR Marco A. Martínez Ramírez]]^^1^^, [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]]^^1^^, [[Bob L. Sturm|AUTHOR Bob L. Sturm]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Queen Mary University of London, UK; ^^2^^KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1018–1022&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Detecting spoofing attempts of automatic speaker verification (ASV) systems is challenging, especially when using only one modelling approach. For robustness, we use both deep neural networks and traditional machine learning models and combine them as ensemble models through logistic regression. They are trained to detect logical access (LA) and physical access (PA) attacks on the dataset released as part of the ASV Spoofing and Countermeasures Challenge 2019. We propose dataset partitions that ensure different attack types are present during training and validation to improve system robustness. Our ensemble model outperforms all our single models and the baselines from the challenge for both attack types. We investigate why some models on the PA dataset strongly outperform others and find that spoofed recordings in the dataset tend to have longer silences at the end than genuine ones. By removing them, the PA task becomes much more challenging, with the tandem detection cost function (t-DCF) of our best single model rising from 0.1672 to 0.5018 and equal error rate (EER) increasing from 5.98% to 19.8% on the development set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Weicheng Cai|AUTHOR Weicheng Cai]], [[Haiwei Wu|AUTHOR Haiwei Wu]], [[Danwei Cai|AUTHOR Danwei Cai]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1023–1027&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes our DKU replay detection system for the ASVspoof 2019 challenge. The goal is to develop spoofing countermeasure for automatic speaker recognition in physical access scenario. We leverage the countermeasure system pipeline from four aspects, including the data augmentation, feature representation, classification, and fusion. First, we introduce an utterance-level deep learning framework for anti-spoofing. It receives the variable-length feature sequence and outputs the utterance-level scores directly. Based on the framework, we try out various kinds of input feature representations extracted from either the magnitude spectrum or phase spectrum. Besides, we also perform the data augmentation strategy by applying the speed perturbation on the raw waveform. Our best single system employs a residual neural network trained by the speed-perturbed group delay gram. It achieves EER of 1.04% on the development set, as well as EER of 1.08% on the evaluation set. Finally, using the simple average score from several single systems can further improve the performance. EER of 0.24% on the development set and 0.66% on the evaluation set is obtained for our primary system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Radosław Białobrzeski|AUTHOR Radosław Białobrzeski]], [[Michał Kośmider|AUTHOR Michał Kośmider]], [[Mateusz Matuszewski|AUTHOR Mateusz Matuszewski]], [[Marcin Plata|AUTHOR Marcin Plata]], [[Alexander Rakowski|AUTHOR Alexander Rakowski]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Poland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1028–1032&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a replay attack detection system consisting of two convolutional neural network models. The first model consists of a small Bayesian neural network, motivated by the hypothesis that Bayesian models are robust to overfitting. The second one uses a bigger architecture, LCNN, extended with several regularization techniques to improve generalization. Our experiments, considering both size of the networks and use of the Bayesian approach, indicated that smaller networks are sufficient to achieve competitive results. To better estimate the performance against unseen spoofing methods, the final models were selected using novel Attack-Out Cross-Validation. In this procedure each model was tested on a subset of data containing not only previously unseen speakers, but also unseen spoofing attacks. The system was submitted to ASVspoof 2019 challenge’s PA condition and achieved a t-DCF score of 0.0219 and EER of 0.88% on the evaluation dataset, which is a 10 times relative improvement over the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]]^^1^^, [[Sergey Novoselov|AUTHOR Sergey Novoselov]]^^2^^, [[Andzhukaev Tseren|AUTHOR Andzhukaev Tseren]]^^1^^, [[Marina Volkova|AUTHOR Marina Volkova]]^^1^^, [[Artem Gorlanov|AUTHOR Artem Gorlanov]]^^1^^, [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1033–1037&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the Speech Technology Center (STC) antispoofing systems submitted to the ASVspoof 2019 challenge. The ASVspoof2019 is the extended version of the previous challenges and includes 2 evaluation conditions: logical access use-case scenario with speech synthesis and voice conversion attack types and physical access use-case scenario with replay attacks. During the challenge we developed anti-spoofing solutions for both scenarios. The proposed systems are implemented using deep learning approach and are based on different types of acoustic features. We enhanced Light CNN architecture previously considered by the authors for replay attacks detection and which performed high spoofing detection quality during the ASVspoof2017 challenge. In particular here we investigate the efficiency of angular margin based softmax activation for training robust deep Light CNN classifier to solve the mentioned-above tasks. Submitted systems achieved EER of 1.86% in logical access scenario and 0.54% in physical access scenario on the evaluation part of the Challenge corpora. High performance obtained for the unknown types of spoofing attacks demonstrates the stability of the offered approach in both evaluation conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yexin Yang|AUTHOR Yexin Yang]], [[Hongji Wang|AUTHOR Hongji Wang]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Zhengyang Chen|AUTHOR Zhengyang Chen]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]
</p><p class="cpabstractcardaffiliationlist">Shanghai Jiao Tong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1038–1042&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The robustness of an anti-spoofing system is progressively more important in order to develop a reliable speaker verification system. Previous challenges and datasets mainly focus on a specific type of spoofing attacks. The ASVspoof 2019 edition is the first challenge to address two major spoofing types — logical and physical access. This paper presents the SJTU’s submitted anti-spoofing system to the ASVspoof 2019 challenge. Log-CQT features are developed in conjunction with multi-layer convolutional neural networks for robust performance across both subtasks. CNNs with gradient linear units (GLU) activations are utilized for spoofing detection. The proposed system shows consistent performance improvement over all types of spoofing attacks. Our primary submissions achieve the 5^^th^^ and 8^^th^^ positions for the logical and physical access respectively. Moreover, our contrastive submission to the PA task exhibits better generalization compared to our primary submission, and achieves a comparable performance to the 3^^rd^^ position of the challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[K.N.R.K. Raju Alluri|AUTHOR K.N.R.K. Raju Alluri]], [[Anil Kumar Vuppala|AUTHOR Anil Kumar Vuppala]]
</p><p class="cpabstractcardaffiliationlist">IIIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1043–1047&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The ASVspoof 2019 challenge focuses on countermeasures for all major spoofing attacks, namely speech synthesis (SS), voice conversion (VC), and replay spoofing attacks. This paper describes the IIIT-H spoofing countermeasures developed for ASVspoof 2019 challenge. In this study, three instantaneous cepstral features namely, single frequency cepstral coefficients, zero time windowing cepstral coefficients, and instantaneous frequency cepstral coefficients are used as front-end features. A Gaussian mixture model is used as back-end classifier. The experimental results on ASVspoof 2019 dataset reveal that the proposed instantaneous features are efficient in detecting VC and SS based attacks. In detecting replay attacks, proposed features are comparable with baseline systems. Further analysis is carried out using metadata to assess the impact of proposed countermeasures on different synthetic speech generating algorithm/replay configurations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rongjin Li|AUTHOR Rongjin Li]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Zheng Li|AUTHOR Zheng Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]
</p><p class="cpabstractcardaffiliationlist">Xiamen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1048–1052&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker anti-spoofing is crucial to prevent security breaches when the speaker verification systems encounter the spoofed attacks from the advanced speech synthesis algorithms and high fidelity replay devices. In this paper, we propose a framework based on multiple features integration and multi-task learning (MFMT) for improving anti-spoofing performance. It is important to integrate the complementary information of multiple spectral features within the network, such as MFCC, CQCC, Fbank, etc., as often a single kind of feature is not enough to grasp the global spoofing cues and it generalizes poorly. Furthermore, we propose a helpful butterfly unit (BU) for multi-task learning to propagate the shared representations between the binary decision task and the other auxiliary task. The BU can obtain task representations of other branch during forward propagation and prevent the gradient from assimilating the branch during back propagation. Our proposed system yielded an EER of 9.01% on ASVspoof 2017, while the best single system and the average scores fusion obtained the evaluation EER of 2.39% and 0.96% on ASVspoof 2019 PA, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jennifer Williams|AUTHOR Jennifer Williams]], [[Joanna Rownicka|AUTHOR Joanna Rownicka]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1053–1057&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present our system submission to the ASVspoof 2019 Challenge Physical Access (PA) task. The objective for this challenge was to develop a countermeasure that identifies speech audio as either bona fide or intercepted and replayed. The target prediction was a value indicating that a speech segment was bona fide (positive values) or “spoofed” (negative values). Our system used convolutional neural networks (CNNs) and a representation of the speech audio that combined x-vector attack embeddings with signal processing features. The x-vector attack embeddings were created from mel-frequency cepstral coefficients (MFCCs) using a time-delay neural network (TDNN). These embeddings jointly modeled 27 different environments and 9 types of attacks from the labeled data. We also used sub-band spectral centroid magnitude coefficients (SCMCs) as features. We included an additive Gaussian noise layer during training as a way to augment the data to make our system more robust to previously unseen attack examples. We report system performance using the tandem detection cost function (tDCF) and equal error rate (EER). Our approach performed better that both of the challenge baselines. Our technique suggests that our x-vector attack embeddings can help regularize the CNN predictions even when environments or attacks are more challenging.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ewan Dunbar|AUTHOR Ewan Dunbar]]^^1^^, [[Robin Algayres|AUTHOR Robin Algayres]]^^2^^, [[Julien Karadayi|AUTHOR Julien Karadayi]]^^2^^, [[Mathieu Bernard|AUTHOR Mathieu Bernard]]^^2^^, [[Juan Benjumea|AUTHOR Juan Benjumea]]^^2^^, [[Xuan-Nga Cao|AUTHOR Xuan-Nga Cao]]^^2^^, [[Lucie Miskic|AUTHOR Lucie Miskic]]^^1^^, [[Charlotte Dugrain|AUTHOR Charlotte Dugrain]]^^1^^, [[Lucas Ondel|AUTHOR Lucas Ondel]]^^3^^, [[Alan W. Black|AUTHOR Alan W. Black]]^^4^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^5^^, [[Sakriani Sakti|AUTHOR Sakriani Sakti]]^^6^^, [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LLF (UMR 7110), France; ^^2^^LSCP (UMR 8554), France; ^^3^^Brno University of Technology, Czech Republic; ^^4^^Carnegie Mellon University, USA; ^^5^^LIG (UMR 5217), France; ^^6^^NAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1088–1092&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present the Zero Resource Speech Challenge 2019, which proposes to build a speech synthesizer without any text or phonetic labels: hence, TTS without T (text-to-speech without text). We provide raw audio for a target voice in an unknown language (the Voice dataset), but no alignment, text or labels. Participants must discover subword units in an unsupervised way (using the Unit Discovery dataset) and align them to the voice recordings in a way that works best for the purpose of synthesizing novel utterances from novel speakers, similar to the target speaker’s voice. We describe the metrics used for evaluation, a baseline system consisting of unsupervised subword unit discovery plus a standard TTS system, and a topline TTS using gold phoneme transcriptions. We present an overview of the 19 submitted systems from 10 teams and discuss the main results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Siyuan Feng|AUTHOR Siyuan Feng]], [[Tan Lee|AUTHOR Tan Lee]], [[Zhiyuan Peng|AUTHOR Zhiyuan Peng]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1093–1097&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study addresses the problem of unsupervised subword unit discovery from untranscribed speech. It forms the basis of the ultimate goal of ZeroSpeech 2019, building text-to-speech systems without text labels. In this work, unit discovery is formulated as a pipeline of phonetically discriminative feature learning and unit inference. One major difficulty in robust unsupervised feature learning is dealing with speaker variation. Here the robustness towards speaker variation is achieved by applying adversarial training and FHVAE based disentangled speech representation learning. A comparison of the two approaches as well as their combination is studied in a DNN-bottleneck feature (DNN-BNF) architecture. Experiments are conducted on ZeroSpeech 2019 and 2017. Experimental results on ZeroSpeech 2017 show that both approaches are effective while the latter is more prominent, and that their combination brings further marginal improvement in across-speaker condition. Results on ZeroSpeech 2019 show that in the ABX discriminability task, our approaches significantly outperform the official baseline, and are competitive to or even outperform the official topline. The proposed unit sequence smoothing algorithm improves synthesis quality, at a cost of slight decrease in ABX discriminability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bolaji Yusuf|AUTHOR Bolaji Yusuf]], [[Alican Gök|AUTHOR Alican Gök]], [[Batuhan Gundogdu|AUTHOR Batuhan Gundogdu]], [[Oyku Deniz Kose|AUTHOR Oyku Deniz Kose]], [[Murat Saraclar|AUTHOR Murat Saraclar]]
</p><p class="cpabstractcardaffiliationlist">Boğaziçi Üniversitesi, Turkey</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1098–1102&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Zero-resource speech processing efforts focus on unsupervised discovery of sub-word acoustic units. Common approaches work with spatial similarities between the acoustic frame representations within Bayesian or neural network-based frameworks. We propose two methods that utilize the temporal proximity information in addition to the acoustic similarity for clustering frames into acoustic units. The first approach uses a temporally biased self-organizing map (SOM) to discover such units. Since the SOM unit indices are correlated with (vector) spatial distance, we pool neighboring units and then train a recurrent neural network to predict each pooled unit. The second approach incorporates temporal awareness by training a recurrent sparse autoencoder, in which unsupervised clustering is done on the intermediate softmax layer. This network is then fine-tuned using aligned pairs of acoustically similar sequences obtained via unsupervised term discovery. Our approaches outperform the provided baseline system on two main metrics of the Zerospeech 2019 challenge, ABX-discriminability and bitrate of the quantized embeddings, both for English and the surprise language. Furthermore, the temporal-awareness and the post-filtering techniques adopted in this work resulted in an enhanced continuity of the decoding, yielding low bitrates.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryan Eloff|AUTHOR Ryan Eloff]]^^1^^, [[André Nortje|AUTHOR André Nortje]]^^1^^, [[Benjamin van Niekerk|AUTHOR Benjamin van Niekerk]]^^1^^, [[Avashna Govender|AUTHOR Avashna Govender]]^^2^^, [[Leanne Nortje|AUTHOR Leanne Nortje]]^^1^^, [[Arnu Pretorius|AUTHOR Arnu Pretorius]]^^1^^, [[Elan van Biljon|AUTHOR Elan van Biljon]]^^1^^, [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]]^^1^^, [[Lisa van Staden|AUTHOR Lisa van Staden]]^^1^^, [[Herman Kamper|AUTHOR Herman Kamper]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Stellenbosch University, South Africa; ^^2^^University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1103–1107&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For our submission to the ZeroSpeech 2019 challenge, we apply discrete latent-variable neural networks to unlabelled speech and use the discovered units for speech synthesis. Unsupervised discrete subword modelling could be useful for studies of phonetic category learning in infants or in low-resource speech technology requiring symbolic input. We use an autoencoder (AE) architecture with intermediate discretisation. We decouple acoustic unit discovery from speaker modelling by conditioning the AE’s decoder on the training speaker identity. At test time, unit discovery is performed on speech from an unseen speaker, followed by unit decoding conditioned on a known target speaker to obtain reconstructed filterbanks. This output is fed to a neural vocoder to synthesise speech in the target speaker’s voice. For discretisation, categorical variational autoencoders (CatVAEs), vector-quantised VAEs (VQ-VAEs) and straight-through estimation are compared at different compression levels on two languages. Our final model uses convolutional encoding, VQ-VAE discretisation, deconvolutional decoding and an FFTNet vocoder. We show that decoupled speaker conditioning intrinsically improves discrete acoustic representations, yielding competitive synthesis quality compared to the challenge baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andy T. Liu|AUTHOR Andy T. Liu]], [[Po-chun Hsu|AUTHOR Po-chun Hsu]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]
</p><p class="cpabstractcardaffiliationlist">National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1108–1112&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an unsupervised end-to-end training scheme where we discover discrete subword units from speech without using any labels. The discrete subword units are learned under an ASR-TTS autoencoder reconstruction setting, where an ASR-Encoder is trained to discover a set of common linguistic units given a variety of speakers, and a TTS-Decoder trained to project the discovered units back to the designated speech. We propose a discrete encoding method, Multilabel-Binary Vectors (MBV), to make the ASR-TTS autoencoder differentiable. We found that the proposed encoding method offers automatic extraction of speech content from speaker style, and is sufficient to cover full linguistic content in a given language. Therefore, the TTS-Decoder can synthesize speech with the same content as the input of ASR-Encoder but with different speaker characteristics, which achieves voice conversion (VC). We further improve the quality of VC using adversarial training, where we train a TTS-Patcher that augments the output of TTS-Decoder. Objective and subjective evaluations show that the proposed approach offers strong VC results as it eliminates speaker identity while preserving content within speech. In the ZeroSpeech 2019 Challenge, we achieved outstanding performance in terms of low bitrate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Karthik Pandia D. S.|AUTHOR Karthik Pandia D. S.]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]
</p><p class="cpabstractcardaffiliationlist">IIT Madras, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1113–1117&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Zerospeech synthesis is the task of building vocabulary independent speech synthesis systems, where transcriptions are unavailable for training data. It is, therefore, necessary to convert training data into a sequence of fundamental acoustic units that can be used for synthesis during the test. This paper attempts to discover, and model perceptual acoustic units consisting of steady state, and transient regions in speech. The transients roughly correspond to CV, VC units, while the steady-state corresponds to sonorants and fricatives. The speech signal is first preprocessed by segmenting the same into CVC-like units using a short-term energy-like contour. These CVC segments are clustered using a connected components-based graph clustering technique. The clustered CVC segments are initialized such that the onset (CV) and decays (VC) correspond to transients, and the rhyme corresponds to steady-states. Following this initialization, the units are allowed to re-organise on the continuous speech into a final set of AUs in an HMM-GMM framework. AU sequences thus obtained are used to train synthesis models. The performance of the proposed approach is evaluated on the Zerospeech 2019 challenge database. Subjective and objective scores show that reasonably good quality synthesis with low bit rate encoding can be achieved using the proposed AUs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andros Tjandra|AUTHOR Andros Tjandra]]^^1^^, [[Berrak Sisman|AUTHOR Berrak Sisman]]^^2^^, [[Mingyang Zhang|AUTHOR Mingyang Zhang]]^^2^^, [[Sakriani Sakti|AUTHOR Sakriani Sakti]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^2^^, [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NAIST, Japan; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 1118–1122&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe our submitted system for the ZeroSpeech Challenge 2019. The current challenge theme addresses the difficulty of constructing a speech synthesizer without any text or phonetic labels and requires a system that can (1) discover subword units in an unsupervised way, and (2) synthesize the speech with a target speaker’s voice. Moreover, the system should also balance the discrimination score ABX, the bit-rate compression rate, and the naturalness and the intelligibility of the constructed voice. To tackle these problems and achieve the best trade-off, we utilize a vector quantized variational autoencoder (VQ-VAE) and a multi-scale codebook-to-spectrogram (Code2Spec) inverter trained by mean square error and adversarial loss. The VQ-VAE extracts the speech to a latent space, forces itself to map it into the nearest codebook and produces compressed representation. Next, the inverter generates a magnitude spectrogram to the target voice, given the codebook vectors from VQ-VAE. In our experiments, we also investigated several other clustering algorithms, including K-Means and GMM, and compared them with the VQ-VAE result on ABX scores and bit rates. Our proposed approach significantly improved the intelligibility (in CER), the MOS, and discrimination ABX scores compared to the official ZeroSpeech 2019 baseline or even the topline.</p></div>
\rules except wikilink

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manfred Kaltenbacher|AUTHOR Manfred Kaltenbacher]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Wien, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Our knowledge-based societies in the information age are highly dependent on efficient verbal communication. Today most people have employments which rely on their communication competence. Consequently, communication disorders became a worldwide socio-economic factor. To increase the quality of life on one hand and to keep the economic costs under control on the other, new medical strategies are needed to prevent communication disorders, enable early diagnosis and eventually treat and rehabilitate people concerned.

The key issue for communication is phonation, a complex process of voice production taking place in the larynx. Based on aeroacoustic principles, the sound is generated by the pulsating air jet and supra-glottal turbulent structures. The laryngeal sound is further filtered and amplified by the supra-glottal acoustic resonance spaces, radiated at the lips and perceived as voice. There is no doubt that the possibility to produce voice is crucial for human communication, although many people do not realize this until they lose their voice temporarily, e.g. due to common respiratory inflammations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nigel G. Ward|AUTHOR Nigel G. Ward]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at El Paso, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Prosody is essential in human interaction and relevant to every area of speech science and technology. Our understanding of prosody, although still fragmentary, is rapidly advancing. This survey will give non-specialists the knowledge needed to decide whether and how to integrate prosodic information into their models and systems. It will start with the basics: the paralinguistic, phonological and pragmatic functions of prosody, its physiology and perception, commonly and less-commonly-used prosodic features, and the three main approaches to modeling prosody. Regarding practical applications, it will overview ways to use prosody in speech recognition, speech synthesis, dialog systems, and the inference of speaker states and traits. Recent trends will then be presented, including modeling pitch as more than a single scalar value, modeling prosody beyond just intonation, representing prosodic knowledge with constructions of multiple prosodic features in specific temporal configurations, modeling observed prosody as the result of the superposition of patterns representing independent intents, modeling multi-speaker phenomenon, and the use of unsupervised methods. Finally, we will consider remaining challenges in research and applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Simon Roessig|AUTHOR Simon Roessig]], [[Doris Mücke|AUTHOR Doris Mücke]], [[Lena Pagel|AUTHOR Lena Pagel]]
</p><p class="cpabstractcardaffiliationlist">Universität zu Köln, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2533–2537&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speakers of intonation languages use bundles of cues to express prosodic prominence. This work contributes further evidence for the multi-dimensionality of prosodic prominence in German reporting articulatory (3D EMA) and acoustic recordings from 27 speakers. In particular, we show that speakers use specific categorical and continuous modifications of the laryngeal system (tonal onglide) as well as continuous modifications of the supra-laryngeal system (lip aperture and tongue body position) to mark focus structure prosodically. These modifications are found between unaccented and accented but also within the group of accented words, revealing that speakers use prosodic modulations to directly encode prominence. On the basis of these findings we develop a dynamical model of prosodic patterns that is able to capture the manipulations as the modulation of an attractor landscape that is shaped by the different prosodic dimensions involved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Antti Suni|AUTHOR Antti Suni]]^^1^^, [[Marcin Włodarczak|AUTHOR Marcin Włodarczak]]^^2^^, [[Martti Vainio|AUTHOR Martti Vainio]]^^1^^, [[Juraj Šimko|AUTHOR Juraj Šimko]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Helsinki, Finland; ^^2^^Stockholm University, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2538–2542&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a methodology for assessing similarities and differences between language varieties and dialects in terms of prosodic characteristics. A multi-speaker, multi-dialect WaveNet network is trained on low sample-rate signal retaining only prosodic characteristics of the original speech. The network is conditioned on labels related to speakers’ region or dialect. The resulting conditioning embeddings are subsequently used as a multi-dimensional characteristics of different language varieties, with results consistent with dialectological studies. The method and results are illustrated on a Swedia 2000 corpus of Swedish dialectal variation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andy Murphy|AUTHOR Andy Murphy]], [[Irena Yanushevskaya|AUTHOR Irena Yanushevskaya]], [[Ailbhe Ní Chasaide|AUTHOR Ailbhe Ní Chasaide]], [[Christer Gobl|AUTHOR Christer Gobl]]
</p><p class="cpabstractcardaffiliationlist">Trinity College Dublin, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2543–2547&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper explores how prominence can be modelled in speech synthesis through voice quality variation. Synthetic utterances varying in voice quality (breathy, modal, tense) were generated using a glottal source model where the global waveshape parameter R,,d,, was the main control parameter and f,,0,, was not varied. A manipulation task perception experiment was conducted to establish perceptually salient R,,d,, values in the signalling of focus. The participants were presented with mini-dialogues designed to elicit narrow focus (with different focal syllable locations) and were asked to manipulate an unknown parameter in the synthetic utterances to produce a natural response. The results showed that participants manipulated R,,d,, not only in focal syllables, but also in the pre- and postfocal material. The direction of R,,d,, manipulation in the focal syllables was the same across the three voice qualities — towards decreased R,,d,, values (tenser phonation). The magnitude of the decrease in R,,d,, was significantly less for tense voice compared to breathy and modal voice, but did not vary with the location of the focal syllable in the utterance. Overall, the results suggest that R,,d,, is effective as a control parameter for modelling prominence in synthetic speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rachel Albar|AUTHOR Rachel Albar]], [[Hiyon Yoo|AUTHOR Hiyon Yoo]]
</p><p class="cpabstractcardaffiliationlist">LLF (UMR 7110), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2548–2552&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate Japanese learners’ ability to produce and understand the French continuative rising contour. In French, rising contours can be linked to syntactic, metrical, interactional and phrasing functions, while in Japanese, prosodic boundaries are marked with a default low tone (L%).

Our main hypothesis is that Japanese learners’ proficiency is linked to their phonological awareness of rising contours in French. We expect that advanced learners will be able to correctly produce rising contours in internal AP and IP positions, and even distinguish between subtle differences in rising contours.

We present the results from two different experiments. To test learners’ ability to produce rising contours, subjects were asked to naturally reproduce utterances containing violations in certain prosodic contours. Results show that, although the task remains difficult, learners were able to correct non-rising contours to varying degrees. We then conducted a sentence completion task where subjects listened to the beginning of a statement and chose the adequate sequence of words that followed what they had heard. Results show that Japanese learners, no matter their proficiency, are not able to distinguish the different types of rising contours that are dependent on different syntactic boundaries.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Masaki Okawa|AUTHOR Masaki Okawa]], [[Takuya Saito|AUTHOR Takuya Saito]], [[Naoki Sawada|AUTHOR Naoki Sawada]], [[Hiromitsu Nishizaki|AUTHOR Hiromitsu Nishizaki]]
</p><p class="cpabstractcardaffiliationlist">University of Yamanashi, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2553–2557&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigated the waveform representation for audio signal classification. Recently, many studies on audio waveform classification such as acoustic event detection and music genre classification have been published. Most studies on audio waveform classification have proposed the use of a deep learning (neural network) framework. Generally, a frequency analysis method such as Fourier transform is applied to extract the frequency or spectral information from the input audio waveform before inputting the raw audio waveform into the neural network. In contrast to these previous studies, in this paper, we propose a novel waveform representation method, in which audio waveforms are represented as a bit sequence, for audio classification. In our experiment, we compare the proposed bit representation waveform, which is directly given to a neural network, to other representations of audio waveforms such as a raw audio waveform and a power spectrum with two classification tasks: one is an acoustic event classification task and the other is a sound/music classification task. The experimental results showed that the bit representation waveform achieved the best classification performance for both the tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manjunath Mulimani|AUTHOR Manjunath Mulimani]], [[Shashidhar G. Koolagudi|AUTHOR Shashidhar G. Koolagudi]]
</p><p class="cpabstractcardaffiliationlist">NITK Surathkal, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2558–2562&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a novel Fused Visual Features (FVFs) are proposed for Acoustic Event Classification (AEC) in the meeting room and office environments. The codes of Visual Features (VFs) are evaluated from row vectors and Scale Invariant Feature Transform (SIFT) vectors of the grayscale Gammatonegram of an acoustic event separately using Locality-constrained Linear Coding (LLC). Further, VFs from row vectors and SIFT vectors of the grayscale Gammatonegram are fused to get FVFs. Performance of the proposed FVFs is evaluated on acoustic events of publicly available UPC-TALP and DCASE datasets in clean and noisy conditions. Results show that proposed FVFs are robust to noise and achieve overall recognition accuracy of 96.40% and 90.45% on UPC-TALP and DCASE datasets, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yu-Han Shen|AUTHOR Yu-Han Shen]], [[Ke-Xin He|AUTHOR Ke-Xin He]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2563–2567&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a temporal-frequential attention model for sound event detection (SED). Our network learns how to listen with two attention models: a temporal attention model and a frequential attention model. Proposed system learns when to listen using the temporal attention model while it learns where to listen on the frequency axis using the frequential attention model. With these two models, we attempt to make our system pay more attention to important frames or segments and important frequency components for sound event detection. Our proposed method is demonstrated on the task 2 of Detection and Classification of Acoustic Scenes and Events (DCASE) 2017 Challenge and outperforms state-of-the-art methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Logan Ford|AUTHOR Logan Ford]], [[Hao Tang|AUTHOR Hao Tang]], [[François Grondin|AUTHOR François Grondin]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2568–2572&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many of the recent advances in audio event detection, particularly on the AudioSet data set, have focused on improving performance using the released embeddings produced by a pre-trained model. In this work, we instead study the task of training a multi-label event classifier directly from the audio recordings of AudioSet. Using the audio recordings, not only are we able to reproduce results from prior work, we have also confirmed improvements of other proposed additions, such as an attention module. Moreover, by training the embedding network jointly with the additions, we achieve an mAP of 0.392 and an AUC of 0.971, surpassing the state of the art without transfer learning from a large data set. We also analyze the output activations of the network and find that the models are able to localize audio events when a finer time resolution is needed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chandan K.A. Reddy|AUTHOR Chandan K.A. Reddy]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Johannes Gehrke|AUTHOR Johannes Gehrke]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2573–2577&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice-over-Internet-Protocol (VoIP) calls are prone to various speech impairments due to environmental and network conditions resulting in bad user experience. A reliable audio impairment classifier helps to identify the cause for bad audio quality. The user feedback after the call can act as the ground truth labels for training a supervised classifier on a large audio dataset. However, the labels are noisy as most of the users lack the expertise to precisely articulate the impairment in the perceived speech. In this paper, we analyze the effects of massive noise in labels in training dense networks and Convolutional Neural Networks (CNN) using engineered features, spectrograms and raw audio samples as inputs. We demonstrate that CNN can generalize better on the training data with a large number of noisy labels and gives remarkably higher test performance. The classifiers were trained both on randomly generated label noise and the label noise introduced by human errors. We also show that training with noisy labels requires a significant increase in the training dataset size, which is in proportion to the amount of noise in the labels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lorenzo Tarantino|AUTHOR Lorenzo Tarantino]]^^1^^, [[Philip N. Garner|AUTHOR Philip N. Garner]]^^2^^, [[Alexandros Lazaridis|AUTHOR Alexandros Lazaridis]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EPFL, Switzerland; ^^2^^Idiap Research Institute, Switzerland; ^^3^^Swisscom, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2578–2582&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech Emotion Recognition (SER) has been shown to benefit from many of the recent advances in deep learning, including recurrent based and attention based neural network architectures as well. Nevertheless, performance still falls short of that of humans. In this work, we investigate whether SER could benefit from the self-attention and global windowing of the transformer model. We show on the IEMOCAP database that this is indeed the case. Finally, we investigate whether using the distribution of, possibly conflicting, annotations in the training data, as soft targets could outperform a majority voting. We prove that this performance increases with the agreement level of the annotators.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Eliya Nachmani|AUTHOR Eliya Nachmani]], [[Lior Wolf|AUTHOR Lior Wolf]]
</p><p class="cpabstractcardaffiliationlist">Facebook, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2583–2587&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a deep learning method for singing voice conversion. The proposed network is not conditioned on the text or on the notes, and it directly converts the audio of one singer to the voice of another. Training is performed without any form of supervision: no lyrics or any kind of phonetic features, no notes, and no matching samples between singers. The proposed network employs a single CNN encoder for all singers, a single WaveNet decoder, and a classifier that enforces the latent representation to be singer-agnostic. Each singer is represented by one embedding vector, which the decoder is conditioned on. In order to deal with relatively small datasets, we propose a new data augmentation scheme, as well as new training losses and protocols that are based on backtranslation. Our evaluation presents evidence that the conversion produces natural signing voices that are highly recognizable as the target singer.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Juheon Lee|AUTHOR Juheon Lee]], [[Hyeong-Seok Choi|AUTHOR Hyeong-Seok Choi]], [[Chang-Bin Jeon|AUTHOR Chang-Bin Jeon]], [[Junghyun Koo|AUTHOR Junghyun Koo]], [[Kyogu Lee|AUTHOR Kyogu Lee]]
</p><p class="cpabstractcardaffiliationlist">Seoul National University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2588–2592&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose an end-to-end Korean singing voice synthesis system from lyrics and a symbolic melody using the following three novel approaches: 1) phonetic enhancement masking, 2) local conditioning of text and pitch to the super-resolution network, and 3) conditional adversarial training. The proposed system consists of two main modules; a mel-synthesis network that generates a mel-spectrogram from the given input information, and a super-resolution network that upsamples the generated mel-spectrogram into a linear-spectrogram. In the mel-synthesis network, phonetic enhancement masking is applied to generate implicit formant masks solely from the input text, which enables a more accurate phonetic control of singing voice. In addition, we show that two other proposed methods — local conditioning of text and pitch, and conditional adversarial training — are crucial for a realistic generation of the human singing voice in the super-resolution process. Finally, both quantitative and qualitative evaluations are conducted, confirming the validity of all proposed methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuan-Hao Yi|AUTHOR Yuan-Hao Yi]], [[Yang Ai|AUTHOR Yang Ai]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]
</p><p class="cpabstractcardaffiliationlist">USTC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2593–2597&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a method of using autoregressive neural networks for the acoustic modeling of singing voice synthesis (SVS). Singing voice differs from speech and it contains more local dynamic movements of acoustic features, e.g., vibratos. Therefore, our method adopts deep autoregressive (DAR) models to predict the F0 and spectral features of singing voice in order to better describe the dependencies among the acoustic features of consecutive frames. For F0 modeling, discretized F0 values are used and the influences of the history length in DAR are analyzed by experiments. An F0 post-processing strategy is also designed to alleviate the inconsistency between the predicted F0 contours and the F0 values determined by music notes. Furthermore, we extend the DAR model to deal with continuous spectral features, and a prenet module with self-attention layers is introduced to process historical frames. Experiments on a Chinese singing voice corpus demonstrate that our method using DARs can produce F0 contours with vibratos effectively, and can achieve better objective and subjective performance than the conventional method using recurrent neural networks (RNNs).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sara Dahmani|AUTHOR Sara Dahmani]], [[Vincent Colotte|AUTHOR Vincent Colotte]], [[Valérian Girard|AUTHOR Valérian Girard]], [[Slim Ouni|AUTHOR Slim Ouni]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2598–2602&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In recent years, the performance of speech synthesis systems has been improved thanks to deep learning-based models, but generating expressive audiovisual speech is still an open issue. The variational auto-encoders (VAE)s are recently proposed to learn latent representations of data. In this paper, we present a system for expressive text-to-audiovisual speech synthesis that learns a latent embedding space of emotions using a conditional generative model based on the variational auto-encoder framework. When conditioned on textual input, the VAE is able to learn an embedded representation that captures emotion characteristics from the signal, while being invariant to the phonetic content of the utterances. We applied this method in an unsupervised manner to generate duration, acoustic and visual features of speech. This conditional variational auto-encoder (CVAE) has been used to blend emotions together. This model was able to generate nuances of a given emotion or to generate new emotions that do not exist in our database. We conducted three perceptive experiments to evaluate our findings.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David Ayllón|AUTHOR David Ayllón]], [[Fernando Villavicencio|AUTHOR Fernando Villavicencio]], [[Pierre Lanchantin|AUTHOR Pierre Lanchantin]]
</p><p class="cpabstractcardaffiliationlist">ObEN, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2603–2607&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/3049" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech-to-Singing refers to techniques that transform speech to a singing voice. A major performance factor of this process relies on the precision to align the phonetic sequence of the input speech to the timing of the target singing. Unfortunately, the precision of existing techniques for phone-level lyrics-to-audio alignment has been found insufficient for this task. We propose a complete pipeline for automatic phone-level lyrics-to-audio alignment based on an HMM-based forced-aligner and singing acoustics normalization. The system obtains phone-level precision in the range of a few tens of milliseconds as we report in the objective evaluation. The subjective evaluation reveals that the smoothness of the singing voice generated with the proposed methodology was found close to the one obtained using manual alignments.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Théo Biasutto--Lervat|AUTHOR Théo Biasutto--Lervat]], [[Sara Dahmani|AUTHOR Sara Dahmani]], [[Slim Ouni|AUTHOR Slim Ouni]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2608–2612&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we investigate how to learn labial coarticulation to generate a sparse representation of the face from speech. To do so, we experiment a sequential deep learning model, bidirectional gated recurrent networks, which have reached nice result in addressing the articulatory inversion problem and so should be able to handle coarticulation effects. As acquiring audiovisual corpora is expensive and time-consuming, we designed our solution to counteract the lack of data. Firstly, we have used phonetic information (phoneme label and respective duration) as input to ensure speaker independence, and in second hand, we have experimented around pretraining strategies to reach acceptable performances. We demonstrate how a careful initialization of the last layers of the network can greatly ease the training and help to handle coarticulation effect. This initialization relies on dimensionality reduction strategies, allowing injecting knowledge of useful latent representation of the visual data into the network. We focused on two data-driven tools (PCA and autoencoder) and one hand-crafted latent space coming from animation community, blendshapes decomposition. We have trained and evaluated the model with a corpus consisting of 4 hours of French speech, and we have gotten an average RMSE close to 1.3mm.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel S. Park|AUTHOR Daniel S. Park]]^^1^^, [[William Chan|AUTHOR William Chan]]^^2^^, [[Yu Zhang|AUTHOR Yu Zhang]]^^1^^, [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]]^^1^^, [[Barret Zoph|AUTHOR Barret Zoph]]^^1^^, [[Ekin D. Cubuk|AUTHOR Ekin D. Cubuk]]^^1^^, [[Quoc V. Le|AUTHOR Quoc V. Le]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Google, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2613–2617&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present SpecAugment, a simple data augmentation method for speech recognition. SpecAugment is applied directly to the feature inputs of a neural network (i.e., filter bank coefficients). The augmentation policy consists of warping the features, masking blocks of frequency channels, and masking blocks of time steps. We apply SpecAugment on Listen, Attend and Spell networks for end-to-end speech recognition tasks. We achieve state-of-the-art performance on the LibriSpeech 960h and Switchboard 300h tasks, outperforming all prior work. On LibriSpeech, we achieve 6.8% WER on test-other without the use of a language model, and 5.8% WER with shallow fusion with a language model. This compares to the previous state-of-the-art hybrid system of 7.5% WER. For Switchboard, we achieve 7.2%/14.6% on the Switchboard/CallHome portion of the Hub5’00 test set without the use of a language model, and 6.8%/14.1% with shallow fusion, which compares to the previous state-of-the-art hybrid system at 8.3%/17.3% WER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[George Saon|AUTHOR George Saon]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[Michael Picheny|AUTHOR Michael Picheny]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2618–2622&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prior work has shown that connectionist temporal classification (CTC)-based automatic speech recognition systems perform well when using bidirectional long short-term memory (BLSTM) networks unrolled over the whole speech utterance. This is because whole-utterance BLSTMs better capture long-term context. We hypothesize that this also leads to overfitting and propose soft forgetting as a solution. During training, we unroll the BLSTM network only over small non-overlapping chunks of the input utterance. We randomly pick a chunk size for each batch instead of a fixed global chunk size. In order to retain some utterance-level information, we encourage the hidden states of the BLSTM network to approximate those of a pre-trained whole-utterance BLSTM. Our experiments on the 300-hour English Switchboard dataset show that soft forgetting improves the word error rate (WER) above a competitive whole-utterance phone CTC BLSTM by an average of 7–9% relative. We obtain WERs of 9.1%/17.4% using speaker-independent and 8.7%/16.8% using speaker-adapted models respectively on the Hub5-2000 Switchboard/CallHome test sets. We also show that soft forgetting improves the WER when the model is used with limited temporal context for streaming recognition. Finally, we present some empirical insights into the regularization and data augmentation effects of soft forgetting.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haoran Miao|AUTHOR Haoran Miao]], [[Gaofeng Cheng|AUTHOR Gaofeng Cheng]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Ta Li|AUTHOR Ta Li]], [[Yonghong Yan|AUTHOR Yonghong Yan]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2623–2627&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The hybrid CTC/attention end-to-end automatic speech recognition (ASR) combines CTC ASR system and attention ASR system into a single neural network. Although the hybrid CTC/attention ASR system takes the advantages of both CTC and attention architectures in training and decoding, it remains challenging to be used for streaming speech recognition for its attention mechanism, CTC prefix probability and bidirectional encoder. In this paper, we propose a stable monotonic chunkwise attention (sMoChA) to stream its attention branch and a truncated CTC prefix probability (T-CTC) to stream its CTC branch. On the acoustic model side, we utilize the latency-controlled bidirectional long short-term memory (LC-BLSTM) to stream its encoder. On the joint CTC/attention decoding side, we propose the dynamic waiting joint decoding (DWDJ) algorithm to collect the decoding hypotheses from the CTC and attention branches. Through the combination of the above methods, we stream the hybrid CTC/attention ASR system without much word error rate degradation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Zhang|AUTHOR Wei Zhang]], [[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[Ulrich Finkler|AUTHOR Ulrich Finkler]], [[George Saon|AUTHOR George Saon]], [[Abdullah Kayi|AUTHOR Abdullah Kayi]], [[Alper Buyuktosunoglu|AUTHOR Alper Buyuktosunoglu]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[David Kung|AUTHOR David Kung]], [[Michael Picheny|AUTHOR Michael Picheny]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2628–2632&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Modern Automatic Speech Recognition (ASR) systems rely on distributed deep learning to for quick training completion. To enable efficient distributed training, it is imperative that the training algorithms can converge with a large mini-batch size. In this work, we discovered that Asynchronous Decentralized Parallel Stochastic Gradient Descent (ADPSGD) can work with much larger batch size than commonly used Synchronous SGD (SSGD) algorithm. On commonly used public SWB-300 and SWB-2000 ASR datasets, ADPSGD can converge with a batch size 3X as large as the one used in SSGD, thus enable training at a much larger scale. Further, we proposed a Hierarchical-ADPSGD (H-ADPSGD) system in which learners on the same computing node construct a super learner via a fast allreduce implementation, and super learners deploy ADPSGD algorithm among themselves. On a 64 Nvidia V100 GPU cluster connected via a 100Gb/s Ethernet network, our system is able to train SWB-2000 to reach a 7.6% WER on the Hub5-2000 Switchboard (SWB) test-set and a 13.2% WER on the Call-Home (CH) test-set in 5.2 hours. To the best of our knowledge, this is the fastest ASR training system that attains this level of model accuracy for SWB-2000 task to be ever reported in the literature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Xuankai Chang|AUTHOR Xuankai Chang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">Shanghai Jiao Tong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2633–2637&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end models for monaural multi-speaker automatic speech recognition (ASR) have become an important and interesting approach when dealing with the multi-talker mixed speech under cocktail party scenario. However, there is still a large performance gap between the multi-speaker and single-speaker speech recognition systems. In this paper, we propose a novel framework that integrates teacher-student training with the attention-based end-to-end ASR model, which can do the knowledge distillation from the single-talker ASR system to multi-talker one effectively. First the objective function is revised to combine the knowledge from both single-talker and multi-talker labels. Then we extend the original single attention to speaker parallel attention modules in the teacher-student training based end-to-end framework to boost the performance more. Moreover, a curriculum learning strategy on the training data with an ordered signal-to-noise ratios (SNRs) is designed to obtain a further improvement. The proposed methods are evaluated on two-speaker mixed speech generated from the WSJ0 corpus, which is commonly used for this task recently. The experimental results show that the newly proposed knowledge transfer architecture with an end-to-end model can significantly improve the system performance for monaural multi-talker speech recognition, and more than 15% relative WER reduction is achieved against the traditional end-to-end model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tobias Menne|AUTHOR Tobias Menne]], [[Ilya Sklyar|AUTHOR Ilya Sklyar]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]
</p><p class="cpabstractcardaffiliationlist">RWTH Aachen University, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2638–2642&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Significant performance degradation of automatic speech recognition (ASR) systems is observed when the audio signal contains cross-talk. One of the recently proposed approaches to solve the problem of multi-speaker ASR is the deep clustering (DPCL) approach. Combining DPCL with a state-of-the-art hybrid acoustic model, we obtain a word error rate (WER) of 16.5% on the commonly used wsj0-2mix dataset, which is the best performance reported thus far to the best of our knowledge. The wsj0-2mix dataset contains simulated cross-talk where the speech of multiple speakers overlaps for almost the entire utterance. In a more realistic ASR scenario the audio signal contains significant portions of single-speaker speech and only part of the signal contains speech of multiple competing speakers. This paper investigates obstacles of applying DPCL as a preprocessing method for ASR in such a scenario of sparsely overlapping speech. To this end we present a data simulation approach, closely related to the wsj0-2mix dataset, generating sparsely overlapping speech datasets of arbitrary overlap ratio. The analysis of applying DPCL to sparsely overlapping speech is an important interim step between the fully overlapping datasets like wsj0-2mix and more realistic ASR datasets, such as CHiME-5 or AMI.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ann R. Bradlow|AUTHOR Ann R. Bradlow]]
</p><p class="cpabstractcardaffiliationlist">Northwestern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>This presentation will consider the causes, characteristics, and consequences of second-language (L2) speech production through the lens of a talker-listener alignment model. Rather than focusing on L2 speech as deviant from the L1 target, this model views speech communication as a cooperative activity in which interlocutors adjust their speech production and perception in a bi-directional, dynamic manner. Three lines of support will be presented. First, principled accounts of salient acoustic-phonetic markers of L2 speech will be developed with reference to language-general challenges of L2 speech production and to language-specific L1-L2 structural interactions. Next, we will examine recognition of L2 speech by listeners from various language backgrounds, noting in particular that for L2 listeners, L2 speech can be equally (or sometimes, more) intelligible than L1 speech. Finally, we will examine perceptual adaptation to L2 speech by L1 listeners, highlighting studies that focused on interactive, dialogue-based test settings where we can observe the dynamics of talker adaptation to the listener and vice versa. Throughout this survey, I will refer to current methodological and technical developments in corpus-based phonetics and interactive testing paradigms that open new windows on the dynamics of speech communication across a language barrier.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[John S. Novak III|AUTHOR John S. Novak III]], [[Daniel Bunn|AUTHOR Daniel Bunn]], [[Robert V. Kenyon|AUTHOR Robert V. Kenyon]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Chicago, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2643–2647&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>When speaking to second language learners, talkers often reduce their rate of speech to assist their listeners’ understanding and comprehension. This study grants English as a Second Language subjects fine-grained, real-time control over the playback rates of lengthy audio tracks of conversational speech, and tests the subjects’ listening comprehension at their desired playback speeds and at unmodified speeds. We find evidence that slower playback rates are preferred, but no evidence that such playback rates affect listener comprehension.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shuju Shi|AUTHOR Shuju Shi]]^^1^^, [[Chilin Shih|AUTHOR Chilin Shih]]^^1^^, [[Jinsong Zhang|AUTHOR Jinsong Zhang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Illinois at Urbana-Champaign, USA; ^^2^^BLCU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2648–2652&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Theories of second language (L2) acquisition of phonology / phonetics / pronunciation / accent often resort to the similarity/ dissimilarity between the first language (L1) and L2 sound inventories. Measuring the similarity of two speech sounds could involve many acoustic dimensions, e.g., fundamental frequency (F0), formants, duration, etc.. The measurement of the sound inventories of two languages can be further complicated by the distribution of sounds within each inventory as well as the interaction of phonology and phonetics between the two inventories. This paper attempts to propose a tentative approach to quantify similarity/dissimilarity of sound pairs between two language inventories and to incorporate phonological influence in the acoustic measures used. The language pairs studied are English and Mandarin Chinese and only their vowel inventories are considered. Mel-Frequency Cepstral Coefficients (MFCCs) are used as features, and Principle Component Analysis (PCA) is used and slightly adjusted to simulate the perceptual space. Similarity/dissimilarity of sound pairs between the language inventories are examined and potential L2 error patterns are predicted based on the proposed approach. Results showed that predicted results using the proposed approach can be well related to those by Speech Learning Model (SLM), Perceptual Assimilation Model for L2 (PAM-L2) and Native Language Magnet Model (NLM).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Juqiang Chen|AUTHOR Juqiang Chen]], [[Catherine T. Best|AUTHOR Catherine T. Best]], [[Mark Antoniou|AUTHOR Mark Antoniou]]
</p><p class="cpabstractcardaffiliationlist">Western Sydney University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2653–2657&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present study investigated how cognitive factors, memory load and attention control, affected imitation of Thai tones by Mandarin speakers with no prior Thai experience. Mandarin speakers lengthened the syllable duration, enlarged the F0 excursion and moved some F0 max location earlier compared with the stimuli, even in the immediate imitation condition. Talker variability had a larger impact on imitation than memory load, whereas vowel variability did not have any effect. Perceptual assimilation patterns partially influenced imitation performance, suggesting phonological categorization in imitation and a perception-production link.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Annie Tremblay|AUTHOR Annie Tremblay]]^^1^^, [[Mirjam Broersma|AUTHOR Mirjam Broersma]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Kansas, USA; ^^2^^Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2658–2662&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study investigates whether and how foreign-language knowledge affects the use of non-native cues in speech segmentation. It does so by testing whether Dutch listeners’ French knowledge enhances their use of word-final fundamental-frequency (F0) rise — consistent with the typical French prosodic pattern — in artificial-language (AL) speech segmentation. More specifically, this study examines whether Dutch listeners with good French knowledge outperform Dutch listeners with limited French knowledge in the selection of AL words over (nonword or partword) foils, following exposure to an AL with word-final F0 rises. Dutch listeners with good French knowledge completed the AL-segmentation task from Kim et al.’s [2] word-final F0-rise condition. The results were compared to Kim et al.’s [2] Dutch listeners with limited French knowledge and Tremblay et al.’s [1] native French listeners in the same condition. Dutch listeners with good French knowledge performed more accurately than Dutch listeners with limited French knowledge but less accurately than native French listeners on trials with partword foils, with the three groups not differing on trials with nonword foils. Given these results, we propose that foreign-language knowledge can help listeners compute the conditional probability of co-occurrence of successive syllables in an AL and can thus enhance AL speech segmentation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Abdalghani Abujabal|AUTHOR Abdalghani Abujabal]]^^1^^, [[Judith Gaspers|AUTHOR Judith Gaspers]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MPI for Informatics, Germany; ^^2^^Amazon, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2663–2667&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Named entity recognition (NER) is a vital task in spoken language understanding, which aims to identify mentions of named entities in text e.g., from transcribed speech. Existing neural models for NER rely mostly on dedicated word-level representations, which suffer from two main shortcomings. First, the vocabulary size is large, yielding large memory requirements and training time. Second, these models are not able to learn morphological or phonological representations. To remedy the above shortcomings, we adopt a neural solution based on bidirectional LSTMs and conditional random fields, where we rely on subword units, namely  characters,  phonemes, and  bytes. For each word in an utterance, our model learns a representation from each of the subword units. We conducted experiments in a real-world large-scale setting for the use case of a voice-controlled device covering four languages with up to 5.5M utterances per language. Our experiments show that (1) with increasing training data, performance of models trained solely on subword units becomes closer to that of models with dedicated word-level embeddings (91.35 vs 93.92 F1 for English), while using a much smaller vocabulary size (332 vs 74K), (2) subword units enhance models with dedicated word-level embeddings, and (3) combining different subword units improves performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Saurabhchand Bhati|AUTHOR Saurabhchand Bhati]]^^1^^, [[Shekhar Nayak|AUTHOR Shekhar Nayak]]^^2^^, [[K. Sri Rama Murty|AUTHOR K. Sri Rama Murty]]^^2^^, [[Najim Dehak|AUTHOR Najim Dehak]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Johns Hopkins University, USA; ^^2^^IIT Hyderabad, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2668–2672&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unsupervised discovery of acoustic units from the raw speech signal forms the core objective of zero-resource speech processing. It involves identifying the acoustic segment boundaries and consistently assigning unique labels to acoustically similar segments. In this work, the possible candidates for segment boundaries are identified in an unsupervised manner from the kernel Gram matrix computed from the Mel-frequency cepstral coefficients (MFCC). These segment boundary candidates are used to train a siamese network, that is intended to learn embeddings that minimize intrasegment distances and maximize the intersegment distances. The siamese embeddings capture phonetic information from longer contexts of the speech signal and enhance the intersegment discriminability. These properties make the siamese embeddings better suited for acoustic segmentation and clustering than the raw MFCC features. The Gram matrix computed from the siamese embeddings provides unambiguous evidence for boundary locations. The initial candidate boundaries are refined using this evidence, and siamese embeddings are extracted for the new acoustic segments. A graph growing approach is used to cluster the siamese embeddings, and a unique label is assigned to acoustically similar segments. The performance of the proposed method for acoustic segmentation and clustering is evaluated on Zero Resource 2017 database.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bolaji Yusuf|AUTHOR Bolaji Yusuf]], [[Murat Saraclar|AUTHOR Murat Saraclar]]
</p><p class="cpabstractcardaffiliationlist">Boğaziçi Üniversitesi, Turkey</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2673–2677&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>State of the art vocabulary-independent spoken term detection methods are typically based on variants of the dynamic time warping (DTW) algorithm since DTW, being based on acoustic sequence matching, allows robust retrieval in settings with scarcity of linguistic resources. However, the DTW comes with a high computational cost which limits its practicality in a deployed server. To this end, we investigate the efficacy of subsampling and propose a neural network architecture to reduce the computational load of DTW-based keyword search. We use a time-subsampled RNN to reduce the frame rate of the document as well as the dimensionality of representation while training it to maintain the cost incurred along the DTW alignment path, thus allowing us to reduce the computational complexity (both space and time) of the search algorithm.

Experiments on the Turkish and Zulu limited language packs of the IARPA Babel program show that the proposed methods allow considerable reduction in CPU time (88 times) and memory usage (18 times) without significant loss in search accuracy (0.0270 ATWV). Moreover, even at very high compression levels with lower search precision, high recall rates are maintained, allowing the potential of multi-resolution search.</p></div>
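To see why subsampling pays off, recall that plain DTW fills an N×M dynamic-programming table, so reducing the document frame rate shrinks both memory and CPU time roughly in proportion. The textbook implementation below (not the authors' search system) makes that cost explicit.

```python
import numpy as np

def dtw_cost(query: np.ndarray, doc: np.ndarray) -> float:
    """query: (N, d), doc: (M, d). Returns the DTW alignment cost; work is O(N * M)."""
    N, M = len(query), len(doc)
    dist = np.linalg.norm(query[:, None, :] - doc[None, :, :], axis=-1)  # (N, M) frame distances
    acc = np.full((N + 1, M + 1), np.inf)
    acc[0, 0] = 0.0
    for i in range(1, N + 1):
        for j in range(1, M + 1):
            acc[i, j] = dist[i - 1, j - 1] + min(acc[i - 1, j],      # insertion
                                                 acc[i, j - 1],      # deletion
                                                 acc[i - 1, j - 1])  # match
    return float(acc[N, M])

q, d = np.random.randn(50, 32), np.random.randn(400, 32)
print(dtw_cost(q, d), dtw_cost(q, d[::2]))  # a 2x-subsampled document needs roughly half the work
```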
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zixiaofan Yang|AUTHOR Zixiaofan Yang]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]
</p><p class="cpabstractcardaffiliationlist">Columbia University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2678–2682&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic word embeddings have been proven to be useful in query-by-example keyword search. Such embeddings are typically trained to distinguish the same word from a different word using exact orthographic representations; so, two different words will have dissimilar embeddings even if they are pronounced similarly or share the same stem. However, in real-world applications such as keyword search in low-resource languages, models are expected to find all derived and inflected forms for a certain keyword. In this paper, we address this mismatch by incorporating linguistic information when training neural acoustic word embeddings. We propose two linguistically-informed methods for training these embeddings, both of which, when we use metrics that consider non-exact matches, outperform state-of-the-art models on the Switchboard dataset. We also present results on Sinhala to show that models trained on English can be directly transferred to embed spoken words in a very different language with high accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liming Wang|AUTHOR Liming Wang]], [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]]
</p><p class="cpabstractcardaffiliationlist">University of Illinois at Urbana-Champaign, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2683–2687&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper demonstrates three different systems capable of performing the multimodal word discovery task. A multimodal word discovery system accepts, as input, a database of spoken descriptions of images (or a set of corresponding phone transcripts), and learns a lexicon which is a mapping from phone strings to their associated image concepts. Three systems are demonstrated: one based on a statistical machine translation (SMT) model, two based on neural machine translation (NMT). On Flickr8k, the SMT-based model performs much better than the NMT-based one, achieving a 49.6% F1 score. Finally, we apply our word discovery system to the task of image retrieval and achieve 29.1% recall@10 on the standard 1000-image Flickr8k tests set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marcely Zanon Boito|AUTHOR Marcely Zanon Boito]]^^1^^, [[Aline Villavicencio|AUTHOR Aline Villavicencio]]^^2^^, [[Laurent Besacier|AUTHOR Laurent Besacier]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LIG (UMR 5217), France; ^^2^^University of Essex, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2688–2692&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Since Bahdanau et al. [1] first introduced attention for neural machine translation, most sequence-to-sequence models made use of attention mechanisms [2, 3, 4]. While they produce soft-alignment matrices that could be interpreted as alignment between target and source languages, we lack metrics to quantify their quality, being unclear which approach produces the best alignments. This paper presents an empirical evaluation of 3 of the main sequence-to-sequence models for word discovery from unsegmented phoneme sequences: CNN, RNN and Transformer-based. This task consists in aligning word sequences in a source language with phoneme sequences in a target language, inferring from it word segmentation on the target side [5]. Evaluating word segmentation quality can be seen as an extrinsic evaluation of the soft-alignment matrices produced during training. Our experiments in a low-resource scenario on Mboshi and English languages (both aligned to French) show that RNNs surprisingly outperform CNNs and Transformer for this task. Our results are confirmed by an intrinsic evaluation of alignment quality through the use Average Normalized Entropy (ANE). Lastly, we improve our best word discovery model by using an alignment entropy confidence measure that accumulates ANE over all the occurrences of a given alignment pair in the collection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]]^^1^^, [[Julien van Hout|AUTHOR Julien van Hout]]^^1^^, [[Colleen Richey|AUTHOR Colleen Richey]]^^1^^, [[Mitchell McLaren|AUTHOR Mitchell McLaren]]^^1^^, [[Maria A. Barrios|AUTHOR Maria A. Barrios]]^^2^^, [[Aaron Lawson|AUTHOR Aaron Lawson]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SRI International, USA; ^^2^^Lab41, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2438–2442&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The VOiCES from a Distance Challenge 2019 was designed to foster research in the area of speaker recognition and automatic speech recognition (ASR) with a special focus on single-channel distant/far-field audio under various noisy conditions. The challenge was based on the recently released VOiCES corpus, with 60 international teams involved, of which 24 teams participated in the evaluation. In this paper, we separately present the challenge’s speaker recognition and ASR tasks. For each task, we outline the training, development, and test data, as well as the evaluation metrics. Then, we report and discuss the results in light of the participant-provided system descriptions, to highlight the major factors contributing to high performance in distant speech processing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sergey Novoselov|AUTHOR Sergey Novoselov]]^^1^^, [[Aleksei Gusev|AUTHOR Aleksei Gusev]]^^1^^, [[Artem Ivanov|AUTHOR Artem Ivanov]]^^1^^, [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]]^^1^^, [[Andrey Shulipa|AUTHOR Andrey Shulipa]]^^2^^, [[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]]^^1^^, [[Vladimir Volokhov|AUTHOR Vladimir Volokhov]]^^1^^, [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2443–2447&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the Speech Technology Center (STC) speaker recognition (SR) systems submitted to the VOiCES From a Distance challenge 2019. The challenge’s SR task is focused on the problem of speaker recognition in single channel distant/far-field audio under noisy conditions. In this work we investigate different deep neural networks architectures for speaker embedding extraction to solve the task. We show that deep networks with residual frame level connections outperform more shallow architectures. Simple energy based speech activity detector (SAD) and automatic speech recognition (ASR) based SAD are investigated in this work. We also address the problem of data preparation for robust embedding extractors training. The reverberation for the data augmentation was performed using automatic room impulse response generator. In our systems we used discriminatively trained cosine similarity metric learning model as embedding backend. Scores normalization procedure was applied for each individual subsystem we used. Our final submitted systems were based on the fusion of different subsystems. The results obtained on the VOiCES development and evaluation sets demonstrate effectiveness and robustness of the proposed systems when dealing with distant/far-field audio under noisy conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pavel Matějka|AUTHOR Pavel Matějka]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Hossein Zeinali|AUTHOR Hossein Zeinali]], [[Ladislav Mošner|AUTHOR Ladislav Mošner]], [[Anna Silnova|AUTHOR Anna Silnova]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Ondřej Novotný|AUTHOR Ondřej Novotný]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]]
</p><p class="cpabstractcardaffiliationlist">Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2448–2452&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is a post-evaluation analysis of our efforts in VOiCES 2019 Speaker Recognition challenge. All systems in the fixed condition are based on x-vectors with different features and DNN topologies. The single best system reaches minDCF of 0.38 (5.25% EER) and a fusion of 3 systems yields minDCF of 0.34 (4.87% EER).We also analyze how speaker verification (SV) systems evolved in last few years and show results also on SITW 2016 Challenge. EER on the core-core condition of the SITW 2016 challenge dropped from 5.85% to 1.65% for system fusions submitted for SITW 2016 and VOiCES 2019, respectively. The less restrictive open condition allowed us to use external data for PLDA adaptation and achieve additional small performance improvement. In our submission to open condition, we used three x-vector systems and also one system based on i-vectors.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ivan Medennikov|AUTHOR Ivan Medennikov]]^^1^^, [[Yuri Khokhlov|AUTHOR Yuri Khokhlov]]^^1^^, [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]]^^2^^, [[Ivan Sorokin|AUTHOR Ivan Sorokin]]^^1^^, [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]]^^1^^, [[Vladimir Bataev|AUTHOR Vladimir Bataev]]^^1^^, [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]]^^1^^, [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]]^^1^^, [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]]^^1^^, [[Oleg Petrov|AUTHOR Oleg Petrov]]^^3^^, [[Alexander Zatvornitskiy|AUTHOR Alexander Zatvornitskiy]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^ITMO University, Russia; ^^3^^ITMO University, Russia; ^^4^^Speech Technology Center, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2453–2457&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is a description of the Speech Technology Center (STC) automatic speech recognition (ASR) system for the “VOiCES from a Distance Challenge 2019”. We participated in the Fixed condition of the ASR task, which means that the only training data available was an 80-hour subset of the LibriSpeech corpus. The main difficulty of the challenge is a mismatch between clean training data and distant noisy development/ evaluation data. In order to tackle this, we applied room acoustics simulation and weighted prediction error (WPE) dereverberation. We also utilized well-known speaker adaptation using x-vector speaker embeddings, as well as novel room acoustics adaptation with R-vector room impulse response (RIR) embeddings. The system used a lattice-level combination of 6 acoustic models based on different pronunciation dictionaries and input features. N-best hypotheses were rescored with 3 neural network language models (NNLMs) trained on both words and sub-word units. NNLMs were also explored for out-of-vocabulary (OOV) words handling by means of artificial texts generation. The final system achieved Word Error Rate (WER) of 14.7% on the evaluation data, which is the best result in the challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tze Yuang Chong|AUTHOR Tze Yuang Chong]], [[Kye Min Tan|AUTHOR Kye Min Tan]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Chang Huai You|AUTHOR Chang Huai You]], [[Hanwu Sun|AUTHOR Hanwu Sun]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]
</p><p class="cpabstractcardaffiliationlist">A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2458–2462&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the development of the automatic speech recognition (ASR) system for the submission to the VOiCES from a Distance Challenge 2019. In this challenge, we focused on the fixed condition, where the task is to recognize reverberant and noisy speech based on a limited amount of clean training data. In our system, the mismatch between the training and testing conditions was reduced by using multi-style training where the training data was artificially contaminated with different reverberation and noise sources. Also, the Weighted Prediction Error (WPE) algorithm was used to reduce the reverberant effect in the evaluation data. To boost the system performance, acoustic models of different neural network architectures were trained and the respective systems were fused to give the final output. Moreover, an LSTM language model was used to rescore the lattice to compensate the weak n-gram model trained from only the transcription text. Evaluated on the development set, our system showed an average word error rate (WER) of 27.04%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Xue|AUTHOR Wei Xue]]^^1^^, [[Ying Tong|AUTHOR Ying Tong]]^^1^^, [[Guohong Ding|AUTHOR Guohong Ding]]^^1^^, [[Chao Zhang|AUTHOR Chao Zhang]]^^2^^, [[Tao Ma|AUTHOR Tao Ma]]^^3^^, [[Xiaodong He|AUTHOR Xiaodong He]]^^1^^, [[Bowen Zhou|AUTHOR Bowen Zhou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JD.com, China; ^^2^^JD.com, UK; ^^3^^JD.com, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2693–2697&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sound source localization (SSL) is challenging in presence of reverberation since the cross-correlation between the direct-path signals in different microphones, which indicates the spatial information of the sound source, is interfered by the reverberation signal components. A novel algorithm is proposed in this paper to estimate the cross-correlation of the  direct-path speech signals, such that the robustness of SSL to reverberation can be improved. The proposed method follows a similar scheme to the multichannel linear prediction (MCLP), which is commonly used for speech dereverberation, while avoids the explicit estimation of the direct-path signal of each channel. This is achieved by revealing the relationship between the direct-path signal cross-correlation (DPCC) and the MCLP coefficient vector, and finally deriving the DPCC by using only the multichannel reverberant signals. It is also shown that the pre-whitening operation, which is widely used for SSL, can be inherently integrated into the estimated DPCC. An adaptive method is further derived to facilitate online frame-level SSL. The proposed method can be easily applied to conventional cross-correlation based SSL methods by using the DPCC rather than the full cross-correlation. Experiments conducted in various reverberant conditions demonstrate the effectiveness of the proposed method.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[François Grondin|AUTHOR François Grondin]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2698–2702&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces a modification of phase transform on singular value decomposition (SVD-PHAT) to localize multiple sound sources. This work aims to improve localization accuracy and keeps the algorithm complexity low for real-time applications. This method relies on multiple scans of the search space, with projection of each low-dimensional observation onto orthogonal subspaces. We show that this method localizes multiple sound sources more accurately than discrete SRP-PHAT, with a reduction in the Root Mean Square Error up to 0.0395 radians.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Ying Zhou|AUTHOR Ying Zhou]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">Shanghai Jiao Tong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2703–2707&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the scenario with noise and reverberation, the performance of current methods for direction of arrival (DOA) estimation usually degrades significantly. Inspired by the success of time-frequency masking in speech enhancement and speech separation, this paper proposes new methods to better utilize time-frequency masking in convolution neural network to improve the robustness of localization. First a mask estimation network is developed to assist DOA estimation by either appending or multiplying the estimated masks to the original input feature. Then we further propose a multi-task learning architecture to optimize the mask and DOA estimation networks jointly, and two modes are designed and compared. Experiments show that all the proposed methods have better robustness and generalization in noisy and reverberant conditions compared to the conventional methods, and the multi-task methods have the best performance among all approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yoshiki Masuyama|AUTHOR Yoshiki Masuyama]]^^1^^, [[Masahito Togami|AUTHOR Masahito Togami]]^^2^^, [[Tatsuya Komatsu|AUTHOR Tatsuya Komatsu]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Waseda University, Japan; ^^2^^LINE, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2708–2712&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose two mask-based beamforming methods using a deep neural network (DNN) trained by multichannel loss functions. Beamforming technique using time-frequency (TF)-masks estimated by a DNN have been applied to many applications where TF-masks are used for estimating spatial covariance matrices. To train a DNN for mask-based beamforming, loss functions designed for monaural speech enhancement/separation have been employed. Although such a training criterion is simple, it does not directly correspond to the performance of mask-based beamforming. To overcome this problem, we use multichannel loss functions which evaluate the estimated spatial covariance matrices based on the multichannel Itakura–Saito divergence. DNNs trained by the multichannel loss functions can be applied to construct several beamformers. Experimental results confirmed their effectiveness and robustness to microphone configurations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guanjun Li|AUTHOR Guanjun Li]]^^1^^, [[Shan Liang|AUTHOR Shan Liang]]^^1^^, [[Shuai Nie|AUTHOR Shuai Nie]]^^1^^, [[Wenju Liu|AUTHOR Wenju Liu]]^^1^^, [[Meng Yu|AUTHOR Meng Yu]]^^2^^, [[Lianwu Chen|AUTHOR Lianwu Chen]]^^3^^, [[Shouye Peng|AUTHOR Shouye Peng]]^^4^^, [[Changliang Li|AUTHOR Changliang Li]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^Tencent, USA; ^^3^^Tencent, China; ^^4^^Xueersi Online School, China; ^^5^^Kingsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2713–2717&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>SpeakerBeam is a state-of-the-art method for extracting a speech signal of target speaker from a mixture using an adaption utterance. The existing multi-channel SpeakerBeam utilizes the spectral features of the signals with the ignorance of the spatial discriminability of the multi-channel processing. In this paper, we tightly integrate spectral and spatial information for target speaker extraction. In the proposed scheme, a multi-channel mixture signal is firstly filtered into a set of beamformed signals using fixed beam patterns. An attention network is then designed to identify the direction of the target speaker and to combine the beamformed signals into an enhanced signal dominated by the target speaker energy. Further, SpeakerBeam inputs the enhanced signal and outputs the mask of the target speaker. Finally, the attention network and SpeakerBeam are jointly trained. Experimental results demonstrate that the proposed scheme largely improves the existing multi-channel SpeakerBeam in low signal-to-interference ratio or same-gender scenarios.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2718–2722&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, with the advent of deep learning, there has been significant progress in the processing of speech mixtures. In particular, the use of neural networks has enabled target speech extraction, which extracts speech signal of a target speaker from a speech mixture by utilizing auxiliary clue representing the characteristics of the target speaker. For example, audio clues derived from an auxiliary utterance spoken by the target speaker have been used to characterize the target speaker. Audio clues should capture the fine-grained characteristic of the target speaker’s voice (e.g., pitch). Alternatively, visual clues derived from a video of the target speaker’s face speaking in the mixture have also been investigated. Visual clues should mainly capture the phonetic information derived from lip movements. In this paper, we propose a novel target speech extraction scheme that combines audio and visual clues about the target speaker to take advantage of the information provided by both modalities. We introduce an attention mechanism that emphasizes the most informative speaker clue at every time frame. Experiments on mixture of two speakers demonstrated that our proposed method using audio-visual speaker clues significantly improved the extraction performance compared with the conventional methods using either audio or visual speaker clues.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[François G. Germain|AUTHOR François G. Germain]]^^1^^, [[Qifeng Chen|AUTHOR Qifeng Chen]]^^2^^, [[Vladlen Koltun|AUTHOR Vladlen Koltun]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Stanford University, USA; ^^2^^HKUST, China; ^^3^^Intel, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2723–2727&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/1924" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an end-to-end deep learning approach to denoising speech signals by processing the raw waveform directly. Given input audio containing speech corrupted by an additive background signal, the system aims to produce a processed signal that contains only the speech content. Recent approaches have shown promising results using various deep network architectures. In this paper, we propose to train a fully-convolutional context aggregation network using a deep feature loss. That loss is based on comparing the internal feature activations in a different network, trained for audio classification. Our approach outperforms the state of the art in objective speech quality metrics and in large-scale perceptual experiments with human listeners. It also outperforms an identical network trained using traditional regression losses. The advantage of the new approach is particularly pronounced for the hardest data with the most intrusive background noise, for which denoising is most needed and most challenging. </p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Quan Wang|AUTHOR Quan Wang]]^^1^^, [[Hannah Muckenhirn|AUTHOR Hannah Muckenhirn]]^^2^^, [[Kevin Wilson|AUTHOR Kevin Wilson]]^^1^^, [[Prashant Sridhar|AUTHOR Prashant Sridhar]]^^1^^, [[Zelin Wu|AUTHOR Zelin Wu]]^^1^^, [[John R. Hershey|AUTHOR John R. Hershey]]^^3^^, [[Rif A. Saurous|AUTHOR Rif A. Saurous]]^^1^^, [[Ron J. Weiss|AUTHOR Ron J. Weiss]]^^1^^, [[Ye Jia|AUTHOR Ye Jia]]^^1^^, [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, USA; ^^2^^Idiap Research Institute, Switzerland; ^^3^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2728–2732&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a novel system that separates the voice of a target speaker from multi-speaker signals, by making use of a reference signal from the target speaker. We achieve this by training two separate neural networks: (1) A speaker recognition network that produces speaker-discriminative embeddings; (2) A spectrogram masking network that takes both noisy spectrogram and speaker embedding as input, and produces a mask. Our system significantly reduces the speech recognition WER on multi-speaker signals, with minimal WER degradation on single-speaker signals.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chien-Feng Liao|AUTHOR Chien-Feng Liao]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^, [[Xugang Lu|AUTHOR Xugang Lu]]^^2^^, [[Hisashi Kawai|AUTHOR Hisashi Kawai]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica, Taiwan; ^^2^^NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2733–2737&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In a noisy environment, a lossy speech signal can be automatically restored by a listener if he/she knows the language well. That is, with the built-in knowledge of a “language model”, a listener may effectively suppress noise interference and retrieve the target speech signals. Accordingly, we argue that familiarity with the underlying linguistic content of spoken utterances benefits speech enhancement (SE) in noisy environments. In this study, in addition to the conventional modeling for learning the acoustic noisy-clean speech mapping, an abstract symbolic sequential modeling is incorporated into the SE framework. This symbolic sequential modeling can be regarded as a “linguistic constraint” in learning the acoustic noisy-clean speech mapping function. In this study, the symbolic sequences for acoustic signals are obtained as discrete representations with a Vector Quantized Variational Autoencoder algorithm. The obtained symbols are able to capture high-level phoneme-like content from speech signals. The experimental results demonstrate that the proposed framework can obtain notable performance improvement in terms of perceptual evaluation of speech quality (PESQ) and short-time objective intelligibility (STOI) on the TIMIT dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pejman Mowlaee|AUTHOR Pejman Mowlaee]]^^1^^, [[Daniel Scheran|AUTHOR Daniel Scheran]]^^2^^, [[Johannes Stahl|AUTHOR Johannes Stahl]]^^2^^, [[Sean U.N. Wood|AUTHOR Sean U.N. Wood]]^^2^^, [[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Widex, Denmark; ^^2^^Technische Universität Graz, Austria; ^^3^^Victoria University of Wellington, New Zealand</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2738–2742&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While the acoustic frequency domain has been widely used for speech enhancement, usage of the modulation domain is less common. In this paper, we investigate single-channel speech enhancement in the recently proposed Double Spectrum (DS) framework and provide insights on the statistical properties of speech and noise in the DS domain. Relying on our statistical analysis in the DS, we derive a maximum a posteriori estimator of speech in the DS domain. By means of experiments, we evaluate the speech enhancement performance of the proposed method and relevant benchmarks in the acoustic frequency and modulation domains and show that the proposed method achieves a good balance between noise attenuation and speech distortion for various SNRs and noise types.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jian Yao|AUTHOR Jian Yao]], [[Ahmad Al-Dahle|AUTHOR Ahmad Al-Dahle]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2743–2747&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose the coarse-to-fine optimization for the task of speech enhancement. Cosine similarity loss [1] has proven to be an effective metric to measure similarity of speech signals. However, due to the large variance of the enhanced speech with even the same cosine similarity loss in high dimensional space, a deep neural network learnt with this loss might not be able to predict enhanced speech with good quality. Our coarse-to-fine strategy optimizes the cosine similarity loss for different granularities so that more constraints are added to the prediction from high dimension to relatively low dimension. In this way, the enhanced speech will better resemble the clean speech. Experimental results show the effectiveness of our proposed coarse-to-fine optimization in both discriminative models and generative models. Moreover, we apply the coarse-to-fine strategy to the adversarial loss in generative adversarial network (GAN) and propose dynamic perceptual loss, which dynamically computes the adversarial loss from coarse resolution to fine resolution. Dynamic perceptual loss further improves the accuracy and achieves state-of-the-art results compared with other generative models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Like Hui|AUTHOR Like Hui]], [[Siyuan Ma|AUTHOR Siyuan Ma]], [[Mikhail Belkin|AUTHOR Mikhail Belkin]]
</p><p class="cpabstractcardaffiliationlist">Ohio State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2748–2752&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We apply a fast kernel method for mask-based single-channel speech enhancement. Specifically, our method solves a kernel regression problem associated to a non-smooth kernel function (exponential power kernel) with a highly efficient iterative method (EigenPro). Due to the simplicity of this method, its hyper-parameters such as kernel bandwidth can be automatically and efficiently selected using line search with subsamples of training data. We observe an empirical correlation between the regression loss (mean square error) and regular metrics for speech enhancement. This observation justifies our training target and motivates us to achieve lower regression loss by training separate kernel models for different frequency subbands. We compare our method with the state-of-the-art deep neural networks on mask-based HINT and TIMIT. Experimental results show that our kernel method consistently outperforms deep neural networks while requiring less training time.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Florian Metze|AUTHOR Florian Metze]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>Human information processing is inherently multimodal. Speech and language are therefore best processed and generated in a situated context. Future human language technologies must be able to jointly process multimodal data, and not just text, images, acoustics or speech in isolation. Despite advances in Computer Vision, Automatic Speech Recognition, Multimedia Analysis and Natural Language Processing, state-of-the-art computational models are not integrating multiple modalities nowhere near as effectively and efficiently as humans. Researchers are only beginning to tackle these challenges in “vision and language” research. In this talk, I will show the potential of multi-modal processing to (1) improve recognition for challenging conditions (i.e. lip-reading), (2) adapt models to new conditions (i.e. context or personalization), (3) ground semantics across modalities or languages (i.e. translation and language acquisition), (4) training models with weak or non-existent labels (i.e. SoundNet or bootstrapping of recognizers without parallel data), and (5) make models interpretable (i.e. representation learning). I will present and discuss significant recent research results from each of these areas and will highlight the commonalities and differences. I hope to stimulate exchange and cross-fertilization of ideas by presenting not just abstract concepts, but by pointing the audience to new and existing tasks, datasets, and challenges.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nilay Shrivastava|AUTHOR Nilay Shrivastava]]^^1^^, [[Astitwa Saxena|AUTHOR Astitwa Saxena]]^^1^^, [[Yaman Kumar|AUTHOR Yaman Kumar]]^^2^^, [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]]^^3^^, [[Amanda Stent|AUTHOR Amanda Stent]]^^4^^, [[Debanjan Mahata|AUTHOR Debanjan Mahata]]^^4^^, [[Preeti Kaur|AUTHOR Preeti Kaur]]^^1^^, [[Roger Zimmermann|AUTHOR Roger Zimmermann]]^^5^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NSUT, India; ^^2^^Adobe, India; ^^3^^IIIT Delhi, India; ^^4^^Bloomberg, USA; ^^5^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2753–2757&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Visual speech recognition (VSR) is the task of recognizing spoken language from video input only, without any audio. VSR has many applications as an assistive technology, especially if it could be deployed in mobile devices and embedded systems. The need for intensive computational resources and large memory footprint are two major obstacles in deploying neural network models for VSR in a resource constrained environment. We propose a novel end-to-end deep neural network architecture for word level VSR called MobiVSR with a design parameter that aids in balancing the model’s accuracy and parameter count. We use depthwise 3D convolution along with channel shuffling for the first time in the domain of VSR and show how it makes our model efficient. MobiVSR achieves an accuracy of 70% on a challenging Lip Reading in the Wild dataset with 6 times fewer parameters and 20 times smaller memory footprint than the current state of the art. MobiVSR can also be compressed to 6 MB by applying post training quantization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pujitha Appan Kandala|AUTHOR Pujitha Appan Kandala]], [[Abhinav Thanda|AUTHOR Abhinav Thanda]], [[Dilip Kumar Margam|AUTHOR Dilip Kumar Margam]], [[Rohith Chandrashekar Aralikatti|AUTHOR Rohith Chandrashekar Aralikatti]], [[Tanay Sharma|AUTHOR Tanay Sharma]], [[Sharad Roy|AUTHOR Sharad Roy]], [[Shankar M. Venkatesan|AUTHOR Shankar M. Venkatesan]]
</p><p class="cpabstractcardaffiliationlist">Samsung, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2758–2762&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Visual speech recognition or lip-reading suffers from high word error rate (WER) as lip-reading is based solely on articulators that are visible to the camera. Recent works mitigated this problem using complex architectures of deep neural networks. I-vector based speaker adaptation is a well known technique in ASR systems used to reduce WER on unseen speakers. In this work, we explore speaker adaptation of lip-reading models using latent identity vectors (visual i-vectors) obtained by factor analysis on visual features. In order to estimate the visual i-vectors, we employ two ways to collect sufficient statistics: first using GMM based universal background model (UBM) and second using RNN-HMM based UBM. The speaker-specific visual i-vector is given as an additional input to the hidden layers of the lip-reading model during train and test phases. On GRID corpus, use of visual i-vectors results in 15% and 10% relative improvements over current state of the art lip-reading architectures on unseen speakers using RNN-HMM and GMM based methods respectively. Furthermore, we explore the variation of WER with dimension of visual i-vectors, and with the amount of unseen speaker data required for visual i-vector estimation. We also report the results on Korean visual corpus that we created.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandros Koumparoulis|AUTHOR Alexandros Koumparoulis]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]
</p><p class="cpabstractcardaffiliationlist">University of Thessaly, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2763–2767&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent works in visual speech recognition utilize deep learning advances to improve accuracy. Focus however has been primarily on recognition performance, while ignoring the computational burden of deep architectures. In this paper we address these issues concurrently, aiming at both high computational efficiency and recognition accuracy in lipreading. For this purpose, we investigate the MobileNet convolutional neural network architectures, recently proposed for image classification. In addition, we extend the 2D convolutions of MobileNets to 3D ones, in order to better model the spatio-temporal nature of the lipreading problem. We investigate two architectures in this extension, introducing the temporal dimension as part of either the depthwise or the pointwise MobileNet convolutions. To further boost computational efficiency, we also consider using pointwise convolutions alone, as well as networks operating on half the mouth region. We evaluate the proposed architectures on speaker-independent visual-only continuous speech recognition on the popular TCD-TIMIT corpus. Our best system outperforms a baseline CNN by 4.27% absolute in word error rate and over 12 times in computational efficiency, whereas, compared to a state-of-the-art ResNet, it is 37 times more efficient at a minor 0.07% absolute error rate degradation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Leyuan Qu|AUTHOR Leyuan Qu]], [[Cornelius Weber|AUTHOR Cornelius Weber]], [[Stefan Wermter|AUTHOR Stefan Wermter]]
</p><p class="cpabstractcardaffiliationlist">Universität Hamburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2768–2772&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Lip reading, also known as visual speech recognition, has recently received considerable attention. Although advanced feature engineering and powerful deep neural network architectures have been proposed for this task, the performance still cannot be competitive with speech recognition tasks using the audio modality as input. This is mainly because compared with audio, visual features carry less information relevant to word recognition. For example, the voiced sound made while the vocal cords vibrate can be represented by audio but is not reflected by mouth or lip movement. In this paper, we map the sequence of mouth movement images directly to mel-spectrogram to reconstruct the speech relevant information. Our proposed architecture consists of two components: (a) the mel-spectrogram reconstruction front-end which includes an encoder-decoder architecture with attention mechanism to predict mel-spectrogram from videos; (b) the lip reading back-end consisting of convolutional layers, bi-directional gated recurrent units, and connectionist temporal classification loss, which consumes the generated mel-spectrogram representation to predict text transcriptions. The speaker-dependent evaluation results demonstrate that our proposed model not only generates quality mel-spectrograms but also outperforms state-of-the-art models on the GRID benchmark lip reading dataset, with 0.843% character error rate and 2.525% word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[David Rybach|AUTHOR David Rybach]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]], [[Wei Li|AUTHOR Wei Li]], [[Mirkó Visontai|AUTHOR Mirkó Visontai]], [[Qiao Liang|AUTHOR Qiao Liang]], [[Trevor Strohman|AUTHOR Trevor Strohman]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Ian McGraw|AUTHOR Ian McGraw]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]]
</p><p class="cpabstractcardaffiliationlist">Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2773–2777&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The requirements for many applications of state-of-the-art speech recognition systems include not only low word error rate (WER) but also low latency. Specifically, for many use-cases, the system must be able to decode utterances in a streaming fashion and faster than real-time. Recently, a streaming recurrent neural network transducer (RNN-T) end-to-end (E2E) model has shown to be a good candidate for on-device speech recognition, with improved WER and latency metrics compared to conventional on-device models [1]. However, this model still lags behind a large state-of-the-art conventional model in quality [2]. On the other hand, a non-streaming E2E Listen, Attend and Spell (LAS) model has shown comparable quality to large conventional models [3]. This work aims to bring the quality of an E2E streaming model closer to that of a conventional system by incorporating a LAS network as a second-pass component, while still abiding by latency constraints. Our proposed two-pass model achieves a 17%–22% relative reduction in WER compared to RNN-T alone and increases latency by a small fraction over RNN-T.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Max W.Y. Lam|AUTHOR Max W.Y. Lam]]^^1^^, [[Jun Wang|AUTHOR Jun Wang]]^^2^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^3^^, [[Helen Meng|AUTHOR Helen Meng]]^^3^^, [[Dan Su|AUTHOR Dan Su]]^^2^^, [[Dong Yu|AUTHOR Dong Yu]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tencent, China; ^^2^^Tencent, China; ^^3^^CUHK, China; ^^4^^Tencent, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2778–2782&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic speech recognition (ASR) in challenging conditions, such as in the presence of interfering speakers or music, remains an unsolved problem. This paper presents Extract, Adapt, and Recognize (EAR), an end-to-end neural network that allows fully learnable separation and recognition components towards optimizing the ASR criterion. In between a state-of-the-art speech separation module as an  extractor and an acoustic modeling module as a  recognizer, the EAR introduces an  adaptor, where adapted acoustic features are learned from the separation outputs using a bi-directional long short term memory network trained to minimize the recognition loss directly. Relative to a conventional joint training model, the EAR model can achieve 8.5% to 22.3%, and 1.2% to 26.9% word error rate reductions (WERR), under various dBs of music corruption and speaker interference respectively. With speaker tracing the WERR can be further promoted to 12.4% to 29.0%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Abhinav Garg|AUTHOR Abhinav Garg]], [[Kwangyoun Kim|AUTHOR Kwangyoun Kim]], [[Mehul Kumar|AUTHOR Mehul Kumar]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]
</p><p class="cpabstractcardaffiliationlist">Samsung, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2783–2787&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present a new hierarchical character to byte-pair encoding (C2B) end-to-end neural network architecture for improving the performance of attention based encoder-decoder ASR models. We explore different strategies for building the hierarchical C2B models such as building the individual blocks one at a time, as well as training the entire model as a monolith in a single step. We show that C2B model trained simultaneously with four losses, two for character and two for BPE sequences help regularize the learning of character sequences as well as BPE sequences. The proposed multi-task multi-resolution hierarchical architecture improves the WER of a small footprint bidirectional full-attention E2E model on the 960 hours LibriSpeech corpus by around 15% relative and is comparable to the state-of-the-art performance of an almost 3 times bigger model on the same dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kyu J. Han|AUTHOR Kyu J. Han]]^^1^^, [[Jing Huang|AUTHOR Jing Huang]]^^1^^, [[Yun Tang|AUTHOR Yun Tang]]^^1^^, [[Xiaodong He|AUTHOR Xiaodong He]]^^2^^, [[Bowen Zhou|AUTHOR Bowen Zhou]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^JD.com, USA; ^^2^^JD.com, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2788–2792&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In contrast to the huge success of self-attention based neural networks in various NLP tasks, the efficacy of self-attention in speech applications is yet limited. This is partly because the full effectiveness of the self-attention mechanism could not be achieved without proper down-sampling schemes in speech tasks. To address this issue, we propose a new self-attention mechanism suitable for speech recognition, namely,  multi-stride self-attention. The proposed multi-stride approach lets each group of heads in self-attention process speech frames with a unique stride over neighboring frames. Thus, the entire attention mechanism would not be confined in a fixed frame shift and can have diverse contextual views for a given frame to determine attention weights more effectively. To validate our proposal we evaluated it on various speech corpora for speech recognition, both English and Chinese, and observed a consistent improvement, especially in terms of substitution and deletion errors, without the increase of model complexity. The average WER improvement of 7.5% (relative) obtained by the TDNNs having the multi-stride self-attention layer as compared to the baseline TDNN model shows the effectiveness of the proposed multi-stride self-attention mechanism.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xurong Xie|AUTHOR Xurong Xie]], [[Shansong Liu|AUTHOR Shansong Liu]], [[Max W.Y. Lam|AUTHOR Max W.Y. Lam]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2793–2797&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Discriminative training techniques define state-of-the-art performance for deep neural networks (DNNs) based speech recognition systems across a wide range of tasks. Conventional discriminative training methods produce deterministic DNN parameter estimates. They are inherently prone to overfitting, leading to poor generalization when given limited training data. In order to address this issue, this paper investigates the use of Bayesian learning and Gaussian Process (GP) based hidden activations to replace the deterministic parameter estimates of standard lattice-free maximum mutual information (LF-MMI) criterion trained time delay neural network (TDNN) acoustic models. Experiments conducted on the Switchboard conversational telephone speech recognition tasks suggest the proposed technique consistently outperforms the baseline LF-MMI trained TDNN systems using fixed parameter hidden activations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Liang Lu|AUTHOR Liang Lu]], [[Eric Sun|AUTHOR Eric Sun]], [[Yifan Gong|AUTHOR Yifan Gong]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2798–2802&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose self-teaching networks to improve the generalization capacity of deep neural networks. The idea is to generate soft supervision labels using the output layer for training the lower layers of the network. During the network training, we seek an auxiliary loss that drives the lower layer to mimic the behavior of the output layer. The connection between the two network layers through the auxiliary loss can help the gradient flow, which works similar to the residual networks. Furthermore, the auxiliary loss also works as a regularizer, which improves the generalization capacity of the network. We evaluated the self-teaching network with deep recurrent neural networks on speech recognition tasks, where we trained the acoustic model using 30 thousand hours of data. We tested the acoustic model using data collected from 4 scenarios. We show that the self-teaching network can achieve consistent improvements and outperform existing methods such as label smoothing and confidence penalization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yuanchao Li|AUTHOR Yuanchao Li]]^^1^^, [[Tianyu Zhao|AUTHOR Tianyu Zhao]]^^2^^, [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Honda, Japan; ^^2^^Kyoto University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2803–2807&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Accurately recognizing emotion from speech is a necessary yet challenging task due to the variability in speech and emotion. In this paper, we propose a speech emotion recognition (SER) method using end-to-end (E2E) multitask learning with self attention to deal with several issues. First, we extract features directly from speech spectrogram instead of using traditional hand-crafted features to better represent emotion. Second, we adopt self attention mechanism to focus on the salient periods of emotion in speech utterances. Finally, giving consideration to mutual features between emotion and gender classification tasks, we incorporate gender classification as an auxiliary task by using multitask learning to share useful information with emotion classification task. Evaluation on IEMOCAP (a commonly used database for SER research) demonstrates that the proposed method outperforms the state-of-the-art methods and improves the overall accuracy by an absolute of 7.7% compared to the best existing result.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maximilian Schmitt|AUTHOR Maximilian Schmitt]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2808–2812&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion recognition in speech is a meaningful task in affective computing and human-computer interaction. As human emotion is a frequently changing state, it is usually represented as a densely sampled time series of emotional dimensions, typically arousal and valence. For this, recurrent neural network (RNN) architectures are employed by default when it comes to modelling the contours with deep learning approaches. However, the amount of temporal context required is questionable, and it has not yet been clarified whether the consideration of long-term dependencies is actually beneficial. In this contribution, we demonstrate that RNNs are not necessary to accomplish the task of time-continuous emotion recognition. Indeed, results gained indicate that deep neural networks incorporating less complex convolutional layers can provide more accurate models. We highlight the pros and cons of recurrent and non-recurrent approaches and evaluate our methods on the public SEWA database, which was used as a benchmark in the 2017 and 2018 editions of the Audio-Visual Emotion Challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anda Ouyang|AUTHOR Anda Ouyang]], [[Ting Dang|AUTHOR Ting Dang]], [[Vidhyasaharan Sethu|AUTHOR Vidhyasaharan Sethu]], [[Eliathamby Ambikairajah|AUTHOR Eliathamby Ambikairajah]]
</p><p class="cpabstractcardaffiliationlist">UNSW, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2813–2817&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech based continuous emotion prediction systems have predominantly been based on complex non-linear back-ends, with an increasing attention on long-short term memory recurrent neural networks. While this has led to accurate predictions, complex models may suffer from issues with interpretability, model selection and overfitting. In this paper, we demonstrate that a linear model can capture most of the relationship between speech features and emotion labels in the continuous arousal-valence space. Specifically, an autoregressive exogenous model (ARX) is shown to be an effective backend. This approach is validated on three commonly used databases, namely RECOLA, SEWA and USC CreativeIT, and shown to be comparable in terms of performance to state-of-the-art LSTM systems. More importantly, this approach allows for the use of well-established linear system theory to aid with model interpretability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Atsushi Ando|AUTHOR Atsushi Ando]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Hosana Kamiyama|AUTHOR Hosana Kamiyama]], [[Satoshi Kobashikawa|AUTHOR Satoshi Kobashikawa]], [[Yushi Aono|AUTHOR Yushi Aono]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2818–2822&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a novel speech emotion recognition method that addresses the ambiguous nature of emotions in speech. Most conventional methods assume there is only a single ground truth, the dominant emotion, though utterances can contain multiple emotions. In order to solve this problem, several methods that consider ambiguous emotions (e.g. soft-target training) have been proposed. Unfortunately, training them is difficult since they work by estimating the proportions of all emotions. The proposed method improves both frameworks by evaluating the presence or absence of each emotion. We expect that it is much easier to estimate just presence/absence of emotions rather than trying to determine proportions of each, and the deliberate assessment of emotion existence information will help to estimate the proportion of each or dominant class more precisely. The proposed method employs two-step training. Multi-Label Emotion Existence (MLEE) model is trained first to estimate whether each emotion is present or absent. Then, the dominant emotion recognition model with hard- or soft-target labels is trained by means of the intermediate outputs of the MLEE model so as to utilize cues of emotion existence for inferring the dominant. Experiments demonstrate that the proposed method outperforms both hard- or soft-target based conventional emotion recognition schemes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Cristina Gorrostieta|AUTHOR Cristina Gorrostieta]]^^1^^, [[Reza Lotfian|AUTHOR Reza Lotfian]]^^1^^, [[Kye Taylor|AUTHOR Kye Taylor]]^^1^^, [[Richard Brutti|AUTHOR Richard Brutti]]^^1^^, [[John Kane|AUTHOR John Kane]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Cogito, USA; ^^2^^Cogito, Ireland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2823–2827&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Machine learning can unintentionally encode and amplify negative bias and stereotypes present in humans, be they conscious or unconscious. This has led to high-profile cases where machine learning systems have been found to exhibit bias towards gender, race, and ethnicity, among other demographic categories. Negative bias can be encoded in these algorithms based on: the representation of different population categories in the training data; bias arising from manual human labeling of these data; as well as modeling types and optimisation approaches. In this paper we assess the effect of gender bias in speech emotion recognition and find that emotional activation model accuracy is consistently lower for female compared to male audio samples. Further, we demonstrate that a fairer and more consistent model accuracy can be achieved by applying a simple de-biasing training technique.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fang Bao|AUTHOR Fang Bao]], [[Michael Neumann|AUTHOR Michael Neumann]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]
</p><p class="cpabstractcardaffiliationlist">Universität Stuttgart, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2828–2832&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Cycle consistent adversarial networks (CycleGAN) have shown great success in image style transfer with unpaired datasets. Inspired by this, we investigate emotion style transfer to generate synthetic data, which aims at addressing the data scarcity problem in speech emotion recognition. Specifically, we propose a CycleGAN-based method to transfer feature vectors extracted from a large unlabeled speech corpus into synthetic features representing the given target emotions. We extend the CycleGAN framework with a classification loss which improves the discriminability of the generated data. To show the effectiveness of the proposed method, we present results for speech emotion recognition using the generated feature vectors as (i) augmentation of the training data, and (ii) as standalone training set. Our experimental results reveal that when utilizing synthetic feature vectors, the classification performance improves in within-corpus and cross-corpus evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bajibabu Bollepalli|AUTHOR Bajibabu Bollepalli]], [[Lauri Juvela|AUTHOR Lauri Juvela]], [[Paavo Alku|AUTHOR Paavo Alku]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2833–2837&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Currently, there is increasing interest to use sequence-to-sequence models in text-to-speech (TTS) synthesis with attention like that in Tacotron models. These models are end-to-end, meaning that they learn both co-articulation and duration properties directly from text and speech. Since these models are entirely data-driven, they need large amounts of data to generate synthetic speech of good quality. However, in challenging speaking styles, such as Lombard speech, it is difficult to record sufficiently large speech corpora. Therefore, we propose a transfer learning method to adapt a TTS system of normal speaking style to Lombard style. We also experiment with a WaveNet vocoder along with a traditional vocoder (WORLD) in the synthesis of Lombard speech. The subjective and objective evaluation results indicated that the proposed adaptation system coupled with the WaveNet vocoder clearly outperformed the conventional deep neural network based TTS system in the synthesis of Lombard speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shreyas Seshadri|AUTHOR Shreyas Seshadri]]^^1^^, [[Lauri Juvela|AUTHOR Lauri Juvela]]^^1^^, [[Paavo Alku|AUTHOR Paavo Alku]]^^1^^, [[Okko Räsänen|AUTHOR Okko Räsänen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Aalto University, Finland; ^^2^^Tampere University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2838–2842&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Lombard speech is a speaking style associated with increased vocal effort that is naturally used by humans to improve intelligibility in the presence of noise. It is hence desirable to have a system capable of converting speech from normal to Lombard style. Moreover, it would be useful if one could adjust the degree of Lombardness in the converted speech so that the system is more adaptable to different noise environments. In this study, we propose the use of recently developed Augmented cycle-consistent adversarial networks (Augmented CycleGANs) for conversion between normal and Lombard speaking styles. The proposed system gives a smooth control on the degree of Lombardness of the mapped utterances by traversing through different points in the latent space of the trained model. We utilize a parametric approach that uses the Pulse Model in Log domain (PML) vocoder to extract features from normal speech that are then mapped to Lombard-style features using the Augmented CycleGAN. Finally, the mapped features are converted to Lombard speech with PML. The model is trained on multi-language data recorded in different noise conditions, and we compare its effectiveness to a previously proposed CycleGAN system in experiments for intelligibility and quality of mapped speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guanlong Zhao|AUTHOR Guanlong Zhao]], [[Shaojin Ding|AUTHOR Shaojin Ding]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]
</p><p class="cpabstractcardaffiliationlist">Texas A&M University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2843–2847&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Methods for foreign accent conversion (FAC) aim to generate speech that sounds similar to a given non-native speaker but with the accent of a native speaker. Conventional FAC methods borrow excitation information (F0 and aperiodicity; produced by a conventional vocoder) from a reference (i.e., native) utterance during synthesis time. As such, the generated speech retains some aspects of the voice quality of the native speaker. We present a framework for FAC that eliminates the need for conventional vocoders (e.g., STRAIGHT, World) and therefore the need to use the native speaker’s excitation. Our approach uses an acoustic model trained on a native speech corpus to extract speaker-independent phonetic posteriorgrams (PPGs), and then train a speech synthesizer to map PPGs from the non-native speaker into the corresponding spectral features, which in turn are converted into the audio waveform using a high-quality neural vocoder. At runtime, we drive the synthesizer with the PPG extracted from a native reference utterance. Listening tests show that the proposed system produces speech that sounds more clear, natural, and similar to the non-native speaker compared with a baseline system, while significantly reducing the perceived foreign accent of non-native utterances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Jacob Sager|AUTHOR Jacob Sager]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2848–2852&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We introduce a new model for emotion conversion in speech based on highway neural networks. Our model uses the contextual pitch, energy and spectral information of a source emotional utterance to predict the framewise fundamental frequency and signal intensity under a target emotion. We also incorporate a latent gender representation to promote cross-speaker generalizability. Our neural network is trained to maximize the error log-likelihood under an assumed Laplacian distribution. We validate our model on the VESUS repository collected at Johns Hopkins University, which contains parallel emotional utterances from 10 actors across 5 emotional classes. The proposed algorithm outperforms three state-of-the-art baselines in terms of the mean absolute error and correlation between the predicted and target values. We evaluate the quality of our emotion manipulations via crowd-sourcing. Finally, we apply our emotion morphing model to utterances generated by Wavenet to demonstrate our unique ability to inject emotion into synthetic speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Itshak Lapidot|AUTHOR Itshak Lapidot]]^^1^^, [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Afeka College, Israel; ^^2^^LIA (EA 4128), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2853–2857&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the context of detection of speaker recognition identity impersonation, we observed that the waveform  probability mass function (PMF) of genuine speech differs from significantly of of PMF from identity theft extracts. This is true for synthesized or converted speech as well as for replayed speech. In this work, we mainly ask whether this observation has a significant impact on spoofing detection performance. In a second step, we want to reduce the distribution gap of waveforms between authentic speech and spoofing speech. We propose a  genuinization of the spoofing speech (by analogy with  Gaussianisation), i.e. to obtain spoofing speech with a PMF close to the PMF of genuine speech. Our  genuinization is evaluated on ASVspoof 2019 challenge datasets, using the baseline system provided by the challenge organization. In the case of  constant Q cepstral coefficients (CQCC) features, the  genuinization leads to a degradation of the baseline system performance by a factor of 10, which shows a potentially large impact of the distribution os waveforms on spoofing detection performance. However, by “playing” with all configurations, we also observed different behaviors, including performance improvements in specific cases. This leads us to conclude that waveform distribution plays an important role and must be taken into account by anti-spoofing systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jian Gao|AUTHOR Jian Gao]]^^1^^, [[Deep Chakraborty|AUTHOR Deep Chakraborty]]^^2^^, [[Hamidou Tembine|AUTHOR Hamidou Tembine]]^^1^^, [[Olaitan Olaleye|AUTHOR Olaitan Olaleye]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^New York University, USA; ^^2^^UMass Amherst, USA; ^^3^^Signify, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2858–2862&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose a nonparallel data-driven emotional speech conversion method. It enables the transfer of emotion-related characteristics of a speech signal while preserving the speaker’s identity and linguistic content. Most existing approaches require parallel data and time alignment, which is not available in many real applications. We achieve nonparallel training based on an unsupervised style transfer technique, which learns a translation model between two distributions instead of a deterministic one-to-one mapping between paired examples. The conversion model consists of an encoder and a decoder for each emotion domain. We assume that the speech signal can be decomposed into an emotion-invariant content code and an emotion-related style code in latent space. Emotion conversion is performed by extracting and recombining the content code of the source speech and the style code of the target emotion. We tested our method on a nonparallel corpora with four emotions. The evaluation results show the effectiveness of our approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Themos Stafylakis|AUTHOR Themos Stafylakis]]^^1^^, [[Johan Rohdin|AUTHOR Johan Rohdin]]^^2^^, [[Oldřich Plchot|AUTHOR Oldřich Plchot]]^^2^^, [[Petr Mizera|AUTHOR Petr Mizera]]^^1^^, [[Lukáš Burget|AUTHOR Lukáš Burget]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Omilia, Greece; ^^2^^Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2863–2867&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Contrary to i-vectors, speaker embeddings such as x-vectors are incapable of leveraging unlabelled utterances, due to the classification loss over training speakers. In this paper, we explore an alternative training strategy to enable the use of unlabelled utterances in training. We propose to train speaker embedding extractors via reconstructing the frames of a target speech segment, given the inferred embedding of another speech segment of the same utterance. We do this by attaching to the standard speaker embedding extractor a decoder network, which we feed not merely with the speaker embedding, but also with the estimated phone sequence of the target frame sequence.

The reconstruction loss can be used either as a single objective, or be combined with the standard speaker classification loss. In the latter case, it acts as a regularizer, encouraging generalizability to speakers unseen during training. In all cases, the proposed architectures are trained from scratch and in an end-to-end fashion. We demonstrate the benefits of the proposed approach on the VoxCeleb and Speakers in the Wild databases, and we report notable improvements over the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andreas Nautsch|AUTHOR Andreas Nautsch]]^^1^^, [[Jose Patino|AUTHOR Jose Patino]]^^1^^, [[Amos Treiber|AUTHOR Amos Treiber]]^^2^^, [[Themos Stafylakis|AUTHOR Themos Stafylakis]]^^3^^, [[Petr Mizera|AUTHOR Petr Mizera]]^^3^^, [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]^^1^^, [[Thomas Schneider|AUTHOR Thomas Schneider]]^^2^^, [[Nicholas Evans|AUTHOR Nicholas Evans]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EURECOM, France; ^^2^^Technische Universität Darmstadt, Germany; ^^3^^Omilia, Greece</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2868–2872&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In many voice biometrics applications there is a requirement to preserve privacy, not least because of the recently enforced General Data Protection Regulation (GDPR). Though progress in bringing privacy preservation to voice biometrics is lagging behind developments in other biometrics communities, recent years have seen rapid progress, with secure computation mechanisms such as homomorphic encryption being applied successfully to speaker recognition. Even so, the computational overhead incurred by processing speech data in the encrypted domain is substantial. While still tolerable for single biometric comparisons, most state-of-the-art systems perform some form of cohort-based score normalisation, requiring  many thousands of biometric comparisons. The computational overhead is then prohibitive, meaning that one must accept either degraded performance (no score normalisation) or potential for privacy violations. This paper proposes the first computationally feasible approach to privacy-preserving cohort score normalisation. Our solution is a cohort pruning scheme based on secure multi-party computation which enables privacy-preserving score normalisation using probabilistic linear discriminant analysis (PLDA) comparisons. The solution operates upon binary voice representations. While the binarisation is lossy in biometric rank-1 performance, it supports computationally-feasible biometric rank-n comparisons in the encrypted domain.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Liu|AUTHOR Yi Liu]], [[Liang He|AUTHOR Liang He]], [[Jia Liu|AUTHOR Jia Liu]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2873–2877&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In neural network based speaker verification, speaker embedding is expected to be discriminative between speakers while the intra-speaker distance should remain small. A variety of loss functions have been proposed to achieve this goal. In this paper, we investigate the large margin softmax loss with different configurations in speaker verification. Ring loss and minimum hyperspherical energy criterion are introduced to further improve the performance. Results on VoxCeleb show that our best system outperforms the baseline approach by 15% in EER, and by 13%, 33% in minDCF08 and minDCF10, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amirhossein Hajavi|AUTHOR Amirhossein Hajavi]], [[Ali Etemad|AUTHOR Ali Etemad]]
</p><p class="cpabstractcardaffiliationlist">Queen’s University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2878–2882&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Today’s interactive devices such as smart-phone assistants and smart speakers often deal with short-duration speech segments. As a result, speaker recognition systems integrated into such devices will be much better suited with models capable of performing the recognition task with short-duration utterances. In this paper, a new deep neural network, UtterIdNet, capable of performing speaker recognition with short speech segments is proposed. Our proposed model utilizes a novel architecture that makes it suitable for short-segment speaker recognition through an efficiently increased use of information in short speech segments. UtterIdNet has been trained and tested on the VoxCeleb datasets, the latest benchmarks in speaker recognition. Evaluations for different segment durations show consistent and stable performance for short segments, with significant improvement over the previous models for segments of 2 seconds, 1 second, and especially sub-second durations (250 ms and 500 ms).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jianfeng Zhou|AUTHOR Jianfeng Zhou]], [[Tao Jiang|AUTHOR Tao Jiang]], [[Zheng Li|AUTHOR Zheng Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]
</p><p class="cpabstractcardaffiliationlist">Xiamen University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2883–2887&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In speaker verification, the convolutional neural networks (CNN) have been successfully leveraged to achieve a great performance. Most of the models based on CNN primarily focus on learning the distinctive speaker embedding from the horizontal direction (time-axis). However, the feature relationship between channels is usually neglected. In this paper, we firstly aim toward an alternate direction of recalibrating the channel-wise features by introducing the recently proposed “squeeze-and-excitation” (SE) module for image classification. We effectively incorporate the SE blocks in the deep residual networks (ResNet-SE) and demonstrate a slightly improvement on VoxCeleb corpuses. Additionally, we propose a new loss function, namely additive supervision softmax (AS-Softmax), to make full use of the prior knowledge of the mis-classified samples at training stage by imposing more penalty on the mis-classified samples to regularize the training process. The experimental results on VoxCeleb corpuses demonstrate that the proposed loss could further improve the performance of speaker system, especially on the case that the combination of the ResNet-SE and the AS-Softmax.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suwon Shon|AUTHOR Suwon Shon]], [[Hao Tang|AUTHOR Hao Tang]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2888–2892&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose VoiceID loss, a novel loss function for training a speech enhancement model to improve the robustness of speaker verification. In contrast to the commonly used loss functions for speech enhancement such as the L2 loss, the VoiceID loss is based on the feedback from a speaker verification model to generate a ratio mask. The generated ratio mask is multiplied pointwise with the original spectrogram to filter out unnecessary components for speaker verification. In the experiments, we observed that the enhancement network, after training with the VoiceID loss, is able to ignore a substantial amount of time-frequency bins, such as those dominated by noise, for verification. The resulting model consistently improves the speaker verification system on both clean and noisy conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anderson R. Avila|AUTHOR Anderson R. Avila]]^^1^^, [[Jahangir Alam|AUTHOR Jahangir Alam]]^^2^^, [[Douglas O’Shaughnessy|AUTHOR Douglas O’Shaughnessy]]^^1^^, [[Tiago H. Falk|AUTHOR Tiago H. Falk]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^INRS-EMT, Canada; ^^2^^CRIM, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2893–2897&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recently, automatic speaker verification (ASV) systems have been acknowledged to be vulnerable to replay attacks. Multiple efforts have been taken by the research community to improve ASV robustness. In this paper, we propose a replay attack countermeasure based on the blind estimation of the magnitude of channel responses. For that, the log-spectrum average of the clean speech signal is predicted from a Gaussian mixture model (GMM) of RASTA filtered mel-frequency cepstral coefficients (MFCCs) trained on clean speech. The magnitude response of the channel is obtained by subtracting the log-spectrum of the observed signal from the predicted log-spectrum average of the clean signal. Two datasets are used in our experiments: (1) the TIMIT dataset, which is used to train the log-spectrum average of the clean signal; and (2) a dataset containing replay attacks used during the second Automatic Speaker Verification Spoofing and Countermeasures Challenge (ASVspoof 2017). Performance is compared to two benchmarks. The discrete Fourier transform power spectral (DFTspec) and the constant Q cepstral coefficients (CQCCs). Results show the proposed method outperforming the two benchmarks in most scenarios with equal error rate (EER) as low as 6.87% when testing on the development set and as low as 11.28% on the evaluation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hongji Wang|AUTHOR Hongji Wang]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]
</p><p class="cpabstractcardaffiliationlist">Shanghai Jiao Tong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2938–2942&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Replay spoofing attacks are a major threat for speaker verification systems. Although many anti-spoofing systems or countermeasures are proposed to detect dataset-specific replay attacks with promising performance, they generalize poorly when applied on unseen datasets. In this work, the cross-dataset scenario is treated as a domain-mismatch problem and dealt with using a domain adversarial training framework. Compared with previous approaches, features learned from this newly-designed architecture are more discriminative for spoofing detection, but more indistinguishable across different domains. Only labeled source-domain data and unlabeled target-domain data are required during the adversarial training process, which can be regarded as unsupervised domain adaptation. Experiments on the ASVspoof 2017 V.2 dataset as well as the physical access condition part of BTAS 2016 dataset demonstrate that a significant EER reduction of over relative 30% can be obtained after applying the proposed domain adversarial training framework. It is shown that our proposed model can benefit from a large amount of unlabeled target-domain training data to improve detection accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[A. Kanagasundaram|AUTHOR A. Kanagasundaram]]^^1^^, [[S. Sridharan|AUTHOR S. Sridharan]]^^2^^, [[G. Sriram|AUTHOR G. Sriram]]^^3^^, [[S. Prachi|AUTHOR S. Prachi]]^^3^^, [[C. Fookes|AUTHOR C. Fookes]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Jaffna, Sri Lanka; ^^2^^QUT, Australia; ^^3^^Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2943–2947&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The aim of this work is to gain insights into how the deep neural network (DNN) models should be trained for short utterance evaluation conditions in an x-vector based speaker verification system. The study suggests that the speaker embedding can be extracted with reduced dimensions for short utterance evaluation conditions. When the speaker embedding is extracted from deeper layer which has lower dimension, the x-vector system achieves 14% relative improvement over baseline approach on EER on NIST2010 5sec-5sec truncated conditions. We surmise that since short utterances have less phonetic information speaker discriminative x-vectors can be extracted from a deeper layer of the DNN which captures less phonetic information. Another interesting finding is that the x-vector system achieves 5% relative improvement on NIST2010 5sec-5sec evaluation condition when the back-end PLDA is trained using short utterance development data. The results confirms the intuitive expectation that duration of development utterances and the duration of evaluation utterances should be matched. Finally, for the duration mismatch condition, we propose a variance normalization approach for PLDA training that provides a 4% relative improvement on EER over baseline approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nanxin Chen|AUTHOR Nanxin Chen]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Najim Dehak|AUTHOR Najim Dehak]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2948–2952&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, a novel neural network layer is proposed to combine frame-level into utterance-level representations for speaker modeling. We followed the assumption that the frame-level outputs of the speaker embedding (a.k.a x-vector) encoder are multi-modal. Therefore, we modeled the frame-level information as a mixture of factor analyzers with latent variable (utterance embedding) tied across frames and mixture components, in as similar way as in the i-vector approach. We denote this layer as Tied Mixture of Factor Analyzers (TMFA) layer. The optimal value of the embedding is obtained by minimizing the reconstruction error of the frame-level representations given the embedding and the TMFA model parameters. However, the TMFA layer parameters (factor loading matrices, means and precisions) were trained with cross-entropy loss as the rest of parameters of the network. We experimented on the Speaker Recognition Evaluation 2016 Cantonese as well as in the Speaker in the Wild datasets. The proposed pooling layer improved w.r.t. mean plus standard deviation pooling — standard in x-vector approach — in most of the conditions evaluated; and obtained competitive performance w.r.t. the recently proposed learnable dictionary encoding pooling method, which also assumes multi-modal frame-level representations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Buddhi Wickramasinghe|AUTHOR Buddhi Wickramasinghe]], [[Eliathamby Ambikairajah|AUTHOR Eliathamby Ambikairajah]], [[Julien Epps|AUTHOR Julien Epps]]
</p><p class="cpabstractcardaffiliationlist">UNSW, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2953–2957&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Development of generalizable countermeasures for replay spoofing attacks on Automatic Speaker Verification (ASV) systems is still an open problem. Many countermeasures to date utilize bandpass filters to extract a variety of frequency band-based features. This paper proposes the use of adaptive bandpass filters, a concept adopted from human cochlear modelling to improve detection performance. Gains of filters used for subband based feature extraction are adaptively adjusted by varying their Q factors (Quality factor) as a function of input signal level to boost low amplitude signal components and improve the front-end’s sensitivity to them. This method is used to enhance information embedded in speech signals such as device channel effects which could be instrumental in distinguishing genuine speech signals from replayed ones. Three features extracted using the adaptive filter process yielded performance improvements over other auditory concepts-based baselines, showing the potential of using an adaptive filter mechanism for replay spoofing attack detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pierre-Michel Bousquet|AUTHOR Pierre-Michel Bousquet]], [[Mickael Rouvier|AUTHOR Mickael Rouvier]]
</p><p class="cpabstractcardaffiliationlist">LIA (EA 4128), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2958–2962&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Current speaker recognition systems, that are learned by using wide training datasets and include sophisticated modelings, turn out to be very specific, providing sometimes disappointing results in real-life applications. Any shift between training and test data, in terms of device, language, duration, noise or other tends to degrade accuracy of speaker detection. This study investigates unsupervised domain adaptation,when only a scarce and unlabeled “in-domain” development dataset is available. Details and relevance of different approaches are described and commented, leading to a new robust method that we call feature-Distribution Adaptor. Efficiency of the proposed technique is experimentally validated on the recent NIST 2016 and 2018 Speaker Recognition Evaluation datasets.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Suwon Shon|AUTHOR Suwon Shon]]^^1^^, [[Younggun Lee|AUTHOR Younggun Lee]]^^2^^, [[Taesu Kim|AUTHOR Taesu Kim]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^Neosapience, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2963–2967&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes a fast speaker search system to retrieve segments of the same voice identity in the large-scale data. A recent study shows that Locality Sensitive Hashing (LSH) enables quick retrieval of a relevant voice in the large-scale data in conjunction with i-vector while maintaining accuracy. In this paper, we proposed Random Speaker-variability Subspace (RSS) projection to map a data into LSH based hash tables. We hypothesized that rather than projecting on completely random subspace without considering data, projecting on randomly generated speaker variability space would give more chance to put the same speaker representation into the same hash bins, so we can use less number of hash tables. Multiple RSS can be generated by randomly selecting a subset of speakers from a large speaker cohort. From the experimental result, the proposed approach shows 100 times and 7 times faster than the linear search and LSH, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ankur T. Patil|AUTHOR Ankur T. Patil]]^^1^^, [[Rajul Acharya|AUTHOR Rajul Acharya]]^^1^^, [[Pulikonda Aditya Sai|AUTHOR Pulikonda Aditya Sai]]^^2^^, [[Hemant A. Patil|AUTHOR Hemant A. Patil]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^DA-IICT, India; ^^2^^IIIT Vadodara, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2898–2902&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Replay attack poses significant threat to Automatic Speaker Verification (ASV) system among various spoofing attacks, as it is easily accessible by low cost and high quality recording and playback devices. This paper presents a novel feature set, i.e., Cochlear Filter Cepstral Coefficient Instantaneous Frequency using Energy Separation Algorithm (CFCCIF-ESA) to develop countermeasure against replay spoofing attacks. Experimental results on ASVspoof 2017 Version 2.0 database reveal that the proposed CFCCIF-ESA performs better than the earlier proposed CFCCIF (using analytic signal generation via Hilbert transform) feature set. This is because ESA uses extremely short window to estimate instantaneous frequency being able to adapt during speech transitions across phonemes. Experiments are performed using Gaussian Mixture Model (GMM) as a classifier. Baseline Constant Q Cepstral Coefficient (CQCC) performs slightly better than CFCCIF-ESA on development set (i.e., 12.47% and 12.98% Equal Error Rate (EER) for CQCC and CFCCIF-ESA, respectively). However, contrasting results on evaluation set (i.e., 18.81% and 14.77% EER for CQCC and CFCCIF-ESA, respectively) indicates that the proposed CFCCIF-ESA gives relatively better performance for unseen attacks in evaluation data. Also, the proposed feature set gives an EER of 11.56% and 13.26% on development and evaluation dataset when fused with state-of-the-art Mel Frequency Cepstral Coefficient (MFCC).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Victoria Mingote|AUTHOR Victoria Mingote]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2903–2907&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Currently, most Speaker Verification (SV) systems based on neural networks use Cross-Entropy and/or Triplet loss functions. Despite these functions provide competitive results, they might not fully exploit the system performance, because they are not designed to optimize the verification task considering the performance measures, e.g. the Detection Cost Function (DCF) or the Equal Error Rate (EER). This paper proposes a first approach to this issue through the optimization of a loss function based on the DCF. This mechanism allows the end-to-end system to directly manage the threshold used to compute the ratio between the False Rejection Rate (FRR) and the False Acceptance Rate (FAR). This way connecting the system training directly to the operating point. Results in a text-dependent speaker verification framework, based on neural network super-vectors over the RSR2015 dataset, outperform reference systems using Cross-Entropy and Triplet loss, as well as our previously proposal based on an approximation of the Area Under the Curve ( aAUC).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lei Fan|AUTHOR Lei Fan]], [[Qing-Yuan Jiang|AUTHOR Qing-Yuan Jiang]], [[Ya-Qi Yu|AUTHOR Ya-Qi Yu]], [[Wu-Jun Li|AUTHOR Wu-Jun Li]]
</p><p class="cpabstractcardaffiliationlist">Nanjing University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2908–2912&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker identification and retrieval have been widely used in real applications. To overcome the inefficiency problem caused by real-valued representations, there have appeared some speaker hashing methods for speaker identification and retrieval by learning binary codes as representations. However, these hashing methods are based on i-vector and cannot achieve satisfactory retrieval accuracy as they cannot learn discriminative feature representations. In this paper, we propose a novel deep hashing method, called deep additive margin hashing (DAMH), to improve retrieval performance for speaker identification and retrieval task. Compared with existing speaker hashing methods, DAMH can perform feature learning and binary code learning seamlessly by incorporating these two procedures into an end-to-end architecture. Experimental results on a large-scale audio dataset VoxCeleb2 show that DAMH can outperform existing speaker hashing methods to achieve state-of-the-art performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mirko Marras|AUTHOR Mirko Marras]]^^1^^, [[Paweł Korus|AUTHOR Paweł Korus]]^^2^^, [[Nasir Memon|AUTHOR Nasir Memon]]^^2^^, [[Gianni Fenu|AUTHOR Gianni Fenu]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Cagliari, Italy; ^^2^^New York University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2913–2917&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we assess vulnerability of speaker verification systems to dictionary attacks. We seek master voices, i.e., adversarial utterances optimized to match against a large number of users by pure chance. First, we perform menagerie analysis to identify utterances which intrinsically hold this property. Then, we propose an adversarial optimization approach for generating master voices synthetically. Our experiments show that, even in the most secure configuration, on average, a master voice can match approx. 20% of females and 10% of males without any knowledge about the population. We demonstrate that dictionary attacks should be considered as a feasible threat model for sensitive and high-stakes deployments of speaker verification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tharshini Gunendradasan|AUTHOR Tharshini Gunendradasan]]^^1^^, [[Eliathamby Ambikairajah|AUTHOR Eliathamby Ambikairajah]]^^1^^, [[Julien Epps|AUTHOR Julien Epps]]^^1^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^UNSW, Australia; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2918–2922&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Replay attack poses a key threat for automatic speaker verification systems. Spoofing detection systems inspired by auditory perception have shown promise to date, however some aspects of auditory processing have not been investigated in this context. In this paper, a transmission line cochlear model that incorporates an active feedback mechanism is proposed for replay attack detection. This model compresses the considerable energy variation in each auditory sub-band filter by boosting low-amplitude signal, an effect that is not considered in many auditory models. To perform the compression, the parameters of each auditory sub-band filter are modified based on the sub-band energy, analogous to the effect of the closed-loop adaptation mechanism that allows perception of a wide dynamic range from a physically constrained system, which we term adaptive-Q. Evaluation on the ASVspoof 2017 version 2 database suggests that the adaptive-Q compression provided by the proposed model helps to improve the performance of replay detection, and a relative reduction in EER of 26% was achieved compared with the best results reported for auditory system-based feature proposed for replay attack detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sungrack Yun|AUTHOR Sungrack Yun]], [[Janghoon Cho|AUTHOR Janghoon Cho]], [[Jungyun Eum|AUTHOR Jungyun Eum]], [[Wonil Chang|AUTHOR Wonil Chang]], [[Kyuwoong Hwang|AUTHOR Kyuwoong Hwang]]
</p><p class="cpabstractcardaffiliationlist">Qualcomm, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2923–2927&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents an end-to-end text-independent speaker verification framework by jointly considering the speaker embedding (SE) network and automatic speech recognition (ASR) network. The SE network learns to output an embedding vector which distinguishes the speaker characteristics of the input utterance, while the ASR network learns to recognize the phonetic context of the input. In training our speaker verification framework, we consider both the triplet loss minimization and adversarial gradient of the ASR network to obtain more discriminative and text-independent speaker embedding vectors. With the triplet loss, the distances between the embedding vectors of the same speaker are minimized while those of different speakers are maximized. Also, with the adversarial gradient of the ASR network, the text-dependency of the speaker embedding vector can be reduced. In the experiments, we evaluated our speaker verification framework using the LibriSpeech and CHiME 2013 dataset, and the evaluation results show that our speaker verification framework shows lower equal error rate and better text-independency compared to the other approaches.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Soonshin Seo|AUTHOR Soonshin Seo]], [[Daniel Jun Rim|AUTHOR Daniel Jun Rim]], [[Minkyu Lim|AUTHOR Minkyu Lim]], [[Donghyun Lee|AUTHOR Donghyun Lee]], [[Hosung Park|AUTHOR Hosung Park]], [[Junseok Oh|AUTHOR Junseok Oh]], [[Changmin Kim|AUTHOR Changmin Kim]], [[Ji-Hwan Kim|AUTHOR Ji-Hwan Kim]]
</p><p class="cpabstractcardaffiliationlist">Sogang University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2928–2932&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The objective of speaker verification is to reject or accept whether or not the input speech is that of a enrolled speaker. Traditionally, i-vector or speaker embeddings system such as d-vector representing the speaker information has been showing high performance with similarity metrics at the backend. Recently it has been proposed an end-to-end system based on previous speaker embeddings approach without additional strategy after extraction. Among the various models, CNN based end-to-end system is showing state-of-the-art performance. CNN based model is trained to classify multiple speakers and speaker embeddings are extracted.

In this paper, we propose shortcut connections based deep speaker embeddings for end-to-end speaker verification system. We construct modified ResNet-18 model so that the activation outputs from bottleneck architecture have shortcut connections to speaker embeddings. Deep speaker embeddings are extracted by jointly training in end-to-end approach. The model was constructed without other sophisticated methods such as length normalization, or additive margin softmax loss. When we tested proposed model on the unconstrained conditions data set called VoxCeleb1, the result showed EER of 3.03% when tested with high dimensional deep speaker embeddings. This is the state-of-the-art performance of end-to-end speaker verification model on VoxCeleb1.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chang Huai You|AUTHOR Chang Huai You]]^^1^^, [[Jichen Yang|AUTHOR Jichen Yang]]^^2^^, [[Huy Dat Tran|AUTHOR Huy Dat Tran]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^A*STAR, Singapore; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2933–2937&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Device feature, which contains the information of both recording channel and playback channel, is the critical trait for replay spoofing detection. So far there have not been any technical reports about the usage of device information in spoofing detection for speaker verification. In this paper, we propose to build a replay device feature (RDF) extractor on the basis of the genuine-replay-pair training database. The RDF extractor is trained in constant-Q transform (CQT) spectrum domain. A bidirectional long short-term memory (BLSTM) is used in the neural network and finally the RDF extractor is formed by applying discrete cosine transform (DCT) to the output vector of the BLSTM. The experimental result on ASVspoof 2017 corpus version 2.0 shows that equal error rate (EER) of replay detection system with the proposed RDF reaches 15.08%. Furthermore, by combining the RDF with constant-Q cepstral coefficients plus log energy (CQCCE), the EER of the detection system can reduce to 8.99%. In addition, the experimental results also show that the RDF has much complementarity with conventional features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takuya Yoshioka|AUTHOR Takuya Yoshioka]], [[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Andreas Stolcke|AUTHOR Andreas Stolcke]], [[William Hinthorn|AUTHOR William Hinthorn]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Michael Zeng|AUTHOR Michael Zeng]], [[Xuedong Huang|AUTHOR Xuedong Huang]]
</p><p class="cpabstractcardaffiliationlist">Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2968–2972&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe a system that generates speaker-annotated transcripts of meetings by using multiple asynchronous distant microphones. The system is composed of continuous audio stream alignment, blind beamforming, speech recognition, speaker diarization, and system combination. While the idea of improving the meeting transcription accuracy by leveraging multiple recordings has been investigated in certain specific technology areas such as beamforming, our objective is to assess the feasibility of a complete system with a set of mobile devices and conduct a detailed analysis. With seven input audio streams, our system achieves a word error rate (WER) of 22.3% and a speaker-attributed WER (SAWER) of 26.7%, and comes within 3% of the close-talking microphone WER on non-overlapping speech. The relative gains in SAWER over a single-device system are 14.8%, 20.3%, and 22.4% for three, five, and seven microphones, respectively. The full system achieves a 13.6% diarization error rate, 10% of which are due to overlapped speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Inga R. Helgadóttir|AUTHOR Inga R. Helgadóttir]], [[Anna Björk Nikulásdóttir|AUTHOR Anna Björk Nikulásdóttir]], [[Michal Borský|AUTHOR Michal Borský]], [[Judy Y. Fong|AUTHOR Judy Y. Fong]], [[Róbert Kjaran|AUTHOR Róbert Kjaran]], [[Jón Guðnason|AUTHOR Jón Guðnason]]
</p><p class="cpabstractcardaffiliationlist">Reykjavik University, Iceland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3013–3017&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>All performed speeches in the Icelandic parliament, Althingi, are transcribed and published. An automatic speech recognition system (ASR) has been developed to reduce the manual work involved. To our knowledge, this is the first open source speech recognizer in use for Icelandic. In this paper the development of the ASR is described. In-lab system performance is evaluated and first results from the users are described. A word error rate (WER) of 7.91% was obtained on our in-lab speech recognition test set using time-delay deep neural network (TDNN) and re-scoring with a bidirectional recurrent neural network language model (RNN-LM). No further processing of the text is included in that number. In-lab F-score for the punctuation model is 80.6 and 61.6 for the paragraph model. The WER of the ASR, including punctuation marks and other post-processing, was 15.0 ± 6.0%, over 625 speeches, when tested in the wild. This is an upper limit since not all mismatches with the reference text are true errors of the ASR. The transcribers of Althingi graded 77% of the speech transcripts as Good. The Althingi corpus and ASR recipe, constitute a valuable resource for further developments within Icelandic language technology.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vishwa Gupta|AUTHOR Vishwa Gupta]]^^1^^, [[Lise Rebout|AUTHOR Lise Rebout]]^^1^^, [[Gilles Boulianne|AUTHOR Gilles Boulianne]]^^1^^, [[Pierre-André Ménard|AUTHOR Pierre-André Ménard]]^^1^^, [[Jahangir Alam|AUTHOR Jahangir Alam]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CRIM, Canada; ^^2^^CRIM, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3018–3022&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The Airbus air traffic control challenge evaluates speech recognition and call sign detection using real conversations between air traffic controllers and pilots at Toulouse airport in France. CRIM’s main contribution in acoustic modeling for transcribing these conversations is experimentation with bidirectional LSTM (BLSTM) models and lattice-free MMI (LF-MMI) trained TDNN models. Adapting these acoustic models trained from a large dataset to 40 hours of ATC acoustic training data reduces WER significantly compared to training them with the ATC data only. Multiple iterations of adaptation reduce WER for the BLSTM acoustic models significantly, but only marginally for the LF-MMI TDNN acoustic models. Constrained dialog between the air traffic controller and the pilot leads to language model perplexity below 12, and WER for leaderboard and evaluation sets of 9.98% and 9.41% respectively.

For call sign detection from the decoded transcript, we use a bidirectional LSTM followed by conditional random field classifier. This DNN architecture worked better than a finite state transducer based call sign detection. Taking a majority vote over call signs from multiple decodes reduced the call sign errors. The best F1 for call sign detection for leaderboard was 0.8289 and for evaluation 0.8017. Overall, we came 3rd in this evaluation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Samuel Thomas|AUTHOR Samuel Thomas]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Yinghui Huang|AUTHOR Yinghui Huang]], [[Michael Picheny|AUTHOR Michael Picheny]]
</p><p class="cpabstractcardaffiliationlist">IBM, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2973–2977&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present a study on building various deep neural network-based speech recognition systems for automatic caption generation that can deal with out-of-vocabulary (OOV) words. We develop several kinds of systems using various acoustic (hybrid, CTC, attention-based neural networks) and language modeling (n-gram and RNN-based neural networks) techniques on broadcast news. We discuss various limitations that the proposed systems have and introduce methods to effectively use them to detect OOVs. For automatic OOV recovery, we compare the use of different kinds of phonetic and graphemic sub-word units, that can be synthesized into word outputs. On an experimental three hour broadcast news test set with a 4% OOV rate, the proposed CTC and attention-based systems are capable of reliably detecting OOVs much better (0.52 F-score) than a traditional hybrid baseline system (0.21 F-score). These improved detection gains translate further to better WER performance. With reference to a non-OOV oracle baseline, the proposed systems at just 12% relative (1.4% absolute) loss in word error rate (WER), perform significantly better than the traditional hybrid system (with close to 50% relative loss), by recovering OOVs using their sub-word outputs.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Muhammad Umar Farooq|AUTHOR Muhammad Umar Farooq]], [[Farah Adeeba|AUTHOR Farah Adeeba]], [[Sahar Rauf|AUTHOR Sahar Rauf]], [[Sarmad Hussain|AUTHOR Sarmad Hussain]]
</p><p class="cpabstractcardaffiliationlist">UET Lahore, Pakistan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2978–2982&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Development of Large Vocabulary Continuous Speech Recognition (LVCSR) system is a cumbersome task, especially for low resource languages. Urdu is the national language and lingua franca of Pakistan, with 100 million speakers worldwide. Due to resource scarcity, limited work has been done in the domain of Urdu speech recognition. In this paper, collection of Urdu speech corpus and development of Urdu speech recognition system is presented. Urdu LVCSR is developed using 300 hours of read speech data with a vocabulary size of 199K words. Microphone speech is recorded from 1671 Urdu and Punjabi speakers in both indoor and outdoor environments. Different acoustic modeling techniques such as Gaussian Mixture Models based Hidden Markov Models (GMM-HMM), Time Delay Neural Networks (TDNN), Long-Short Term Memory (LSTM) and Bidirectional Long-Short Term Memory (BLSTM) networks are investigated. Cross entropy and Lattice Free Maximum Mutual Information (LF-MMI) objective functions are employed during acoustic modeling. In addition, Recurrent Neural Network Language Model (RNNLM) is also being used for re-scoring. Developed speech recognition system has been evaluated on 9.5 hours of collected test data and a minimum Word Error Rate (%WER) of 13.50% is achieved.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Min Tang|AUTHOR Min Tang]]
</p><p class="cpabstractcardaffiliationlist">Nuance Communications, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2983–2987&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Hybrid arbitration is a process where we select the best Automatic Speech Recognition (ASR) and Natural Language Understanding (NLU) result from embedded/client and cloud-based system outputs. It is a common approach that a lot of real world applications use to unify knowledge sources that are not available to client and cloud at the same time. In the past, people primarily relied on ASR confidence features and some application specific heuristics in the arbitration process. However, confidence features are unable to capture subtle context specific differences. In this paper, besides confidence, we also use raw ASR strings and NLU results in the hybrid arbitration process. We model the arbitration process as two steps — first, decide whether to wait for a slower system, and second, pick the best result. We compared multiple machine learning approaches and it turns out the Deep Neural Network (DNN) based classifier, using word embeddings to process ASR strings and NLU embeddings to process NLU information, can deliver the best performance. We conducted experiments on two production system setups, using field data from real users. Compared with traditional confidence score based approach, we obtain about 30% relative word error reduction and 30% relative sentence error rate reduction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[György Szaszák|AUTHOR György Szaszák]]^^1^^, [[Máté Ákos Tündik|AUTHOR Máté Ákos Tündik]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Telepathy Labs, Switzerland; ^^2^^BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2988–2992&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Punctuating ASR transcript has received increasing attention recently, and well-performing approaches were presented based on sequence-to-sequence modelling, exploiting textual (word and character) and/or acoustic-prosodic features. In this work we propose to consider character, word and prosody based features all at once to provide a robust and highly language independent platform for punctuation recovery, which can deal also well with highly agglutinating languages with less constrained word order. We demonstrate that using such a feature triplet improves ASR error robustness of punctuation in two quite differently organized languages, English and Hungarian. Moreover, in the highly agglutinating Hungarian, where word-based approaches suffer from the exploding vocabulary (poorer semantic representation through embeddings) and less constrained word order, we show that prosodic cues and the character-based model can powerfully counteract this loss of information. We also perform a deep analysis of punctuation w.r.t. both ASR errors and agglutination to explain the improvements we observed on a solid basis.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thomas Pellegrini|AUTHOR Thomas Pellegrini]]^^1^^, [[Jér^ome Farinas|AUTHOR Jér^ome Farinas]]^^1^^, [[Estelle Delpech|AUTHOR Estelle Delpech]]^^2^^, [[François Lancelot|AUTHOR François Lancelot]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IRIT (UMR 5505), France; ^^2^^Airbus, France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2993–2997&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we describe the outcomes of the challenge organized and run by Airbus and partners in 2018 on Air Traffic Control (ATC) speech recognition. The challenge consisted of two tasks applied to English ATC speech: 1) automatic speech-to-text transcription, 2) call sign detection (CSD). The registered participants were provided with 40 hours of speech along with manual transcriptions. Twenty-two teams submitted predictions on a five hour evaluation set. ATC speech processing is challenging for several reasons: high speech rate, foreign-accented speech with a great diversity of accents, noisy communication channels. The best ranked team achieved a 7.62% Word Error Rate and a 82.41% CSD F1-score. Transcribing pilots’ speech was found to be twice as harder as controllers’ speech. Remaining issues towards solving ATC ASR are also discussed in the paper.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dan Oneață|AUTHOR Dan Oneață]], [[Horia Cucu|AUTHOR Horia Cucu]]
</p><p class="cpabstractcardaffiliationlist">UPB, Romania</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2998–3002&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper addresses the problem of building a speech recognition system attuned to the control of unmanned aerial vehicles (UAVs). Even though UAVs are becoming widespread, the task of creating voice interfaces for them is largely unaddressed. To this end, we introduce a multi-modal evaluation dataset for UAV control, consisting of spoken commands and associated images, which represent the visual context of what the UAV “sees” when the pilot utters the command. We provide baseline results and address two research directions:  (i) how robust the language models are, given an incomplete list of commands at train time;  (ii) how to incorporate visual information in the language model. We find that recurrent neural networks (RNNs) are a solution to both tasks: they can be successfully adapted using a small number of commands and they can be extended to use visual cues. Our results show that the image-based RNN outperforms its text-only counterpart even if the command–image training associations are automatically generated and inherently imperfect.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xiaofei Wang|AUTHOR Xiaofei Wang]], [[Jinyi Yang|AUTHOR Jinyi Yang]], [[Ruizhi Li|AUTHOR Ruizhi Li]], [[Samik Sadhu|AUTHOR Samik Sadhu]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3003–3007&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Quality of data plays an important role in most deep learning tasks. In the speech community, transcription of speech recording is indispensable. Since the transcription is usually generated artificially, automatically finding errors in manual transcriptions not only saves time and labors but benefits the performance of tasks that need the training process. Inspired by the success of hybrid automatic speech recognition using both language model and acoustic model, two approaches of automatic error detection in the transcriptions have been explored in this work. Previous study using a biased language model approach, relying on a strong transcription-dependent language model, has been reviewed. In this work, we propose a novel acoustic model based approach, focusing on the phonetic sequence of speech. Both methods have been evaluated on a completely real dataset, which was originally transcribed with errors and strictly corrected manually afterwards.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Astik Biswas|AUTHOR Astik Biswas]], [[Raghav Menon|AUTHOR Raghav Menon]], [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]], [[Thomas Niesler|AUTHOR Thomas Niesler]]
</p><p class="cpabstractcardaffiliationlist">Stellenbosch University, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3008–3012&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present improvements in automatic speech recognition (ASR) for Somali, a currently extremely under-resourced language. This forms part of a continuing United Nations (UN) effort to employ ASR-based keyword spotting systems to support humanitarian relief programmes in rural Africa. Using just 1.57 hours of annotated speech data as a seed corpus, we increase the pool of training data by applying semi-supervised training to 17.55 hours of untranscribed speech. We make use of factorised time-delay neural networks (TDNN-F) for acoustic modelling, since these have recently been shown to be effective in resource-scarce situations. Three semi-supervised training passes were performed, where the decoded output from each pass was used for acoustic model training in the subsequent pass. The automatic transcriptions from the best performing pass were used for language model augmentation. To ensure the quality of automatic transcriptions, decoder confidence is used as a threshold. The acoustic and language models obtained from the semi-supervised approach show significant improvement in terms of WER and perplexity compared to the baseline. Incorporating the automatically generated transcriptions yields a 6.55% improvement in language model perplexity. The use of 17.55 hour of Somali acoustic data in semi-supervised training shows an improvement of 7.74% relative over the baseline.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tomasz Rutowski|AUTHOR Tomasz Rutowski]]^^1^^, [[Amir Harati|AUTHOR Amir Harati]]^^2^^, [[Yang Lu|AUTHOR Yang Lu]]^^2^^, [[Elizabeth Shriberg|AUTHOR Elizabeth Shriberg]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ellipsis Health, Poland; ^^2^^Ellipsis Health, Canada; ^^3^^Ellipsis Health, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3023–3027&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Machine learning models for speech-based depression classification offer promise for health care applications. Despite growing work on depression classification, little is understood about how the length of speech-input impacts model performance. We analyze results for speaker-independent depression classification using a corpus of over 1400 hours of speech from a human-machine health screening application. We examine performance as a function of response input length for two NLP systems that differ in overall performance.

Results for both systems show that performance depends on natural length, elapsed length, and ordering of the response within a session. Systems share a minimum length threshold, but differ in a response saturation threshold, with the latter higher for the better system. At saturation it is better to pose a new question to the speaker, than to continue the current response. These and additional reported results suggest how applications can be better designed to both elicit and process optimal input lengths for depression classification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Philipp Klumpp|AUTHOR Philipp Klumpp]], [[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]], [[Tino Haderlein|AUTHOR Tino Haderlein]], [[Elmar Nöth|AUTHOR Elmar Nöth]]
</p><p class="cpabstractcardaffiliationlist">FAU Erlangen-Nürnberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3068–3072&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The feature vectors of a data set encode information about relations between speaker groups, clusters and outliers. Based on the assumption that these relations are conserved within the spatial properties of feature vectors, we introduce similarity maps to visualize consistencies and deviations in magnitude and orientation between two feature vectors. We also present an iterative approach to find subspaces of a high-dimensional feature space that encode information about predefined speaker clusters. The methods were evaluated with two different data sets, one from chronically hoarse speakers and a second one from Parkinson’s Disease patients and a healthy control group. The results showed that similarity maps provide a decent visualization of speaker groups and the spatial properties of their respective feature vectors. With the iterative optimization, it was possible to find features that show pronounced spatial differences between predefined clusters.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sandeep Nallan Chakravarthula|AUTHOR Sandeep Nallan Chakravarthula]]^^1^^, [[Haoqi Li|AUTHOR Haoqi Li]]^^1^^, [[Shao-Yen Tseng|AUTHOR Shao-Yen Tseng]]^^1^^, [[Maija Reblin|AUTHOR Maija Reblin]]^^2^^, [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Southern California, USA; ^^2^^Moffitt Cancer Center, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3073–3077&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Cancer impacts the quality of life of those diagnosed as well as their spouse caregivers, in addition to potentially influencing their day-to-day behaviors. There is evidence that effective communication between spouses can improve well-being related to cancer but it is difficult to efficiently evaluate the quality of daily life interactions using manual annotation frameworks. Automated recognition of behaviors based on the interaction cues of speakers can help analyze interactions in such couples and identify behaviors which are beneficial for effective communication. In this paper, we present and detail a dataset of dyadic interactions in 85 real-life cancer-afflicted couples and a set of observational behavior codes pertaining to interpersonal communication attributes. We describe and employ neural network-based systems for classifying these behaviors based on turn-level acoustic and lexical speech patterns. Furthermore, we investigate the effect of controlling for factors such as gender, patient/caregiver role and conversation content on behavior classification. Analysis of our preliminary results indicates the challenges in this task due to the nature of the targeted behaviors and suggests that techniques incorporating contextual processing might be better suited to tackle this problem.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ying Qin|AUTHOR Ying Qin]]^^1^^, [[Tan Lee|AUTHOR Tan Lee]]^^1^^, [[Anthony Pak Hin Kong|AUTHOR Anthony Pak Hin Kong]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^University of Central Florida, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3078–3082&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For automatic assessment of language impairment in natural speech, properly designed text-based features are needed. The feature design relies on experts’ domain knowledge and the feature extraction process may undesirably involve manual effort on transcribing. This paper describes a novel approach to automatic assessment of language impairment in narrative speech of people with aphasia (PWA), without explicit knowledge-driven feature design. A convolutional neural network (CNN) is used to extract language impairment related text features from the output of an automatic speech recognition (ASR) system or, if available, the manual transcription of input speech. To mitigate the adverse effect of ASR errors, confusion network is adopted to improve the robustness of embedding representation of ASR output. The proposed approach is evaluated on the task of discriminating severe PWA from mild PWA based on Cantonese narrative speech. Experimental results confirm the effectiveness of automatically learned text features. It is also shown that CNN models trained with text input and acoustic features are complementary to each other.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mary Pietrowicz|AUTHOR Mary Pietrowicz]]^^1^^, [[Carla Agurto|AUTHOR Carla Agurto]]^^1^^, [[Raquel Norel|AUTHOR Raquel Norel]]^^1^^, [[Elif Eyigoz|AUTHOR Elif Eyigoz]]^^1^^, [[Guillermo Cecchi|AUTHOR Guillermo Cecchi]]^^1^^, [[Zarina R. Bilgrami|AUTHOR Zarina R. Bilgrami]]^^2^^, [[Cheryl Corcoran|AUTHOR Cheryl Corcoran]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, USA; ^^2^^Icahn School of Medicine at Mount Sinai, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3028–3032&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>What if young people at risk for developing schizophrenia could be identified early, via a fast, automated, non-invasive test of language, which could be administered remotely? These youths could then receive intervention which might mitigate course and possibly prevent psychosis. Timed word fluency tests, in which individuals name words starting with a designated sound (typically F/A/S) or represent a given concept category (commonly animals/fruits/vegetables), have been used in the assessment of schizophrenia and its risk states, and in many other mental health conditions. Typically, psychologists manually record the number and size of valid phoneme clusters and switches observed in the phonemic tests and count the number of valid words belonging to a given category in the categorical tests. We present a new technique for automating the analysis of category fluency data and apply it to the problem of detecting youths at risk of developing schizophrenia, with best results over 85% accuracy when applying phonemic analysis to categorical data. The technique supports the separate quantification of structural and sequential phonemic similarity measures, supports an arbitrary range of pronunciations and dialects in the analysis, and may be extended to the assessment of other mental and physical health conditions, and their risk states.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Laetitia Jeancolas|AUTHOR Laetitia Jeancolas]]^^1^^, [[Graziella Mangone|AUTHOR Graziella Mangone]]^^2^^, [[Jean-Christophe Corvol|AUTHOR Jean-Christophe Corvol]]^^2^^, [[Marie Vidailhet|AUTHOR Marie Vidailhet]]^^2^^, [[Stéphane Lehéricy|AUTHOR Stéphane Lehéricy]]^^2^^, [[Badr-Eddine Benkelfat|AUTHOR Badr-Eddine Benkelfat]]^^1^^, [[Habib Benali|AUTHOR Habib Benali]]^^3^^, [[Dijana Petrovska-Delacrétaz|AUTHOR Dijana Petrovska-Delacrétaz]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SAMOVAR (UMR 5157), France; ^^2^^ICM (UMRS 1127 UMR 7225), France; ^^3^^Concordia University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3033–3037&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Vocal impairments are among the earliest symptoms in Parkinson’s Disease (PD). We adapted a method classically used in speech and speaker recognition, based on Mel-Frequency Cepstral Coefficients (MFCC) extraction and Gaussian Mixture Model (GMM) to detect recently diagnosed and pharmacologically treated PD patients. We classified early PD subjects from controls with an accuracy of 83%, using recordings obtained with a professional microphone. More interestingly, we were able to classify PD from controls with an accuracy of 75% based on telephone recordings. As far as we know, this is the first time that audio recordings from telephone network have been used for early PD detection. This is a promising result for a potential future telediagnosis of Parkinson’s disease.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Parvaneh Janbakhshi|AUTHOR Parvaneh Janbakhshi]], [[Ina Kodrasi|AUTHOR Ina Kodrasi]], [[Hervé Bourlard|AUTHOR Hervé Bourlard]]
</p><p class="cpabstractcardaffiliationlist">Idiap Research Institute, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3038–3042&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech intelligibility is an important assessment criterion of the communicative performance of pathological speakers. To assist clinicians in their assessment, time- and cost-efficient automatic intelligibility measures offering a repeatable and reliable assessment are desired. In this paper, we propose to automatically assess pathological speech intelligibility based on a distance measure between the subspaces of spectral patterns of the pathological speech signal and of a fully intelligible (healthy) speech signal. To extract the subspace of spectral patterns we investigate two linear decomposition methods, i.e., Principal Component Analysis and Approximate Joint Diagonalization. Pathological speech intelligibility is then derived using a Grassman distance measure which quantifies the difference between the extracted subspaces of pathological and healthy speech. Experiments on an English database of Cerebral Palsy patients show that the proposed intelligibility measure is significantly correlated with subjective intelligibility ratings. In addition, comparisons to state-of-the-art measures show that the proposed subspace-based measure achieves a high performance with a significantly lower computational cost and without imposing any constraints on the speech material of the speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Carolina De Pasquale|AUTHOR Carolina De Pasquale]]^^1^^, [[Charlie Cullen|AUTHOR Charlie Cullen]]^^2^^, [[Brian Vaughan|AUTHOR Brian Vaughan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technological University Dublin, Ireland; ^^2^^University of the West of Scotland, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3043–3047&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Therapeutic alliance, a concept closely related to rapport, is one of the most important variables in psychotherapy. High degrees of synchrony/coordination in the therapeutic session are considered to contribute to rapport, and have received attention in the psychotherapy literature.

Coordinative behaviours are observable in speech, and they manifest in phenomena such as prosodic accommodation, a dynamic phenomenon closely related to conversational success.

A preliminary investigation of interpersonal prosodic dynamics in psychotherapy was performed on a database obtained in collaboration with the University of Padua, consisting of 16 recordings making up the entire course of a brief psychodynamic psychotherapy intervention for a 25-year-old female volunteer and a 41-year-old male psychotherapist.

The data was analysed with Time Aligned Moving Averages, a method commonly used in interpersonal speech research. Issues of data sparsity are discussed, and preliminary results on the relationship of empathy and anxiety with interpersonal speech dynamics are presented.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alice Rueda|AUTHOR Alice Rueda]]^^1^^, [[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]]^^2^^, [[Cristian David Rios-Urrego|AUTHOR Cristian David Rios-Urrego]]^^3^^, [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]]^^2^^, [[Sridhar Krishnan|AUTHOR Sridhar Krishnan]]^^1^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Ryerson University, Canada; ^^2^^Universidad de Antioquia, Colombia; ^^3^^Universidad de Antioquia, Colombia; ^^4^^FAU Erlangen-Nürnberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3048–3052&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper focuses on selecting features that can best represent the pathophysiology of Parkinson’s disease (PD) dysarthria. PD dysarthria has often been the subject of feature selection and classification experiments, but rarely have the selected features been attempted to be matched to the pathophysiology of PD dysarthria. PD dysarthria manifests through changes in control of a person’s speech production muscles and affects respiration, articulation, resonance, and laryngeal properties, resulting in speech characteristics such as short phrases separated by pauses, reduced speed for non-repetitive syllables or supernormal speed of repetitive syllables, reduced resonance, irregular vowel generation, etc. Articulation, phonation, diadochokinesis (DDK) rhythm, and Empirical Mode Decomposition (EMD) features were extracted from the DDK and sustained /a/ recordings of the Spanish GITA Corpus. These recordings were captured from 50 healthy (HC) and 50 PD subjects. A two-stage filter-wrapper feature selection process was applied to reduce the number of features from 3,534 to 15. These 15 features mainly represent the instability of the voice and rhythm. SVM, Random Forest and Naive Bayes were used to test the discriminative power of the selected features. The results showed that these sustained /a/ and /pa-ta-ka/ stability features could successfully discriminate PD from HC with 70% accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Charles C. Onu|AUTHOR Charles C. Onu]], [[Jonathan Lebensold|AUTHOR Jonathan Lebensold]], [[William L. Hamilton|AUTHOR William L. Hamilton]], [[Doina Precup|AUTHOR Doina Precup]]
</p><p class="cpabstractcardaffiliationlist">McGill University, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3053–3057&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Despite continuing medical advances, the rate of newborn morbidity and mortality globally remains high, with over 6 million casualties every year. The prediction of pathologies affecting newborns based on their cry is thus of significant clinical interest, as it would facilitate the development of accessible, low-cost diagnostic tools. However, the inadequacy of clinically annotated datasets of infant cries limits progress on this task. This study explores a neural transfer learning approach to developing accurate and robust models for identifying infants that have suffered from perinatal asphyxia. In particular, we explore the hypothesis that representations learned from adult speech could inform and improve performance of models developed on infant speech. Our experiments show that models based on such representation transfer are resilient to different types and degrees of noise, as well as to signal loss in time and frequency domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hui-Ting Hong|AUTHOR Hui-Ting Hong]]^^1^^, [[Jeng-Lin Li|AUTHOR Jeng-Lin Li]]^^1^^, [[Yi-Ming Weng|AUTHOR Yi-Ming Weng]]^^2^^, [[Chip-Jin Ng|AUTHOR Chip-Jin Ng]]^^2^^, [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Tsing Hua University, Taiwan; ^^2^^Chang Gung Memorial Hospital, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3058–3062&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Pain is an internal construct with vocal manifestation that varies as a function of personal and clinical attributes. Understanding the vocal indicators of pain-levels is important in providing an objective analytic in clinical assessment and intervention. In this work, we focus on investigating the variability of voice quality as a function of multiple clinical parameters at different pain-levels, specifically for emergency room patients during triage. Their pain-induced pathological voice quality characteristics are naturally affected by an individual attributes such as age, gender and pain-sites. We conduct a detailed multivariate statistical analysis on a 181 unique patient’s vocal quality using recordings of real triage sessions. Our analysis show several important insights, 1) voice quality only varies statistically with pain-levels when interacting effect from other clinical parameters is considered, 2) senior group shows a higher value of voicing probability and shimmer when experiencing severe pain, 3) patients with abdomen pain have a lower jitter and shimmer during severe pain that is different from patients experiencing musculoskeletal pathology, and 4) there could be a relationship between the variation in the voice quality and the neural pathway of pain as evident by interacting with the pain-site factor.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[José Vicente Egas López|AUTHOR José Vicente Egas López]]^^1^^, [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]]^^2^^, [[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Szeged, Hungary; ^^2^^Universidad de Antioquia, Colombia; ^^3^^University of Szeged, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3063–3067&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Parkinson’s Disease (PD) is a neuro-degenerative disorder that affects primarily the motor system of the body. Besides other functions, the subject’s speech also deteriorates during the disease, which allows for a non-invasive way of automatic screening. In this study, we represent the utterances of subjects having PD and those of healthy controls by means of the Fisher Vector approach. This technique is very common in the area of image recognition, where it provides a representation of the local image descriptors via frequency and high order statistics. In the present work, we used four frame-level feature sets as the input of the FV method, and applied (linear) Support Vector Machines (SVM) for classifying the speech of subjects. We found that our approach offers superior performance compared to classification based on the i-vector and cosine distance approach, and it also provides an efficient combination of machine learning models trained on different feature sets or on different speaker tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhen Fu|AUTHOR Zhen Fu]], [[Xihong Wu|AUTHOR Xihong Wu]], [[Jing Chen|AUTHOR Jing Chen]]
</p><p class="cpabstractcardaffiliationlist">Peking University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3083–3087&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In Mandarin Chinese, lexical Tones are inherently bonded with vowels, making both spectral and temporal cues available for speech perception. Temporal cues provided by Tone contrast have been shown facilitating segregation in Mandarin concurrent-vowels identification (MCVI). The present study investigated the effect of spectral cue measured by vowel contrast within the syllable-pair on MCVI, both for normal-hearing (NH) and hearing-impaired (HI) listeners. Acoustic cues of duration and mean F0 difference were carefully controlled. Results exhibited that facilitation from vowel contrast existed for NH listeners but was reduced for HI listeners. Identification score positively correlated with the spectral envelope contrast of different vowel-pairs for both groups, but the coefficient for HI listeners was lower. Further analyses based on a power function model revealed more weighting of temporal cues than spectral cues for NH listeners, while the contributions were equal for HI listeners. These results suggested that the spectral cue provided by vowel contrast could facilitate the MCVI, and auditory processing of temporal cues might be more susceptible to hearing loss than that of spectral cues. These findings have instructions for designing speech processing algorithms for Mandarin-speaking HI listeners.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Maximillian Paulus|AUTHOR Maximillian Paulus]], [[Valerie Hazan|AUTHOR Valerie Hazan]], [[Patti Adank|AUTHOR Patti Adank]]
</p><p class="cpabstractcardaffiliationlist">University College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3128–3132&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Individual differences in talker acoustics substantially affect intelligibility in adverse listening conditions. Spectral enhancement has been found to reliably boost intelligibility in noise while temporal enhancement remains less effective. A potentially mediating factor that has been ignored so far is listening effort, as objectively assessed by the pupil dilation response. In two perception experiments, we measured intelligibility (keyword recall scores) and listening effort (pupil dilation) for two talkers in two listening conditions and with varying degrees of temporal modification. Results suggest that while keyword recall scores are sensitive to individual talker differences across listening conditions, the pupil dilation response reflects the degree of temporal and spectral distortion introduced by the signal processing techniques.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lauren Ward|AUTHOR Lauren Ward]]^^1^^, [[Catherine Robinson|AUTHOR Catherine Robinson]]^^2^^, [[Matthew Paradis|AUTHOR Matthew Paradis]]^^3^^, [[Katherine M. Tucker|AUTHOR Katherine M. Tucker]]^^2^^, [[Ben Shirley|AUTHOR Ben Shirley]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Salford, UK; ^^2^^BBC, UK; ^^3^^American Academy of Dramatic Arts, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3133–3137&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech in noise tests are an important clinical and research tool for understanding speech perception in realistic, adverse listening conditions. Though relatively simple to implement, their development is time and resource intensive. As a result, many tests still in use (and their corresponding recordings) are outdated and no longer fit for purpose. This work takes the popular Revised Speech Perception In Noise (RSPIN) Test and updates it with improved recordings and the addition of a female speaker. It outlines and evaluates a methodology which others can apply to legacy recordings of speech in noise tests to update them and ensure their ongoing usability. This paper describes the original test along with its use over the last four decades and the rationale for re-recording. The new speakers, new accent (Received Pronunciation) and recording methodology are then outlined. Subjective and objective analysis of the new recordings for normal hearing listeners are then given. The paper concludes with recommendations for using the R²SPIN.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fei Chen|AUTHOR Fei Chen]]
</p><p class="cpabstractcardaffiliationlist">SUSTech, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3138–3142&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For hearing-impaired listeners fitted with cochlear implants (CIs), they rely on electric (E) stimulation with primarily slow-varying temporal information but limited spectral information for their speech perception. Many recent studies showed that for those implanted listeners with residual low-frequency hearing, the combined electric-acoustic (E+A) stimulation could significantly improve their speech perception in adverse listening conditions. The present work assessed the contributions of consonant-vowel transitions to Mandarin tone identification via a vocoder based simulation of E+A stimulation. Isolated Mandarin words were processed to preserve full consonants and vowel onsets across consonant-vowel transitions, and replace the rest with noise. The two types of vocoded stimuli, simulating E and E+A stimulations, were presented to normal-hearing Mandarin-speaking listeners to identify lexical tones. Results consistently showed the advantage of E+A stimulation over E-only stimulation when full consonants and the same amount of vowel onset segments were preserved for lexical tone identification. In addition, compared with E stimulation with full vowel segments, the combined-stimulation advantage was observed even when only a small portion of vowel onset segments were presented. Results in this work suggested that in E+A stimulation, segmental contributions were able to provide tone identification benefit relative to E stimulation with the entire Mandarin words.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vicky Zayats|AUTHOR Vicky Zayats]], [[Trang Tran|AUTHOR Trang Tran]], [[Richard Wright|AUTHOR Richard Wright]], [[Courtney Mansfield|AUTHOR Courtney Mansfield]], [[Mari Ostendorf|AUTHOR Mari Ostendorf]]
</p><p class="cpabstractcardaffiliationlist">University of Washington, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3088–3092&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper explores contexts associated with errors in transcription of spontaneous speech, shedding light on human perception of disfluencies and other conversational speech phenomena. A new version of the Switchboard corpus is provided with disfluency annotations for careful speech transcripts, together with results showing the impact of transcription errors on evaluation of automatic disfluency detection.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sandra I. Parhammer|AUTHOR Sandra I. Parhammer]]^^1^^, [[Miriam Ebersberg|AUTHOR Miriam Ebersberg]]^^1^^, [[Jenny Tippmann|AUTHOR Jenny Tippmann]]^^2^^, [[Katja Stärk|AUTHOR Katja Stärk]]^^3^^, [[Andreas Opitz|AUTHOR Andreas Opitz]]^^4^^, [[Barbara Hinger|AUTHOR Barbara Hinger]]^^5^^, [[Sonja Rossi|AUTHOR Sonja Rossi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Medizinische Universität Innsbruck, Austria; ^^2^^Technische Universität Dresden, Germany; ^^3^^MPI for Psycholinguistics, The Netherlands; ^^4^^Universität Leipzig, Germany; ^^5^^Universität Innsbruck, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3093–3097&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The present study investigated the effects of selective attention on the processing of morphosyntactic errors in unattended parts of speech. Two groups of German native (L1) speakers participated in the present study. Participants listened to sentences in which irregular verbs were manipulated in three different conditions (correct, incorrect but attested ablaut pattern, incorrect and crosslinguistically unattested ablaut pattern). In order to track fast dynamic neural reactions to the stimuli, electroencephalography was used. After each sentence, participants in Experiment 1 performed a semantic judgement task, which deliberately distracted the participants from the syntactic manipulations and directed their attention to the semantic content of the sentence. In Experiment 2, participants carried out a syntactic judgement task, which put their attention on the critical stimuli. The use of two different attentional tasks allowed for investigating the impact of selective attention on speech processing and whether morphosyntactic processing steps are performed automatically. In Experiment 2, the incorrect attested condition elicited a larger N400 component compared to the correct condition, whereas in Experiment 1 no differences between conditions were found. These results suggest that the processing of morphosyntactic violations in irregular verbs is not entirely automatic but seems to be strongly affected by selective attention.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Valerie Hazan|AUTHOR Valerie Hazan]], [[Outi Tuomainen|AUTHOR Outi Tuomainen]], [[Linda Taschenberger|AUTHOR Linda Taschenberger]]
</p><p class="cpabstractcardaffiliationlist">University College London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3098–3102&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The impact of energetic (EM) and informational masking (IM) on speech communication is typically evaluated using perception tests that do not involve actual communication. Here, ratings of effort, concentration and degree of interference were obtained for 51 young, middle-aged and older adults after they had completed communicative tasks (Diapix) with another participant in conditions in which no noise, speech-shaped noise, or three voices were heard in the background. They also completed background sensory and cognitive tests and a quality of hearing questionnaire. The EM condition was perceived as less effortful, requiring less concentration and easier to ignore than those involving IM. Effects were generally greater for talkers taking the lead in the interaction. Even though the two older groups were more affected by IM than young adults in a speech in noise perception test, age did not impact on ratings of effort and ability to ignore the noise in the diapix communicative task. Only for concentration ratings, did the Older Adult group give similar ratings in quiet as when EM was present. Together, these results suggest that evaluations that purely assess receptive speech in older adults do not fully represent the impact of sources of interference on speech communication.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chris Davis|AUTHOR Chris Davis]], [[Jeesun Kim|AUTHOR Jeesun Kim]]
</p><p class="cpabstractcardaffiliationlist">Western Sydney University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3103–3107&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigated the perceptual salience of clear and Lombard speech adaptations by older adults (OA) communicating to a younger partner in a diapix task. The aim was to determine whether these two speech styles are perceptually distinct (for auditory and visual speech). The communication setting involved either the younger partner only in babble noise (BAB_partner) or both talkers in babble noise (BAB_both). In the control condition (NORM), both talkers heard normally. To determine how perceptible OA adaptions to these noise conditions were, short (1–4 s) auditory only and visual only recordings of the OA talking to their partner were presented in two perception experiments. In Experiment 1, half of the OA stimuli were from the BAB_partner and half from the NORM condition; and participants were asked to judge whether the older adult was taking to a person who could hear them well or to someone who has trouble hearing them. In Experiment 2 participants decided between NORM and BAB_both stimuli. Participants did both sound-only and visual-only versions. Results showed both adaptations were perceived better than chance; the BAB_both condition was discriminated better from NORM than the BAB_partner one, and auditory judgements were better than visual ones (although these were correlated).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[T. Arias-Vergara|AUTHOR T. Arias-Vergara]]^^1^^, [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]]^^1^^, [[Milos Cernak|AUTHOR Milos Cernak]]^^2^^, [[S. Gollwitzer|AUTHOR S. Gollwitzer]]^^3^^, [[M. Schuster|AUTHOR M. Schuster]]^^3^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universidad de Antioquia, Colombia; ^^2^^Logitech, Switzerland; ^^3^^LMU München, Germany; ^^4^^FAU Erlangen-Nürnberg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3108–3112&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>People with pre- and postlingual onset of deafness, i.e, age of occurrence of hearing loss, often present speech production problems even after hearing rehabilitation by cochlear implantation. In this paper, the speech of 20 prelinguals (aged between 18 to 71 years old), 20 postlinguals (aged between 33 to 78 years old) and 20 healthy control (aged between 31 to 62 years old) German native speakers are analyzed considering phone-attribute features extracted with pre-trained Deep Neural Networks. Speech signals are analyzed with reference to the manner of articulation of consonants according to 5 groups: nasals, sibilants, fricatives, voiced-stops, and voiceless-stops. According to the results, it is possible to detect alterations in the consonant production of CI users when compared with healthy speakers. A comprehensive evaluation of speech changes of CI users will help in the rehabilitation after deafening.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nao Hodoshima|AUTHOR Nao Hodoshima]]
</p><p class="cpabstractcardaffiliationlist">Tokai University, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3113–3117&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Public-address (PA) announcements are widely used, but noise and reverberation can render them unintelligible. Furthermore, in an emergency, textual information available to smartphone users or displayed on electronic bulletin boards may not coincide with PA announcements, and this mismatch may degrade the intelligibility of PA announcements. This study investigated how speech spoken in a normal/urgent style and preceding congruent/incongruent textual information affected word intelligibility and perceived urgency in noisy and reverberant environments. The results obtained from 18 participants showed that the word correct rate (WCR) was significantly higher for urgently spoken speech than for normal speech, and for congruent text than for incongruent/no text. However, there was no speaking style-text interaction, indicating that the improvement in WCR provided by urgent speech over normal speech was the same regardless of the preceding text condition. This suggests that listeners rely more on visual information when speech intelligibility is poor. The results for perceived urgency also showed that the congruent condition was rated “evacuate now”, while the incongruent condition was rated “wait and see”. These results suggest that simple combinations of speaking style and textual information decrease the intelligibility of emergency PA announcements, and audio-visual incongruence must be considered.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nursadul Mamun|AUTHOR Nursadul Mamun]], [[Ria Ghosh|AUTHOR Ria Ghosh]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3118–3122&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speaker recognition is a biometric modality that uses underlying speech information to determine the identity of the speaker. Speaker Identification (SID) under noisy conditions is one of the challenging topics in the field of speech processing, specifically when it comes to individuals with cochlear implants (CI). This study analyzes and quantifies the ability of CI-users to perform speaker identification based on direct electric auditory stimuli. CI users employ a limited number of frequency bands (8~22) and use electrodes to directly stimulate the Basilar Membrane/Cochlear in order to recognize the speech signal. The sparsity of electric stimulation within the CI frequency range is a prime reason for loss in human speech recognition, as well as SID performance. Therefore, it is assumed that CI-users might be unable to recognize and distinguish a speaker given dependent information such as formant frequencies, pitch etc. which are lost to un-simulated electrodes. To quantify this assumption, the input speech signal is processed using a CI Advanced Combined Encoder (ACE) signal processing strategy to construct the CI auditory electrodogram. The proposed study uses 50 speakers from each of three different databases for training the system using two different classifiers under quiet, and tested under both quiet and noisy conditions. The objective result shows that, the CI users can effectively identify a limited number of speakers. However, their performance decreases when more speakers are added in the system, as well as when noisy conditions are introduced. This information could therefore be used for improving CI-user signal processing techniques to improve human SID.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[E. Felker|AUTHOR E. Felker]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]], [[Mirjam Broersma|AUTHOR Mirjam Broersma]]
</p><p class="cpabstractcardaffiliationlist">Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3123–3127&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Lexically guided perceptual learning has traditionally been studied with ambiguous consonant sounds to which native listeners are exposed in a purely receptive listening context. To extend previous research, we investigate whether lexically guided learning applies to a vowel shift encountered by non-native listeners in an interactive dialogue. Dutch participants played a two-player game in English in either a control condition, which contained no evidence for a vowel shift, or a lexically constraining condition, in which onscreen lexical information required them to re-interpret their interlocutor’s /ɪ/ pronunciations as representing /ε/. A phonetic categorization pre-test and post-test were used to assess whether the game shifted listeners’ phonemic boundaries such that more of the /ε/-/ɪ/ continuum came to be perceived as /ε/. Both listener groups showed an overall post-test shift toward /ɪ/, suggesting that vowel perception may be sensitive to directional biases related to properties of the speaker’s vowel space. Importantly, listeners in the lexically constraining condition made relatively more post-test /ε/ responses than the control group, thereby exhibiting an effect of lexically guided adaptation. The results thus demonstrate that non-native listeners can adjust their phonemic boundaries on the basis of lexical information to accommodate a vowel shift learned in interactive conversation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shadi Pirhosseinloo|AUTHOR Shadi Pirhosseinloo]], [[Jonathan S. Brumberg|AUTHOR Jonathan S. Brumberg]]
</p><p class="cpabstractcardaffiliationlist">University of Kansas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3143–3147&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we propose a novel dilated convolutional neural network for enhancing speech in noisy and reverberant environments. The proposed model incorporates dilated convolutions for tracking a target speaker through context aggregations, skip connections, and residual learning for mapping-based monaural speech enhancement. The performance of our model was evaluated in a variety of simulated environments having different reverberation times and quantified using two objective measures. Experimental results show that the proposed model outperforms a long short-term memory (LSTM), a gated residual network (GRN) and convolutional recurrent network (CRN) model in terms of objective speech intelligibility and speech quality in noisy and reverberant environments. Compared to LSTM, CRN and GRN, our method has improved generalization to untrained speakers and noise, and has fewer training parameters resulting in greater computational efficiency.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xianyun Wang|AUTHOR Xianyun Wang]], [[Changchun Bao|AUTHOR Changchun Bao]]
</p><p class="cpabstractcardaffiliationlist">Beijing University of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3188–3192&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural network (DNN) has become a popular means for separating target speech from noisy speech due to its good performance for learning a mapping relationship between the training target and noisy speech. For the DNN-based methods, the time-frequency (T-F) mask commonly used as the training target has a significant impact on the performance of speech restoration. However, the T-F mask generally modifies magnitude spectrum of noisy speech and leaves phase spectrum unchanged in enhancing process. The recent studies have revealed that incorporating phase spectrum information into the T-F mask can effectively improve perceptual quality of the enhanced speech. So, in this paper, we present two T-F masks to simultaneously enhance magnitude and phase of speech spectrum based on non-correlation assumption of real part and imaginary part about speech spectrum, and use them as the training target of the DNN model. Experimental results show that, in comparison with the reference methods, the proposed method can obtain an effective improvement in speech quality for different signal to noise ratio (SNR) conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jorge Llombart|AUTHOR Jorge Llombart]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Luis Vicente|AUTHOR Luis Vicente]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]
</p><p class="cpabstractcardaffiliationlist">Universidad de Zaragoza, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3193–3197&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper studies the Speech Enhancement based on Deep Neural Networks. The proposed architecture gradually follows the signal transformation during enhancement by means of a visualization probe at each network block. Alongside the process, the enhancement performance is visually inspected and evaluated in terms of regression cost. This progressive scheme is based on Residual Networks. During the process, we investigate a residual connection with a constant number of channels, including internal state between blocks, and adding progressive supervision. The insights provided by the interpretation of the network enhancement process leads us to design an improved architecture for the enhancement purpose. Following this strategy, we are able to obtain speech enhancement results beyond the state-of-the-art, achieving a favorable trade-off between dereverberation and the amount of spectral distortion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chien-Feng Liao|AUTHOR Chien-Feng Liao]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^, [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]^^2^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica, Taiwan; ^^2^^National Taiwan University, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3148–3152&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we propose a novel noise adaptive speech enhancement (SE) system, which employs a domain adversarial training (DAT) approach to tackle the issue of a noise type mismatch between the training and testing conditions. Such a mismatch is a critical problem in deep-learning-based SE systems. A large mismatch may cause a serious performance degradation to the SE performance. Because we generally use a well-trained SE system to handle various unseen noise types, a noise type mismatch commonly occurs in real-world scenarios. The proposed noise adaptive SE system contains an encoder-decoder-based enhancement model and a domain discriminator model. During adaptation, the DAT approach encourages the encoder to produce noise-invariant features based on the information from the discriminator model and consequentially increases the robustness of the enhancement model to unseen noise types. Herein, we regard stationary noises as the source domain (with the ground truth of clean speech) and non-stationary noises as the target domain (without the ground truth). We evaluated the proposed system on TIMIT sentences. The experiment results show that the proposed noise adaptive SE system successfully provides significant improvements in PESQ (19.0%), SSNR (39.3%), and STOI (27.0%) over the SE system without an adaptation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meng Ge|AUTHOR Meng Ge]]^^1^^, [[Longbiao Wang|AUTHOR Longbiao Wang]]^^1^^, [[Nan Li|AUTHOR Nan Li]]^^1^^, [[Hao Shi|AUTHOR Hao Shi]]^^1^^, [[Jianwu Dang|AUTHOR Jianwu Dang]]^^1^^, [[Xiangang Li|AUTHOR Xiangang Li]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Tianjin University, China; ^^2^^DiDi Chuxing, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3153–3157&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement aims to keep the real speech signal and reduce noise for building robust communication systems. Under the success of DNN, significant progress has been made. Nevertheless, accuracy of the speech enhancement system is not satisfactory due to insufficient consideration of varied environmental and contextual information in complex cases. To address these problems, this research proposes an end-to-end environment-dependent attention-driven approach. The local frequency-temporal pattern via convolutional neural network is fully employed without pooling operation. It then integrates an attention mechanism into bidirectional long short-term memory to acquire the weighted dynamic context between consecutive frames. Furthermore, dynamic environment estimation and phase correction further improve the generalization ability. Extensive experimental results on REVERB challenge demonstrated that the proposed approach outperformed existing methods, improving PESQ from 2.56 to 2.87 and SRMR from 4.95 to 5.50 compared with conventional DNN.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Manuel Pariente|AUTHOR Manuel Pariente]], [[Antoine Deleforge|AUTHOR Antoine Deleforge]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]
</p><p class="cpabstractcardaffiliationlist">Loria (UMR 7503), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3158–3162&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recent studies have explored the use of deep generative models of speech spectra based on variational autoencoders (VAEs), combined with unsupervised noise models, to perform speech enhancement. These studies developed iterative algorithms involving either Gibbs sampling or gradient descent at each step, making them computationally expensive. This paper proposes a variational inference method to iteratively estimate the power spectrogram of the clean speech. Our main contribution is the analytical derivation of the variational steps in which the encoder of the pre-learned VAE can be used to estimate the variational approximation of the true posterior distribution, using the very same assumption made to train VAEs. Experiments show that the proposed method produces results on par with the aforementioned iterative methods using sampling, while decreasing the computational cost by a factor 36 to reach a given performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ju Lin|AUTHOR Ju Lin]]^^1^^, [[Sufeng Niu|AUTHOR Sufeng Niu]]^^2^^, [[Zice Wei|AUTHOR Zice Wei]]^^1^^, [[Xiang Lan|AUTHOR Xiang Lan]]^^1^^, [[Adriaan J. van Wijngaarden|AUTHOR Adriaan J. van Wijngaarden]]^^3^^, [[Melissa C. Smith|AUTHOR Melissa C. Smith]]^^1^^, [[Kuang-Ching Wang|AUTHOR Kuang-Ching Wang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Clemson University, USA; ^^2^^LinkedIn, USA; ^^3^^Nokia Bell Labs, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3163–3167&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech enhancement techniques that use a generative adversarial network (GAN) can effectively suppress noise while allowing models to be trained end-to-end. However, such techniques directly operate on time-domain waveforms, which are often highly-dimensional and require extensive computation. This paper proposes a novel GAN-based speech enhancement method, referred to as S-ForkGAN, that operates on log-power spectra rather than on time-domain speech waveforms, and uses a forked GAN structure to extract both speech and noise information. By operating on log-power spectra, one can seamlessly include conventional spectral subtraction techniques, and the parameter space typically has a lower dimension. The performance of S-ForkGAN is assessed for automatic speech recognition (ASR) using the TIMIT data set and a wide range of noise conditions. It is shown that S-ForkGAN outperforms existing GAN-based techniques and that it has a lower complexity.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ryandhimas E. Zezario|AUTHOR Ryandhimas E. Zezario]]^^1^^, [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]]^^1^^, [[Xugang Lu|AUTHOR Xugang Lu]]^^2^^, [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Academia Sinica, Taiwan; ^^2^^NICT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3168–3172&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous studies have shown that a specialized speech enhancement model can outperform a general model when the test condition is matched to the training condition. Therefore, choosing the correct (matched) candidate model from a set of ensemble models is critical to achieve generalizability. Although the best decision criterion should be based directly on the evaluation metric, the need for a clean reference makes it impractical for employment. In this paper, we propose a novel specialized speech enhancement model selection (SSEMS) approach that applies a non-intrusive quality estimation model, termed Quality-Net, to solve this problem. Experimental results first confirm the effectiveness of the proposed SSEMS approach. Moreover, we observe that the correctness of Quality-Net in choosing the most suitable model increases as input noisy SNR increases, and thus the results of the proposed systems outperform another auto-encoder-based model selection and a general model, particularly under high SNR conditions.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Fu-Kai Chuang|AUTHOR Fu-Kai Chuang]]^^1^^, [[Syu-Siang Wang|AUTHOR Syu-Siang Wang]]^^2^^, [[Jeih-weih Hung|AUTHOR Jeih-weih Hung]]^^3^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^4^^, [[Shih-Hau Fang|AUTHOR Shih-Hau Fang]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Yuan Ze University, Taiwan; ^^2^^Yuan Ze University, Taiwan; ^^3^^National Chi Nan University, Taiwan; ^^4^^Academia Sinica, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3173–3177&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous studies indicate that noise and speaker variations can degrade the performance of deep-learning-based speech-enhancement systems. To increase the system performance over environmental variations, we propose a novel speaker-aware system that integrates a deep denoising autoencoder (DDAE) with an embedded speaker identity. The overall system first extracts embedded speaker identity features using a neural network model; then the DDAE takes the augmented features as input to generate enhanced spectra. With the additional embedded features, the speech-enhancement system can be guided to generate the optimal output corresponding to the speaker identity. We tested the proposed speech-enhancement system on the TIMIT dataset. Experimental results showed that the proposed speech-enhancement system could improve the sound quality and intelligibility of speech signals from additive noise-corrupted utterances. In addition, the results suggested system robustness for unseen speakers when combined with speaker features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yun Liu|AUTHOR Yun Liu]]^^1^^, [[Hui Zhang|AUTHOR Hui Zhang]]^^1^^, [[Xueliang Zhang|AUTHOR Xueliang Zhang]]^^1^^, [[Yuhang Cao|AUTHOR Yuhang Cao]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Inner Mongolia University, China; ^^2^^Unisound, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3178–3182&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech separation aims to improve the speech quality of noisy speech. Deep learning based speech separation methods usually use mean square error (MSE) as the cost function, which measures the distance between model output and training target. However, the MSE does not match the evaluation metrics perfectly. Optimizing the MSE does not directly lead to improvement in the commonly used metrics, such as short-time objective intelligibility (STOI), perceptual evaluation of speech quality (PESQ), signal-to-noise ratio (SNR) and source-to-distortion ratio (SDR). In this study, we inspect some other cost function candidates which are based on divergence, e.g., Kullback-Leibler and Itakura-Saito divergence. A conjecture about the correlation between cost function and evaluation metrics is proposed and examined to explain why these cost functions behave differently. On the basis of the proposed conjecture, the optimal cost function candidate is selected. The experimental results validate our conjecture.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ziqiang Shi|AUTHOR Ziqiang Shi]]^^1^^, [[Huibin Lin|AUTHOR Huibin Lin]]^^1^^, [[Liu Liu|AUTHOR Liu Liu]]^^1^^, [[Rujie Liu|AUTHOR Rujie Liu]]^^1^^, [[Jiqing Han|AUTHOR Jiqing Han]]^^2^^, [[Anyan Shi|AUTHOR Anyan Shi]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fujitsu, China; ^^2^^Harbin Institute of Technology, China; ^^3^^ShuangFeng First, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3183–3187&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Monaural speech separation techniques are far from satisfactory and are a challenging task due to interference from multiple sources. Recently the deep dilated temporal convolutional networks (TCN) has proven to be very effective in sequence modeling. This work explores how to extend TCN to result a new, state-of-the-art monaural speech separation method. First, a new gating mechanism is introduced and added to generate a gated TCN. The gated activation controls the flow of information. Further in order to combine multiple training models to reduce the performance variance and improve the effect of speech separation, we propose to use the principle of ensemble learning in the gated TCN architecture by replacing the convolutional modules corresponding to each dilated factor with multiple identical branches of the convolutional components. For the sake of objectivity, we propose to train the network by directly optimizing in a permutation invariant training (PIT) style of the utterance level signal-to-distortion ratio (SDR). Our experiments with the public WSJ0-2mix data corpus resulted in an 18.2 dB improvement in SDR, indicating that our proposed network can improve the performance of speaker separation tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Langzhou Chen|AUTHOR Langzhou Chen]]^^1^^, [[Volker Leutnant|AUTHOR Volker Leutnant]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, UK; ^^2^^Amazon, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3198–3202&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work aims at bootstrapping acoustic model training for automatic speech recognition with small amounts of human-labeled speech data and large amounts of machine-labeled speech data. Semi-supervised learning is investigated to select the machine-transcribed training samples. Two semi-supervised learning methods are proposed: one is the local-global uncertainty based method which introduces both the local uncertainty from the current utterance and the global uncertainty from the whole data pool into the data selection; the other is the margin based data selection, which selects the utterances near to the decision boundary through language model tuning. The experimental results based on a Japanese far-field automatic speech recognition system indicate that the acoustic model trained by automatically transcribed speech data achieve about 17% relative gain when in-domain human annotated data was not available for initialization. While 3.7% relative gain was obtained when the initial acoustic model was trained by small amount of in-domain data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[David Harwath|AUTHOR David Harwath]], [[James Glass|AUTHOR James Glass]]
</p><p class="cpabstractcardaffiliationlist">MIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3242–3246&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Transfer learning aims to reduce the amount of data required to excel at a new task by re-using the knowledge acquired from learning other related tasks. This paper proposes a novel transfer learning scenario, which distills robust phonetic features from grounding models that are trained to tell whether a pair of image and speech are semantically correlated, without using any textual transcripts. As semantics of speech are largely determined by its lexical content, grounding models learn to preserve phonetic information while disregarding uncorrelated factors, such as speaker and channel. To study the properties of features distilled from different layers, we use them as input separately to train multiple speech recognition models. Empirical results demonstrate that layers closer to input retain more phonetic information, while following layers exhibit greater invariance to domain shift. Moreover, while most previous studies include training data for speech recognition for feature extractor training, our grounding models are not trained on any of those data, indicating more universal applicability to new domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gautam Mantena|AUTHOR Gautam Mantena]], [[Ozlem Kalinli|AUTHOR Ozlem Kalinli]], [[Ossama Abdel-Hamid|AUTHOR Ossama Abdel-Hamid]], [[Don McAllaster|AUTHOR Don McAllaster]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3203–3207&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we tackle the problem of handling narrowband and wideband speech by building a single acoustic model (AM), also called mixed bandwidth AM. In the proposed approach, an auxiliary input feature is used to provide the bandwidth information to the model, and bandwidth embeddings are jointly learned as part of acoustic model training. Experimental evaluations show that using bandwidth embeddings helps the model to handle the variability of the narrow and wideband speech, and makes it possible to train a mixed-bandwidth AM. Furthermore, we propose to use parallel convolutional layers to handle the mismatch between the narrow and wideband speech better, where separate convolution layers are used for each type of input speech signal. Our best system achieves 13% relative improvement on narrowband speech, while not degrading on wideband speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shreya Khare|AUTHOR Shreya Khare]]^^1^^, [[Rahul Aralikatte|AUTHOR Rahul Aralikatte]]^^2^^, [[Senthil Mani|AUTHOR Senthil Mani]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IBM, India; ^^2^^University of Copenhagen, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3208–3212&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Fooling deep neural networks with adversarial input have exposed a significant vulnerability in the current state-of-the-art systems in multiple domains. Both black-box and white-box approaches have been used to either replicate the model itself or to craft examples which cause the model to fail. In this work, we propose a framework which uses multi-objective evolutionary optimization to perform both targeted and un-targeted black-box attacks on Automatic Speech Recognition (ASR) systems. We apply this framework on two ASR systems: Deepspeech and Kaldi-ASR, which increases the Word Error Rates (WER) of these systems by upto 980%, indicating the potency of our approach. During both un-targeted and targeted attacks, the adversarial samples maintain a high acoustic similarity of 0.98 and 0.97 with the original audio.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bilal Soomro|AUTHOR Bilal Soomro]], [[Anssi Kanervisto|AUTHOR Anssi Kanervisto]], [[Trung Ngo Trong|AUTHOR Trung Ngo Trong]], [[Ville Hautamäki|AUTHOR Ville Hautamäki]]
</p><p class="cpabstractcardaffiliationlist">University of Eastern Finland, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3213–3217&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Deep neural networks (DNN) are able to successfully process and classify speech utterances. However, understanding the reason behind a classification by DNN is difficult. One such debugging method used with image classification DNNs is  activation maximization, which generates example-images that are classified as one of the classes. In this work, we evaluate applicability of this method to speech utterance classifiers as the means to understanding what DNN “listens to”. We trained a classifier using the speech command corpus and then use activation maximization to pull samples from the trained model. Then we synthesize audio from features using WaveNet vocoder for subjective analysis. We measure the quality of generated samples by objective measurements and crowd-sourced human evaluations. Results show that when combined with the prior of natural speech, activation maximization can be used to generate examples of different classes. Based on these results, activation maximization can be used to start opening up the DNN black-box in speech tasks.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haisong Ding|AUTHOR Haisong Ding]]^^1^^, [[Kai Chen|AUTHOR Kai Chen]]^^2^^, [[Qiang Huo|AUTHOR Qiang Huo]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Microsoft, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3218–3222&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Knowledge distillation (KD) has been widely used for model compression by learning a simpler student model to imitate the outputs or intermediate representations of a more complex teacher model. The most commonly used KD technique is to minimize a Kullback-Leibler divergence between the output distributions of the teacher and student models. When it is applied to compressing CTC-trained acoustic models, an assumption is made that the teacher and student share the same frame-wise feature-transcription alignment, which is usually not true due to the topology difference of the teacher and student models. In this paper, by making more appropriate assumptions, we propose two KD methods, namely dynamic frame-wise distillation and segment-wise N-best hypotheses imitation. Experimental results on Switchboard-I speech recognition task show that the segment-wise N-best hypotheses imitation outperforms the frame-level and other sequence-level distillation methods, and achieves a relative word error rate reduction of 5%–8% compared with models trained from scratch.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Iván López-Espejo|AUTHOR Iván López-Espejo]], [[Zheng-Hua Tan|AUTHOR Zheng-Hua Tan]], [[Jesper Jensen|AUTHOR Jesper Jensen]]
</p><p class="cpabstractcardaffiliationlist">Aalborg University, Denmark</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3223–3227&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword spotting (KWS) is experiencing an upswing due to the pervasiveness of small electronic devices that allow interaction with them via speech. Often, KWS systems are speaker-independent, which means that any person — user or not — might trigger them. For applications like KWS for hearing assistive devices this is unacceptable, as only the user must be allowed to handle them. In this paper we propose KWS for hearing assistive devices that is robust to external speakers. A state-of-the-art deep residual network for small-footprint KWS is regarded as a basis to build upon. By following a multi-task learning scheme, this system is extended to jointly perform KWS and users’ own-voice/external speaker detection with a negligible increase in the number of parameters. For experiments, we generate from the Google Speech Commands Dataset a speech corpus emulating hearing aids as a capturing device. Our results show that this multi-task deep residual network is able to achieve a KWS accuracy relative improvement of around 32% with respect to a system that does not deal with external speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mortaza Doulaty|AUTHOR Mortaza Doulaty]]^^1^^, [[Thomas Hain|AUTHOR Thomas Hain]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Microsoft, Germany; ^^2^^University of Sheffield, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3228–3232&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Selecting in-domain data from a large pool of diverse and out-of-domain data is a non-trivial problem. In most cases simply using all of the available data will lead to sub-optimal and in some cases even worse performance compared to carefully selecting a matching set. This is true even for data-inefficient neural models. Acoustic Latent Dirichlet Allocation (aLDA) is shown to be useful in a variety of speech technology related tasks, including domain adaptation of acoustic models for automatic speech recognition and entity labeling for information retrieval. In this paper we propose to use aLDA as a data similarity criterion in a data selection framework. Given a large pool of out-of-domain and potentially mismatched data, the task is to select the best-matching training data to a set of representative utterances sampled from a target domain. Our target data consists of around 32 hours of meeting data (both far-field and close-talk) and the pool contains 2k hours of meeting, talks, voice search, dictation, command-and-control, audio books, lectures, generic media and telephony speech data. The proposed technique for training data selection, significantly outperforms random selection, posterior-based selection as well as using all of the available data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wenjie Li|AUTHOR Wenjie Li]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]
</p><p class="cpabstractcardaffiliationlist">Chinese Academy of Sciences, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3233–3237&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>It is very challenging to do multi-talker automatic speech recognition (ASR). Some speaker-aware selective methods have been proposed to recover the speech of the target speaker, relying on the auxiliary speaker information provided by an anchor (a clean audio sample of the target speaker). But the performance is unstable depending on the quality of the provided anchors. To address this limitation, we propose to take advantage of the average speaker embeddings to build the target speaker recovery network (TRnet). The TRnet takes the mixed speech and the stable average speaker embeddings to produce the TF masks for the target speech. During training of the TRnet, we summarize the speaker embeddings on the whole training dataset for each speaker, instead of extracting on a randomly picked anchor. On the testing stage, one or very few anchors are enough to get decent recovery results. The results of the TRnet trained with average speaker embeddings show 13% and 12.5% relative improvements on WER and SDR, compared with the short-anchor trained model. Moreover, to mitigate the mismatch between the TRnet and the acoustic model (AM), we adopted two strategies: fine-tuning the AM and training an global TRnet. Both of them bring considerable reductions on WER. The results show that the global trained framework gets superior performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Motoyuki Suzuki|AUTHOR Motoyuki Suzuki]], [[Sho Tomita|AUTHOR Sho Tomita]], [[Tomoki Morita|AUTHOR Tomoki Morita]]
</p><p class="cpabstractcardaffiliationlist">Osaka Institute of Technology, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3238–3241&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Lyrics recognition from singing voice is one of the most important techniques for query-by-singing music information retrieval systems. Lyrics information realizes a higher retrieval performance than retrieval using only melody information.

However, recognizing a song lyrics from singing voice is very difficult. In order to improve recognition, a new method focused on correspondence between voice and notes has been proposed. Note boundary scores are calculated for each frame, and these values are included in feature vectors by expanding their dimensions. The marker HMM is defined to correspond to feature vectors located at note boundaries, and the marker HMM is inserted among all morae in a pronunciation dictionary. As a result, the recognizer restricts an individual mora to correspond to only one note.

We also modified the marker HMM in order to account for short pauses in a particular position. A short pause corresponding to a musical rest or breath may occur after any morae, even if inside a word. The short pause HMM is concatenated to the marker HMM, and a skip transition arc of the short pause HMM is also introduced.

From experimental results, the proposed model provided higher word accuracy than the baseline model. It improved word accuracy from 85.71% to 93.18%, which means that 52.3% of the word error rate decreased. Insertion errors, especially, were drastically suppressed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hui Luo|AUTHOR Hui Luo]], [[Jiqing Han|AUTHOR Jiqing Han]]
</p><p class="cpabstractcardaffiliationlist">Harbin Institute of Technology, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3247–3251&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper focuses on a cross-corpus speech emotion recognition (SER) task, in which there are some mismatches between the training corpus and the testing corpus. Meanwhile, the label information of the training corpus is known, while the label information of the testing corpus is entirely unknown. To alleviate the influence of these mismatches on the recognition system under this setting, we present a non-negative matrix factorization (NMF) based cross-corpus speech emotion recognition method, called semi-supervised adaptation regularized transfer NMF (SATNMF). The core idea of SATNMF is to incorporate the label information of training corpus into NMF, and seek a latent low-rank feature space, in which the marginal and conditional distribution differences between the two corpora can be minimized simultaneously. Specifically, in this induced feature space, the maximum mean discrepancy (MMD) criterion is used to measure the discrepancies of not only two corpora, but also each class within the two corpora. Moreover, to further exploit the knowledge of the marginal distributions, their underlying manifold structure is considered by using the manifold regularization. Experiments on four popular emotional corpora show that the proposed method achieves better recognition accuracies than state-of-the-art methods.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yi Zhao|AUTHOR Yi Zhao]]^^1^^, [[Atsushi Ando|AUTHOR Atsushi Ando]]^^2^^, [[Shinji Takaki|AUTHOR Shinji Takaki]]^^1^^, [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]^^1^^, [[Satoshi Kobashikawa|AUTHOR Satoshi Kobashikawa]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NII, Japan; ^^2^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3292–3296&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speakers usually adjust their way of talking in noisy environments involuntarily for effective communication. This adaptation is known as the Lombard effect. Although speech accompanying the Lombard effect can improve the intelligibility of a speaker’s voice, the changes in acoustic features (e.g. fundamental frequency, speech intensity, and spectral tilt) caused by the Lombard effect may also affect the listener’s judgment of emotional content. To the best of our knowledge, there is no published study on the influence of the Lombard effect in emotional speech. Therefore, we recorded parallel emotional speech waveforms uttered by 12 speakers under both quiet and noisy conditions in a professional recording studio in order to explore how the Lombard effect interacts with emotional speech. By analyzing confusion matrices and acoustic features, we aim to answer the following questions: 1) Can speakers express their emotions correctly even under adverse conditions? 2) Can listeners recognize the emotion contained in speech signals even under noise? 3) How does emotional speech uttered in noise differ from emotional speech uttered in quiet conditions in terms of acoustic characteristic?</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Soumaya Gharsellaoui|AUTHOR Soumaya Gharsellaoui]], [[Sid Ahmed Selouani|AUTHOR Sid Ahmed Selouani]], [[Mohammed Sidi Yakoub|AUTHOR Mohammed Sidi Yakoub]]
</p><p class="cpabstractcardaffiliationlist">Université de Moncton, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3297–3301&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, an evolutionary algorithm is used to select an optimal set of acoustic features for emotional speech recognition. A new algorithm that combines differential evolution (DE) optimization and linear discriminant analysis (LDA) is proposed to design an effective feature selection and classification model. An original acoustic feature framework based on auditory modeling is also presented. The auditory-based features are provided as inputs to the DE-LDA based emotional speech recognition system. To evaluate the effectiveness of the DE-LDA approach, a subset of the Emotion Prosody Speech and Transcript corpus covering five emotional states (happiness, anger, panic, sadness, and interest) is used throughout the experiments. The results show that the proposed DE-LDA model performs significantly better than the baseline systems. It achieves a classification rate of 91.6% using only 50 input parameters that are optimally selected from 128 original acoustic features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Saurabh Sahu|AUTHOR Saurabh Sahu]], [[Vikramjit Mitra|AUTHOR Vikramjit Mitra]], [[Nadee Seneviratne|AUTHOR Nadee Seneviratne]], [[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]
</p><p class="cpabstractcardaffiliationlist">University of Maryland at College Park, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3302–3306&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we plan to leverage multi-modal learning and automated speech recognition (ASR) systems toward building a speech-only emotion recognition model. Previous studies have shown that emotion recognition models using only acoustic features do not perform satisfactorily in detecting valence level. Text analysis has been shown to be helpful for sentiment classification. We compared classification accuracies obtained from an audio-only model, a text-only model and a multi-modal system leveraging both by performing a cross-validation analysis on IEMOCAP dataset. Confusion matrices show it’s the valence level detection that is being improved by incorporating textual information. In the second stage of experiments, we used two ASR application programming interfaces (APIs) to get the transcriptions. We compare the performances of multi-modal systems using the ASR transcriptions with each other and with that of one using ground truth transcription. We analyze the confusion matrices to determine the effect of using ASR transcriptions instead of ground truth ones on class-wise accuracies. We investigate the generalisability of such a model by performing a cross-corpus study.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Aniruddha Tammewar|AUTHOR Aniruddha Tammewar]]^^1^^, [[Alessandra Cervone|AUTHOR Alessandra Cervone]]^^1^^, [[Eva-Maria Messner|AUTHOR Eva-Maria Messner]]^^2^^, [[Giuseppe Riccardi|AUTHOR Giuseppe Riccardi]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Trento, Italy; ^^2^^Universität Ulm, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3252–3256&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automated prediction of valence, one key feature of a person’s emotional state, from individuals’ personal narratives may provide crucial information for mental healthcare (e.g. early diagnosis of mental diseases, supervision of disease course, etc.). In the Interspeech 2018 ComParE Self-Assessed Affect challenge, the task of valence prediction was framed as a three-class classification problem using 8 seconds fragments from individuals’ narratives. As such, the task did not allow for exploring contextual information of the narratives. In this work, we investigate the intrinsic information from multiple narratives recounted by the same individual in order to predict their current state-of-mind. Furthermore, with generalizability in mind, we decided to focus our experiments exclusively on textual information as the public availability of audio narratives is limited compared to text. Our hypothesis is that context modeling might provide insights about emotion triggering concepts (e.g. events, people, places) mentioned in the narratives that are linked to an individual’s state of mind. We explore multiple machine learning techniques to model narratives. We find that the models are able to capture inter-individual differences, leading to more accurate predictions of an individual’s emotional state, as compared to single narratives.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rupayan Chakraborty|AUTHOR Rupayan Chakraborty]], [[Ashish Panda|AUTHOR Ashish Panda]], [[Meghna Pandharipande|AUTHOR Meghna Pandharipande]], [[Sonal Joshi|AUTHOR Sonal Joshi]], [[Sunil Kumar Kopparapu|AUTHOR Sunil Kumar Kopparapu]]
</p><p class="cpabstractcardaffiliationlist">TCS Innovation Labs Mumbai, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3257–3261&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Front-end processing is one of the ways to impart noise robustness to speech emotion recognition systems in mismatched scenarios. Here, we implement and compare different frontend robustness techniques for their efficacy in speech emotion recognition. First, we use a feature compensation technique based on the Vector Taylor Series (VTS) expansion of noisy Mel-Frequency Cepstral Coefficients (MFCCs). Next, we improve upon the feature compensation technique by using the VTS expansion with auditory masking formulation. We have also looked into the applicability of 10^^th^^-root compression in MFCC computation. Further, a Time Delay Neural Network based Denoising Autoencoder (TDNN-DAE) is implemented to estimate the clean MFCCs from the noisy MFCCs. These techniques have not been investigated yet for their suitability to robust speech emotion recognition task. The performance of these front-end techniques are compared with the Non-Negative Matrix Factorization (NMF) based front-end. Relying on extensive experiments done on two standard databases (EmoDB and IEMOCAP), contaminated with 5 types of noise, we show that these techniques provide significant performance gain in emotion recognition task. We also show that along with front-end compensation, applying feature selection to non-MFCC high-level descriptors results in better performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xingfeng Li|AUTHOR Xingfeng Li]], [[Masato Akagi|AUTHOR Masato Akagi]]
</p><p class="cpabstractcardaffiliationlist">JAIST, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3262–3266&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The multi-layered perceptual process of emotion in human speech plays an essential role in the field of affective computing for underlying a speaker’s state. However, a comprehensive process analysis of emotion perception is still challenging due to the lack of powerful acoustic features allowing accurate inference of emotion across speaker and language diversities. Most previous research works study acoustic features mostly using Fourier transform, short time Fourier transform or linear predictive coding. Even though these features may be useful for stationary signal within short frames, they may not capture the localized event adequately as speech transmits emotion information dynamically over time. This case introduces a set of acoustic features via wavelet transform analysis of the speech signal, and specifically, models the perceptual process of emotion for language diversity. For this aim, the proposed features are analyzed in a three-layer emotion perception model across multiple languages. Experiments show that the proposed acoustic features significantly enhance the perceptual process of emotion and render a better result in multilingual emotion recognition when compared it to the widely used prosodic and spectral features, as well as their combination in literature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rajeev Rajan|AUTHOR Rajeev Rajan]], [[Haritha U. G.|AUTHOR Haritha U. G.]], [[Sujitha A. C.|AUTHOR Sujitha A. C.]], [[Rejisha T. M.|AUTHOR Rejisha T. M.]]
</p><p class="cpabstractcardaffiliationlist">College of Engineering Trivandrum, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3267–3271&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the design, the development of a new multilingual emotional speech corpus, TaMaR- EmoDB (Tamil Malayalam Ravula - Emotion DataBase) and its evaluation using a deep neural network (DNN)-baseline system. The corpus consists of utterances from three languages, namely, Malayalam, Tamil and Ravula, a tribal language. The database consists of short speech utterances in four emotions - anger, anxiety, happiness, and sadness, along with neutral utterances. The subset of the corpus is first evaluated using a perception test, in order to understand how well the emotional state in emotional speech is identified by humans. Later, machine testing is performed using the fusion of spectral and prosodic features with DNN framework. During the classification phase, the system reports an average precision of 0.78, 0.60, 0.61 and recall of 0.84, 0.61 and 0.53 for Malayalam, Tamil, and Ravula, respectively. This database can potentially be used as a new linguistic resource that will enable future research in speech emotion detection, corpus-based prosody analysis, and speech synthesis.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kusha Sridhar|AUTHOR Kusha Sridhar]], [[Carlos Busso|AUTHOR Carlos Busso]]
</p><p class="cpabstractcardaffiliationlist">University of Texas at Dallas, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3272–3276&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p> Speech emotion recognition (SER) for categorical descriptors is a difficult task when the recordings come from everyday spontaneous interactions. The boundaries between emotional classes are less clear, resulting in complex, mixed emotions. Since the performance of a SER system varies across speech recordings, it is important to understand the reliability associated with its prediction. An intriguing formulation in machine learning related to this problem is the reject option, where a classifier only provides predictions over samples with reliability above a given threshold. This paper proposes a classification technique with a reject option using  deep neural networks (DNNs) that increases its performance by selectively trading its coverage in the testing set. We use two different criteria to develop a SER system with a reject option, where it can accept or reject a sample as needed. Using the MSP-Podcast corpus, we evaluate this idea by comparing different classification performance as a function of coverage. By selectively defining a coverage of 75% of the samples, we obtain relative gains in F1-score of up to 25.71% for a five-class problem and 20.63% for an eight-class problem. The sentences that are rejected are analyzed in the evaluation, confirming that they have lower inter-evaluator agreement.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhenghao Jin|AUTHOR Zhenghao Jin]], [[Houwei Cao|AUTHOR Houwei Cao]]
</p><p class="cpabstractcardaffiliationlist">New York Institute of Technology, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3277–3281&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In emotion datasets, intended emotion labels and perceived emotion labels both contain valuable information about how human express and perceive emotions, and there is a considerable mismatch between the two. In this paper, we propose a novel method to derive relative labels for preference learning using both the intended labels during emotion expression and the perceived labels given by all raters during perceptual evaluation. Based on analyzing the agreement between the intended and perceived labels, as well as the consistence among all perceptual ratings, we propose three pairwise ranking rules to generate multi-scale relevant scores for preference learning. We further build three sets of rankers for six basic emotions based on the three ranking rules. Through evaluation on the CREMA-D database, we demonstrate that, by considering both intended and perceived labels, our proposed rankers significantly outperform the rankers only relying on the perceptual ratings. We further combine the ranking scores of individual emotions for multi-class classification. Through experiments, we show that the emotion classification systems with ranking information significantly outperform the conventional SVM classifiers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[John Gideon|AUTHOR John Gideon]]^^1^^, [[Heather T. Schatten|AUTHOR Heather T. Schatten]]^^2^^, [[Melvin G. McInnis|AUTHOR Melvin G. McInnis]]^^1^^, [[Emily Mower Provost|AUTHOR Emily Mower Provost]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Michigan, USA; ^^2^^Brown University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3282–3286&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Suicide is a serious public health concern in the U.S., taking the lives of over 47,000 people in 2017. Early detection of suicidal ideation is key to prevention. One promising approach to symptom monitoring is suicidal speech prediction, as speech can be passively collected and may indicate changes in risk. However, directly identifying suicidal speech is difficult, as characteristics of speech can vary rapidly compared with suicidal thoughts. Suicidal ideation is also associated with emotion dysregulation. Therefore, in this work, we focus on the detection of emotion from speech and its relation to suicide. We introduce the Ecological Measurement of Affect, Speech, and Suicide (EMASS) dataset, which contains phone call recordings of individuals recently discharged from the hospital following admission for suicidal ideation or behavior, along with controls. Participants self-report their emotion periodically throughout the study. However, the dataset is relatively small and has uncertain labels. Because of this, we find that most features traditionally used for emotion classification fail. We demonstrate how outside emotion datasets can be used to generate more relevant features, making this analysis possible. Finally, we use emotion predictions to differentiate healthy controls from those with suicidal ideation, providing evidence for suicidal speech detection using emotion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Deniece S. Nazareth|AUTHOR Deniece S. Nazareth]]^^1^^, [[Ellen Tournier|AUTHOR Ellen Tournier]]^^2^^, [[Sarah Leimkötter|AUTHOR Sarah Leimkötter]]^^1^^, [[Esther Janse|AUTHOR Esther Janse]]^^2^^, [[Dirk Heylen|AUTHOR Dirk Heylen]]^^1^^, [[Gerben J. Westerhof|AUTHOR Gerben J. Westerhof]]^^1^^, [[Khiet P. Truong|AUTHOR Khiet P. Truong]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universiteit Twente, The Netherlands; ^^2^^Radboud Universiteit Nijmegen, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3287–3291&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Analyzing emotional valence in spontaneous speech remains complex and challenging. We present an acoustic and lexical analysis of emotional valence in spontaneous speech of older adults. Data was collected by recalling autobiographical memories through a word association task. Due to the complex and personal nature of memories, we propose a novel coding scheme for emotional valence. We explore acoustic properties of speech as well as the use of affective words to predict emotional valence expressed in autobiographical memories. Using mixed-effect regression modelling, we compared predictive models based on acoustic information only, lexical information only, or a combination of both. Results show that the combined model accounts for the highest proportion of explained variance, with the acoustic features accounting for a smaller share of the total variance than the lexical features. Several acoustic and lexical features predicted valence. As a first attempt at analyzing spontaneous emotional speech in older adults autobiographical memories, the study provides more insight in which acoustic features can be used to predict valence (automatically) in a more ecologically valid setting.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Laura Spinu|AUTHOR Laura Spinu]]^^1^^, [[Maida Percival|AUTHOR Maida Percival]]^^2^^, [[Alexei Kochetov|AUTHOR Alexei Kochetov]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUNY Kingsborough Community College, USA; ^^2^^University of Toronto, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3307–3311&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study explores the articulatory characteristics of plain and palatalized fricatives in Romanian. Based on earlier acoustic findings, we hypothesize that there are differences in tongue raising and fronting depending on the primary place of articulation, with more subtle gestures produced in the vicinity of the palatal area. We also predict more individual variation in the realization of secondary palatalization in postalveolars, based on general cross-linguistic patterns.

Ten native speakers participated in an ultrasound experiment. The stimuli included real words containing labial, dental, and postalveolar fricatives. The fricatives at all three places were either plain or palatalized word-finally (the only position available for secondary palatalization in this language). Tongue contours at the consonant midpoint were compared individually using Smoothing Spline ANOVAs on radial distance from the ultrasound probe.

The findings indicate differences in tongue shape between plain and palatalized consonants, with stronger palatalization effects in labials compared to coronals, as well as in dentals compared to postalveolars. The latter also revealed higher individual variation. Our findings thus suggest that tongue configurations for secondary palatalization in Romanian differ by place of articulation. The contrast is also overall less robust in postalveolars, confirming previous reports and explaining its rarity cross-linguistically.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Louise Ratko|AUTHOR Louise Ratko]], [[Michael Proctor|AUTHOR Michael Proctor]], [[Felicity Cox|AUTHOR Felicity Cox]]
</p><p class="cpabstractcardaffiliationlist">Macquarie University, Australia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3312–3316&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The articulatory realisation of phonemic vowel length contrasts is still imperfectly understood. Australian English (AusE) /ɐː/ and /ɐ/ differ primarily in duration and therefore provide an ideal case for examining the articulatory properties of long vs. short vowels. Patterns of compression, acceleration ratios and VC coordination were examined using electromagnetic articulography (EMA) in /pVːp/ and /pVp/ syllables produced by three speakers of AusE at two speech rates. Short vowels were less compressible and had higher acceleration ratios than long vowels. VC rimes had proportionately earlier coda onsets than VːC rimes. These findings suggest that long and short vowels are characterised by different patterns of both intra- and intergestural organisation in AusE.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Andrea Deme|AUTHOR Andrea Deme]]^^1^^, [[Márton Bartók|AUTHOR Márton Bartók]]^^1^^, [[Tekla Etelka Gráczi|AUTHOR Tekla Etelka Gráczi]]^^2^^, [[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]^^3^^, [[Alexandra Markó|AUTHOR Alexandra Markó]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ELTE, Hungary; ^^2^^MTA-ELTE LingArt, Hungary; ^^3^^BME, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3317–3321&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In the present study we analyzed vowel variation induced by carryover V-to-V coarticulation under the effect of pitch-accent as a function of vowel quality (using a minimally constrained intervening consonant to maximize V-to-V effects). We tested if /i/ is more resistant to coarticulation than /u/, and if both vowels show increased coarticulatory resistance in pitch-accented syllables. Our approach was unprecedented in the sense that it involved the analysis of parallel acoustic (F,,2,,) and articulatory (x-axis dorsum position) data in a great number of speakers (9 speaker), and real words of Hungarian. To analyze the degree of coarticulation, we adopted the locus equation approach, and fitted linear models on vowel onset and midpoint data, and calculated the differences between coarticulated and non-coarticulated vowels in both domains. To measure variability, we calculated standard deviations of midpoint F,,2,, values and dorsum positions.

The results showed that accent clearly exerted an effect on the phonetic realization of the vowels, but the effect was dependent on both the vowel quality and the domain (articulation/acoustics) at hand. Observation of the patterns found in the parallel acoustic and articulatory data warrants reconsideration of the term ‘coarticulatory resistance’ and how it should be conceptualized.</p></div>
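The locus equation approach adopted above fits a straight line relating F,,2,, at vowel onset to F,,2,, at vowel midpoint across contexts. In its standard textbook form (shown only for orientation), the fitted slope k is commonly read as an index of the degree of coarticulation and c as the intercept:

```latex
F_{2}^{\mathrm{onset}} = k \cdot F_{2}^{\mathrm{midpoint}} + c
```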
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hannah King|AUTHOR Hannah King]]^^1^^, [[Emmanuel Ferragne|AUTHOR Emmanuel Ferragne]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CLILLAC-ARP (EA 3967), France; ^^2^^LPP (UMR 7018), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3322–3326&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Articulatory variation of /r/ has been widely observed in rhotic varieties of English, particularly with regards to tongue body shapes, which range from retroflex to bunched. However, little is known about the production of /r/ in modern non-rhotic varieties, particularly in Anglo-English. Although it is generally agreed that /r/ may be accompanied by lip protrusion, it is unclear whether there is a relationship between tongue shape and the accompanying degree of protrusion. We present acoustic and articulatory data (via ultrasound tongue imaging and lip videos) from Anglo-English /r/ produced in both hyper- and non-hyperarticulated speech. Hyperarticulation was elicited by engaging speakers in error resolution with a simulated “silent speech” recognition programme. Our analysis indicates that hyperarticulated /r/ induces more lip protrusion than non-hyperarticulated /r/. However, bunched /r/ variants present more protrusion than retroflex variants, regardless of hyperarticulation. Despite some methodological limitations, the use of Deep Neural Networks seems to confirm these results. An articulatory trading relation between tongue shape and accompanying lip protrusion is proposed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandra Markó|AUTHOR Alexandra Markó]]^^1^^, [[Márton Bartók|AUTHOR Márton Bartók]]^^2^^, [[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]]^^3^^, [[Tekla Etelka Gráczi|AUTHOR Tekla Etelka Gráczi]]^^3^^, [[Andrea Deme|AUTHOR Andrea Deme]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^ELTE, Hungary; ^^2^^ELTE, Hungary; ^^3^^MTA-ELTE LingArt, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3327–3331&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The aim of our study is to analyse the articulatory characteristics of /iː/ occurring in Hungarian monosyllabic harmonic and antiharmonic stems. In their frequently cited work, based on 3 speakers’ data, Beňuš and Gafos (2007) [1] claimed that the tongue position in transparent vowels of antiharmonic Hungarian stems is less advanced than that of the phonemically identical vowels in harmonic stems. In their study, the authors compared different harmonic and antiharmonic stems (even if the consonantal context was more or less controlled).

In the present study, we analysed two homophonous pairs of words, /siːv/ and /ɲiːr/, which are antiharmonic in their verbal usage but harmonic as nouns. The words were produced by 4 speakers both (i) in isolation and (ii) in sentence-initial position, where they were followed by front and back vowels, in a well-controlled manner. The experiment was carried out using electromagnetic articulography. We compared the horizontal positions of four receiver coils (ttip, tbl, tbo1, tbo2) across the conditions with Generalized Additive Models. The results showed that the horizontal positions of the receivers did not vary as a function of the harmonicity of the stem in either the isolated or the coarticulated condition.
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Conceição Cunha|AUTHOR Conceição Cunha]]^^1^^, [[Samuel Silva|AUTHOR Samuel Silva]]^^2^^, [[António Teixeira|AUTHOR António Teixeira]]^^2^^, [[Catarina Oliveira|AUTHOR Catarina Oliveira]]^^2^^, [[Paula Martins|AUTHOR Paula Martins]]^^2^^, [[Arun A. Joseph|AUTHOR Arun A. Joseph]]^^3^^, [[Jens Frahm|AUTHOR Jens Frahm]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^LMU München, Germany; ^^2^^Universidade de Aveiro, Portugal; ^^3^^MPI for Biophysical Chemistry, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3332–3336&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The characterisation of nasal vowels is not only a question of studying velar aperture. Recent work shows that oropharyngeal articulatory adjustments enhance the acoustics of nasal coupling or, at least, magnify differences between oral/nasal vowel congeners. Despite preliminary studies on the oral configurations of nasal vowels, for European Portuguese, a quantitative analysis is missing, particularly one to be applied systematically to a desirably large number of speakers. The main objective of this study is to adapt and extend previous methodological advances for the analysis of MRI data to further investigate: how velar changes affect oral configurations; the changes to the articulators and constrictions when compared with oral counterparts; and the closest oral counterpart. High framerate RT-MRI images (50fps) are automatically processed to extract the vocal tract contours and the position/configuration for the different articulators. These data are processed by evolving a quantitative articulatory analysis framework, previously proposed by the authors, extended to include information regarding constrictions (degree and place) and nasal port. For this study, while the analysis of data for more speakers is ongoing, we considered a set of two EP native speakers and addressed the study of oral and nasal vowels mainly in the context of stop consonants.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yan Xiong|AUTHOR Yan Xiong]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Chaitali Chakrabarti|AUTHOR Chaitali Chakrabarti]]
</p><p class="cpabstractcardaffiliationlist">Arizona State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3337–3341&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Overlapped speech poses a significant problem in a variety of speech processing applications, including speaker identification, speaker diarization, and speech recognition, among others. To address it, existing systems combine source separation with algorithms for processing non-overlapped speech (e.g. source separation + follow-on speech recognition). In this paper we propose a modified network architecture to simultaneously recognize keywords from overlapped speech without explicitly having to perform source separation. We build our network by adding capsule layers to a ResNet architecture that has shown state-of-the-art performance on a traditional keyword recognition task. We evaluate the model on a series of 10-word overlapped keyword recognition experiments, using speaker-dependent and speaker-independent training. Results indicate that the Residual + Capsule (ResCap) network shows a marked improvement in recognizing overlapped speech, especially in experiments where there is a mismatch in the number of overlapped speakers between the training set and the test set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hansi Yang|AUTHOR Hansi Yang]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3382–3386&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Music genres are conventional categories that identify some pieces of music as belonging to a shared tradition or set of conventions. In this paper, we propose an approach to improve music genre classification with convolutional neural networks (CNNs). Using the mel-scale spectrogram as input, we use duplicate convolutional layers whose outputs are fed to different pooling layers to provide more statistical information for classification. We also modify residual learning by taking more outputs from the convolutional layers. Comparing two different network topologies, our experimental results on the GTZAN dataset show that the proposed method effectively improves classification accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Nehory Carmi|AUTHOR Nehory Carmi]], [[Azaria Cohen|AUTHOR Azaria Cohen]], [[Mireille Avigal|AUTHOR Mireille Avigal]], [[Anat Lerner|AUTHOR Anat Lerner]]
</p><p class="cpabstractcardaffiliationlist">OUI, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3387–3390&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Identifying acoustic properties that characterize the reading of different literary genres can help give a more personal and human tone to the speech of bots and automatic readings.

In this paper we consider the following question: given speech segments of audiobooks, how well can we classify them according to their literary genres? In this study we consider three different literary genres: children’s, horror and suspense, and humorous audiobooks, taken from two free audiobook sites: Librivox and YouTube.

We ran four classification experiments: one for each pair of genres (three in total) and one for all three genres together. We repeated each experiment twice, with two different network architectures: a Convolutional Neural Network (CNN) and a Recurrent Neural Network (RNN).

Note that, throughout the reading, some sections are more typical of the book’s genre than others. As the samples were taken sequentially throughout the reading of the books and were short in duration, we did not expect high classification rates. Nevertheless, the accuracy of all the experiments was at least 72% for all pairwise classifications, and at least 57% for both architectures in the three-genre classification.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Che-Wei Huang|AUTHOR Che-Wei Huang]], [[Roland Maas|AUTHOR Roland Maas]], [[Sri Harish Mallidi|AUTHOR Sri Harish Mallidi]], [[Björn Hoffmeister|AUTHOR Björn Hoffmeister]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3342–3346&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we extend our previous work on device-directed utterance detection, which aims to distinguish voice queries intended for a smart-home device from background speech. The task can be phrased as a binary utterance-level classification problem that we approach with a DNN-LSTM model using acoustic features and features from the automatic speech recognition (ASR) decoder as input. In this work, we study the performance of the model for different dialog types and for different categories of decoder features. To address different dialog types, we found that a model with a separate output branch for each dialog type outperforms a model with a shared output branch by a relative 12.5% reduction in equal error rate (EER). We also found the average number of arcs in a confusion network to be one of the most informative ASR decoder features. In addition, we explore different backpropagation frequencies for training the acoustic embedding (every k frames, k=1,3,5,7), and mean and attention pooling methods for generating an utterance representation. We found that attention pooling provides the most discriminative utterance representation and outperforms mean pooling by a relative 4.97% reduction in EER.</p></div>
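The attention-versus-mean pooling comparison above can be made concrete with a minimal sketch. The module below is illustrative only: the single-vector scorer, dimensions and variable names are assumptions for exposition, not the authors' implementation.

```python
# Hypothetical sketch of attention pooling over frame-level embeddings,
# contrasted with mean pooling; shapes and names are illustrative.
import torch
import torch.nn as nn

class AttentionPooling(nn.Module):
    def __init__(self, embed_dim: int):
        super().__init__()
        # one learnable scoring vector yields a scalar weight per frame
        self.score = nn.Linear(embed_dim, 1, bias=False)

    def forward(self, frames: torch.Tensor) -> torch.Tensor:
        # frames: (batch, time, embed_dim)
        weights = torch.softmax(self.score(frames), dim=1)  # (batch, time, 1)
        return (weights * frames).sum(dim=1)                # (batch, embed_dim)

frames = torch.randn(8, 200, 128)          # 8 utterances, 200 frames, 128-dim embeddings
mean_repr = frames.mean(dim=1)             # mean-pooled utterance representation
attn_repr = AttentionPooling(128)(frames)  # attention-pooled utterance representation
```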
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hang Su|AUTHOR Hang Su]], [[Borislav Dzodzo|AUTHOR Borislav Dzodzo]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]
</p><p class="cpabstractcardaffiliationlist">CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3347–3351&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Time allocated for lecturing and student discussions is an important indicator of classroom quality assessment. Automated classification of lecture and discussion recording segments can serve as an indicator of classroom activity in a flipped classroom setting. Segments of lecture are primarily the speech of the lecturer, while segments of discussion include student speech, silence and noise. Multiple audio recorders simultaneously document all class activities. Recordings are coarsely synchronized to a common start time. We note that the lecturer’s speech tends to be common across recordings, but student discussions are captured only by the nearby device(s). Therefore, we window each recording at 0.5 s to 5 s duration and a 0.1 s analysis rate. We compute the normalized similarity between a given window and temporally proximate window segments in other recordings. A histogram of these similarities categorizes higher-similarity windows as lecture and lower ones as discussion. To improve classification performance, high-energy lecture windows and windows with very high and very low similarity are used to train a supervised model, which regenerates the classification results for the remaining windows. Experimental results show that binary classification accuracy improves from 96.84% to 97.37%.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Takanori Ashihara|AUTHOR Takanori Ashihara]], [[Yusuke Shinohara|AUTHOR Yusuke Shinohara]], [[Hiroshi Sato|AUTHOR Hiroshi Sato]], [[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Kiyoaki Matsui|AUTHOR Kiyoaki Matsui]], [[Takaaki Fukutomi|AUTHOR Takaaki Fukutomi]], [[Yoshikazu Yamaguchi|AUTHOR Yoshikazu Yamaguchi]], [[Yushi Aono|AUTHOR Yushi Aono]]
</p><p class="cpabstractcardaffiliationlist">NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3352–3356&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a neural whispered-speech detection technique that offers utterance-level classification of whispered and non-whispered speech under imbalanced data distributions. Previous studies have shown that machine learning models trained on large amounts of whispered and non-whispered utterances perform remarkably well for whispered speech detection. However, it is often difficult to collect large numbers of whispered utterances. In this paper, we propose a method to train neural whispered-speech detectors from a small amount of whispered utterances in combination with a large amount of non-whispered utterances. In doing so, special care is taken to ensure that severely imbalanced datasets can still train neural networks effectively. Specifically, we use a class-aware sampling method for training the neural networks. To evaluate the networks, we gather test samples recorded by both condenser and smartphone microphones at different distances from the speakers to simulate practical environments. Experiments show the importance of imbalanced learning in enhancing the performance of utterance-level classifiers.</p></div>
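As a rough illustration of the class-aware sampling idea mentioned above, the sketch below draws the class uniformly at random before drawing a sample, so minority-class examples appear in batches far more often than their raw frequency would suggest. The function, counts and batch sizes are hypothetical, not the paper's code.

```python
# Minimal sketch of class-aware sampling for an imbalanced binary task
# (whispered vs. non-whispered); purely illustrative.
import random
from collections import defaultdict

def class_aware_batches(labels, batch_size, n_batches, seed=0):
    """Yield index batches that draw classes uniformly, regardless of class frequency."""
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for idx, y in enumerate(labels):
        by_class[y].append(idx)
    classes = sorted(by_class)
    for _ in range(n_batches):
        batch = []
        for _ in range(batch_size):
            c = rng.choice(classes)                # pick a class uniformly at random
            batch.append(rng.choice(by_class[c]))  # then pick a sample from that class
        yield batch

# e.g. 1,000 non-whispered (0) vs. 50 whispered (1) utterances
labels = [0] * 1000 + [1] * 50
for batch in class_aware_batches(labels, batch_size=16, n_batches=2):
    print(sum(labels[i] for i in batch), "whispered samples in this batch")
```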
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Christian Bergler|AUTHOR Christian Bergler]]^^1^^, [[Manuel Schmitt|AUTHOR Manuel Schmitt]]^^1^^, [[Rachael Xi Cheng|AUTHOR Rachael Xi Cheng]]^^2^^, [[Andreas Maier|AUTHOR Andreas Maier]]^^1^^, [[Volker Barth|AUTHOR Volker Barth]]^^3^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^FAU Erlangen-Nürnberg, Germany; ^^2^^IZW in FVB, Germany; ^^3^^Anthro-Media, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3357–3361&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Call type classification is an important instrument in bioacoustic research investigating the group-specific vocal repertoires, behavioral patterns, and cultures of different animal groups. There is a growing need for robust machine-based techniques to replace human classification, given their advantages in handling large datasets, delivering consistent results, removing perception-based classification, and minimizing human errors. The current work is the first to adopt a two-stage, fully unsupervised approach on previously machine-segmented orca data to identify orca sound types using deep learning, together with one of the largest bioacoustic datasets, the Orchive. The proposed methods include: (1) unsupervised feature learning using an undercomplete ResNet18 autoencoder trained on machine-annotated data, and (2) spectral clustering of the compressed orca feature representations. An existing human-labeled orca dataset, comprising 514 signals distributed over 12 classes, was clustered. This two-stage, fully unsupervised approach is an initial study to (1) examine machine-generated clusters against human-identified orca call type classes, (2) compare supervised call type classification with unsupervised call type clustering, and (3) verify the general feasibility of a completely unsupervised approach based on machine-labeled orca data, representing major progress in the research field of animal linguistics by enabling a much deeper understanding and facilitating new insights and opportunities.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Niccolò Sacchi|AUTHOR Niccolò Sacchi]]^^1^^, [[Alexandre Nanchen|AUTHOR Alexandre Nanchen]]^^2^^, [[Martin Jaggi|AUTHOR Martin Jaggi]]^^1^^, [[Milos Cernak|AUTHOR Milos Cernak]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^EPFL, Switzerland; ^^2^^Idiap Research Institute, Switzerland; ^^3^^Logitech, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3362–3366&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword Spotting (KWS) systems allow detecting a set of pre-defined spoken keywords. Open-vocabulary KWS systems search for the keywords in the set of word hypotheses generated by an automatic speech recognition (ASR) system, which is computationally expensive and therefore often implemented as a cloud-based service. Alternatively, KWS systems can use word classification algorithms, which do not allow the set of recognized words to be changed easily, as the classes have to be defined a priori, even before training the system. In this paper, we propose an open-vocabulary, ASR-free KWS system based on speech and text encoders whose computed embeddings are matched in order to spot whether a keyword has been uttered. This approach allows the set of keywords to be chosen a posteriori while requiring low computational power. Experiments performed on two different datasets show that our method is competitive with other state-of-the-art KWS systems while allowing flexible configuration and remaining computationally efficient.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Qiang Gao|AUTHOR Qiang Gao]], [[Shutao Sun|AUTHOR Shutao Sun]], [[Yaping Yang|AUTHOR Yaping Yang]]
</p><p class="cpabstractcardaffiliationlist">CUC, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3367–3371&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In Mandarin Chinese, correct pronunciation is key to conveying word meaning correctly, and correct pronunciation is closely related to the tone of the text. Tone classification is therefore a critical part of a speech evaluation system. Traditional tone classification is based on F0 and energy or on MFCCs, but the extraction of these features is often affected by noise and other uncontrollable environmental factors. To reduce the influence of the environment, we designed a network named ToneNet, which takes the mel-spectrogram as its input feature and uses a custom convolutional neural network and multi-layer perceptron to classify Chinese syllables into one of the four tones. We trained and tested ToneNet on the Syllable Corpus of Standard Chinese (SCSC) dataset. The results show that the best accuracy and F1-score of our method reach 99.16% and 99.11%, respectively. In addition, ToneNet achieves 97.07% accuracy and a 96.83% F1-score under Gaussian noise.</p></div>
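For orientation, a log-mel-spectrogram front end of the kind described above can be sketched as follows; librosa, the file name and all parameter values are assumptions for illustration, not the ToneNet configuration.

```python
# Hedged sketch: log-mel spectrogram as the input feature for tone classification.
import librosa
import numpy as np

y, sr = librosa.load("syllable.wav", sr=16000)    # hypothetical monosyllable recording
mel = librosa.feature.melspectrogram(y=y, sr=sr,
                                     n_fft=400, hop_length=160, n_mels=64)
log_mel = librosa.power_to_db(mel, ref=np.max)    # (n_mels, n_frames), dB scale
print(log_mel.shape)                              # this matrix would feed the CNN + MLP classifier
```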
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Seungwoo Choi|AUTHOR Seungwoo Choi]], [[Seokjun Seo|AUTHOR Seokjun Seo]], [[Beomjun Shin|AUTHOR Beomjun Shin]], [[Hyeongmin Byun|AUTHOR Hyeongmin Byun]], [[Martin Kersner|AUTHOR Martin Kersner]], [[Beomsu Kim|AUTHOR Beomsu Kim]], [[Dongyoung Kim|AUTHOR Dongyoung Kim]], [[Sungjoo Ha|AUTHOR Sungjoo Ha]]
</p><p class="cpabstractcardaffiliationlist">Hyperconnect, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3372–3376&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword spotting (KWS) plays a critical role in enabling speech-based user interactions on smart devices. Recent developments in the field of deep learning have led to wide adoption of convolutional neural networks (CNNs) in KWS systems due to their exceptional accuracy and robustness. The main challenge faced by KWS systems is the trade-off between high accuracy and low latency. Unfortunately, there has been little quantitative analysis of the actual latency of KWS models on mobile devices. This is especially concerning since conventional convolution-based KWS approaches are known to require a large number of operations to attain an adequate level of performance.

In this paper, we propose a temporal convolution for real-time KWS on mobile devices. Unlike most 2D convolution-based KWS approaches, which require a deep architecture to fully capture both low- and high-frequency domains, we exploit temporal convolutions with a compact ResNet architecture. On the Google Speech Commands dataset, we achieve a more than 385× speedup on a Google Pixel 1 and surpass the accuracy of the state-of-the-art model. In addition, we release the implementation of the proposed and baseline models, including an end-to-end pipeline for training the models and evaluating them on mobile devices.</p></div>
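The efficiency argument for temporal convolution can be seen in a small sketch that contrasts a 2D convolution over the time-frequency plane with a 1D convolution that treats feature coefficients as input channels; the shapes and layer sizes below are illustrative, not the paper's configuration.

```python
# Contrast between 2D convolution over time-frequency and 1D temporal convolution.
import torch
import torch.nn as nn

mfcc = torch.randn(8, 40, 101)        # (batch, n_coefficients, n_frames), ~1 s of audio

# 2D view: a single-channel "image" convolved along both time and frequency
conv2d = nn.Conv2d(1, 16, kernel_size=(3, 3), padding=1)
out2d = conv2d(mfcc.unsqueeze(1))     # (8, 16, 40, 101)

# temporal view: coefficients become channels, convolution runs along time only
conv1d = nn.Conv1d(40, 16, kernel_size=3, padding=1)
out1d = conv1d(mfcc)                  # (8, 16, 101), far fewer output positions to compute
print(out2d.shape, out1d.shape)
```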
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhiying Huang|AUTHOR Zhiying Huang]], [[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Ming Lei|AUTHOR Ming Lei]]
</p><p class="cpabstractcardaffiliationlist">Alibaba Group, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3377–3381&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio tagging aims to identify the presence or absence of audio events in an audio clip. Recently, many researchers have explored different model structures to improve audio tagging performance. The convolutional neural network (CNN) is the most popular choice among a wide variety of model structures, and it has been successfully applied to the audio event prediction task. However, the model complexity of CNNs is relatively high, which is not efficient enough to ship in a real product. In this paper, the compact Feedforward Sequential Memory Network (cFSMN) is proposed for the audio tagging task. Experimental results show that the cFSMN-based system yields performance comparable to the CNN-based system. Meanwhile, an audio-to-audio ratio (AAR) based data augmentation method is proposed to further improve classifier performance. Finally, with raw waveforms of the balanced training set of Audio Set, a published standard database, our system achieves state-of-the-art performance with an AUC of 0.932. Moreover, the cFSMN-based model has only 1.9 million parameters, only about 1/30 of the CNN-based model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Min-Jae Hwang|AUTHOR Min-Jae Hwang]]^^1^^, [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Search Solutions, Korea; ^^2^^Yonsei University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3391–3395&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a deep learning (DL)-based parameter enhancement method for a mixed excitation linear prediction (MELP) speech codec in noisy communication environments. Unlike conventional speech enhancement modules, which are designed to obtain a clean speech signal by removing noise components before speech codec processing, the proposed method directly enhances codec parameters on either the encoder or decoder side. As the proposed method is implemented by a small network without any of the additional processes required in conventional enhancement systems, e.g., time-frequency (T-F) analysis/synthesis modules, its computational complexity is very low. By enhancing the noise-corrupted codec parameters with the proposed DL framework, we achieved an enhancement system that is much simpler and faster than conventional T-F mask-based speech enhancement methods, while its quality remains similar.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sebastian Möller|AUTHOR Sebastian Möller]]^^1^^, [[Gabriel Mittag|AUTHOR Gabriel Mittag]]^^1^^, [[Thilo Michael|AUTHOR Thilo Michael]]^^1^^, [[Vincent Barriac|AUTHOR Vincent Barriac]]^^2^^, [[Hitoshi Aoki|AUTHOR Hitoshi Aoki]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Technische Universität Berlin, Germany; ^^2^^Orange Labs, France; ^^3^^NTT, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3436–3440&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In order to plan speech communication services with regard to the quality experienced by their users, parametric models have been used for a long time. These models predict the overall quality experienced by a communication partner on the basis of parameters describing the elements of the transmission channel and the terminal equipment. The most widely used model is the E-model, which is standardized in ITU-T Rec. G.107 for narrowband and in ITU-T Rec. G.107.1 for wideband scenarios. However, with the advent of super-wideband and fullband transmission, the E-model needs to be extended. In this paper, we propose a first version of an extended E-model that addresses both super-wideband and fullband scenarios, and that predicts the effects of speech codecs, packet loss, and delay as the most important degradations to be expected in such scenarios. Predictions are compared to the results of listening-only and conversational tests as well as to signal-based predictions, showing reasonable prediction accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kai Zhen|AUTHOR Kai Zhen]]^^1^^, [[Jongmo Sung|AUTHOR Jongmo Sung]]^^2^^, [[Mi Suk Lee|AUTHOR Mi Suk Lee]]^^2^^, [[Seungkwon Beack|AUTHOR Seungkwon Beack]]^^2^^, [[Minje Kim|AUTHOR Minje Kim]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Indiana University Bloomington, USA; ^^2^^ETRI, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3396–3400&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech codecs learn compact representations of speech signals to facilitate data transmission. Many recent deep neural network (DNN) based end-to-end speech codecs achieve low bitrates and high perceptual quality at the cost of model complexity. We propose a cross-module residual learning (CMRL) pipeline as a module carrier, with each module reconstructing the residual from its preceding modules. CMRL differs from other DNN-based speech codecs in that, rather than modeling the speech compression problem in a single large neural network, it optimizes a series of less complicated modules in a two-phase training scheme. The proposed method shows better objective performance than AMR-WB and the state-of-the-art DNN-based speech codec with a similar network architecture. As an end-to-end model, it takes raw PCM signals as input, but it is also compatible with linear predictive coding (LPC), showing better subjective quality at high bitrates than AMR-WB and Opus. The gain is achieved using only 0.9 million trainable parameters, a significantly less complex architecture than the other DNN-based codecs in the literature.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tom Bäckström|AUTHOR Tom Bäckström]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3401–3405&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech coding is the most commonly used application of speech processing. Accumulated layers of improvements have however made codecs so complex that optimization of individual modules becomes increasingly difficult. This work introduces machine learning methodology to speech and audio coding, such that we can optimize quality in terms of overall entropy. We can then use conventional quantization, coding and perceptual models without modification such that the codec adheres to conventional requirements on algorithmic complexity, latency and robustness to packet loss. Experiments demonstrate that end-to-end optimization of quantization accuracy of the spectral envelope can be used for a lossless reduction in bitrate of 0.4 kbits/s.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jean-Marc Valin|AUTHOR Jean-Marc Valin]]^^1^^, [[Jan Skoglund|AUTHOR Jan Skoglund]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Amazon, USA; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3406–3410&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Neural speech synthesis algorithms are a promising new approach for coding speech at very low bitrate. They have so far demonstrated quality that far exceeds traditional vocoders, at the cost of very high complexity. In this work, we present a low-bitrate neural vocoder based on the LPCNet model. The use of linear prediction and sparse recurrent networks makes it possible to achieve real-time operation on general-purpose hardware. We demonstrate that LPCNet operating at 1.6 kb/s achieves significantly higher quality than MELP and that uncompressed LPCNet can exceed the quality of a waveform codec operating at low bitrate. This opens the way for new codec designs based on neural synthesis models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Guillaume Fuchs|AUTHOR Guillaume Fuchs]]^^1^^, [[Chamran Ashour|AUTHOR Chamran Ashour]]^^2^^, [[Tom Bäckström|AUTHOR Tom Bäckström]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Fraunhofer IIS, Germany; ^^2^^Ericsson, Sweden; ^^3^^Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3411–3415&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Significant improvements in the quality of speech coders have been achieved by widening the coded frequency range from narrowband to wideband. However, existing speech coders still employ a band-limited source-filter model extended by parametric coding of the higher band. In the present work, a super-wideband source-filter model running at 32 kHz is considered, and especially its spectral magnitude envelope modeling. To match the super-wideband operating mode, we adapted and compared two methods: Linear Predictive Coding (LPC) and Distribution Quantization (DQ). LPC uses autoregressive modeling, while DQ quantizes the energy ratios between different parts of the spectrum. The parameters of both methods were quantized with multi-stage vector quantization. Objective and subjective evaluations indicate that both methods, used in a super-wideband source-filter coding scheme, offer the same quality range, making them an attractive alternative to conventional speech coders that require an additional bandwidth extension.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinyu Li|AUTHOR Xinyu Li]], [[Venkata Chebiyyam|AUTHOR Venkata Chebiyyam]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3416–3420&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automatic bandwidth extension (restoring high-frequency information from low-sample-rate audio) has a number of applications in speech processing. We introduce an end-to-end deep learning based system for speech bandwidth extension for use in a downstream automatic speech recognition (ASR) system. Specifically, we propose a conditional generative adversarial network enriched with ASR-specific loss functions designed to upsample the speech audio while maintaining good ASR performance. Evaluations on the Speech Commands dataset and the LibriSpeech corpus show that our approach outperforms a number of traditional bandwidth extension methods with respect to word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Deepika Gupta|AUTHOR Deepika Gupta]], [[Hanumant Singh Shekhawat|AUTHOR Hanumant Singh Shekhawat]]
</p><p class="cpabstractcardaffiliationlist">IIT Guwahati, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3421–3425&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work proposes a new method for artificial bandwidth extension (ABE) that aims to extend the bandwidth of speech signals in narrowband voice communications. We extract a signal model which contains the wideband information. Using this signal model, we obtain an infinite impulse response (IIR) interpolation filter with the help of H∞ optimization. The interpolation filters differ across speech signals because of their non-stationary (time-variant) nature. In narrowband communications, only the narrowband signal is accessible. Hence, a codebook approach is used to store the information of the IIR interpolation filters (the wideband feature) together with the corresponding narrowband signal characteristic (the narrowband attribute). For this, a Gaussian mixture modeling (GMM) codebook approach is utilized to estimate the wideband feature for a given narrowband attribute of the signal. Performance is assessed for the two types of narrowband attributes.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Sebastian Möller|AUTHOR Sebastian Möller]]
</p><p class="cpabstractcardaffiliationlist">Technische Universität Berlin, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3426–3430&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a single-ended quality diagnosis model for super-wideband speech communication networks, which predicts the perceived Noisiness, Coloration, and Discontinuity of transmitted speech. The model is an extension to the single-ended speech quality prediction model NISQA and can additionally indicate the cause of quality degradation. Service providers can use the model independently of the communication system’s technology since it is based on universal perceptual quality dimensions. The prediction model consists of a convolutional neural network that firstly calculates per-frame features of a speech signal and subsequently aggregates the features over time with a recurrent neural network, to estimate the speech quality dimensions. The proposed diagnosis model achieves promising results with an average RMSE* of 0.24.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Li Chai|AUTHOR Li Chai]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3431–3435&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>One challenging problem of robust automatic speech recognition (ASR) is how to measure the goodness of a speech enhancement algorithm without calculating the word error rate (WER), given the high costs of manual transcription, language modeling and decoding. In this study, a novel cross-entropy-guided (CEG) measure is proposed for assessing whether enhanced speech produced by a speech enhancement algorithm would yield good performance for robust ASR. CEG consists of three consecutive steps, namely the low-level representations obtained via feature extraction, the high-level representations obtained via the nonlinear mapping of the acoustic model, and the final CEG calculation between the high-level representations of clean and enhanced speech. Specifically, state posterior probabilities from the output of the acoustic model’s neural network are adopted as the high-level representations, and a cross-entropy criterion is used to calculate CEG. Experimental results show that CEG could consistently yield the highest correlations with WER and achieve the most accurate assessment of ASR performance when compared to distortion measures based on human auditory perception and an acoustic confidence measure. Potentially, CEG could be adopted to guide the parameter optimization of deep learning based speech enhancement algorithms to further improve ASR performance.</p></div>
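The final step, a cross-entropy between the state posteriors of clean and enhanced speech, can be sketched as below; the shapes, toy values and function name are assumptions, not the paper's implementation.

```python
# Illustrative cross-entropy between clean and enhanced state posterior sequences.
import numpy as np

def cross_entropy_guided(post_clean: np.ndarray, post_enh: np.ndarray, eps: float = 1e-12) -> float:
    """post_*: (n_frames, n_states) posteriors; returns the mean frame-level cross-entropy."""
    post_enh = np.clip(post_enh, eps, 1.0)
    frame_ce = -(post_clean * np.log(post_enh)).sum(axis=1)  # cross-entropy per frame
    return float(frame_ce.mean())

# toy posteriors over 4 states for 3 frames
clean = np.array([[0.7, 0.1, 0.1, 0.1],
                  [0.2, 0.6, 0.1, 0.1],
                  [0.1, 0.1, 0.7, 0.1]])
enhanced = np.array([[0.6, 0.2, 0.1, 0.1],
                     [0.3, 0.5, 0.1, 0.1],
                     [0.2, 0.1, 0.6, 0.1]])
print(cross_entropy_guided(clean, enhanced))  # lower values mean the enhanced speech is closer to clean
```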
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Samik Sadhu|AUTHOR Samik Sadhu]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3441–3445&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we demonstrate the robustness of Modulation Vectors to domain mismatches between the training and test conditions in an Automatic Speech Recognition (ASR) system. Our work focuses on the specific task of dealing with mismatches caused by reverberation. We use simulated data from TIMIT and real reverberant speech from the REVERB challenge data to evaluate the performance of our system. The paper also describes a multistream system that combines information from Mel Frequency Cepstral Coefficients (MFCCs) and M-vectors to improve ASR performance on both matched and mismatched datasets. The proposed multistream system achieves a relative improvement of 25% in recognition accuracy in the mismatched condition, while an M-vector-trained hybrid ASR system shows a 7–8% improvement in recognition accuracy, both w.r.t. an MFCC-trained hybrid ASR system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chenda Li|AUTHOR Chenda Li]], [[Yanmin Qian|AUTHOR Yanmin Qian]]
</p><p class="cpabstractcardaffiliationlist">Shanghai Jiao Tong University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3446–3450&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Children’s speech recognition remains a big challenge for automatic speech recognition. Due to the more difficult process and higher cost of data collection, most current ASR systems are optimized using large amounts of adult speech with limited or even no children’s speech. Accordingly, the acoustic mismatch between children’s and adult speech is the primary reason for ASR performance degradation on children’s speech. To overcome this problem, we propose several approaches to improve children’s speech recognition without using any children’s speech data. A better utilization strategy for prosody-based features is developed. First, pitch and prosody modification is explored in both training and testing, which can significantly reduce the mismatch between the two types of speech. Furthermore, joint decoding with both the prosody-modified speech and the original speech is designed to obtain more robust performance on both children’s and adult speech. Experiments are evaluated on a Mandarin speech recognition task with only 400 hours of adult speech in training. The results show that our proposed method obtains a large gain on children’s speech, with a relative WER reduction of about 20% compared to the baseline, while no obvious degradation is observed on adult speech for the proposed system.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Purvi Agrawal|AUTHOR Purvi Agrawal]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]
</p><p class="cpabstractcardaffiliationlist">Indian Institute of Science, India</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3451–3455&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we propose a deep representation learning approach using the raw speech waveform in an unsupervised learning paradigm. The first layer of the proposed deep model performs acoustic filtering while the subsequent layer performs modulation filtering. The acoustic filterbank is implemented using cosine-modulated Gaussian filters whose parameters are learned. The modulation filtering is performed on log transformed outputs of the first layer and this is achieved using a skip connection based architecture. The outputs from this two layer filtering are fed to the variational autoencoder model. All the model parameters including the filtering layers are learned using the VAE cost function. We employ the learned representations (second layer outputs) in a speech recognition task. Experiments are conducted on Aurora-4 (additive noise with channel artifact) and CHiME-3 (additive noise with reverberation) databases. In these experiments, the learned representations from the proposed framework provide significant improvements in ASR results over the baseline filterbank features and other robust front-ends (average relative improvements of 16% and 6% in word error rate over baseline features on clean and multi-condition training, respectively on Aurora-4 dataset, and 21% over the baseline features on CHiME-3 database).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David B. Ramsay|AUTHOR David B. Ramsay]]^^1^^, [[Kevin Kilgour|AUTHOR Kevin Kilgour]]^^2^^, [[Dominik Roblek|AUTHOR Dominik Roblek]]^^2^^, [[Matthew Sharifi|AUTHOR Matthew Sharifi]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^MIT, USA; ^^2^^Google, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3456–3459&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Low power digital signal processors (DSPs) typically have a very limited amount of memory in which to cache data. In this paper we develop efficient bottleneck feature (BNF) extractors that can be run on a DSP, and retrain a baseline large-vocabulary continuous speech recognition (LVCSR) system to use these BNFs with only a minimal loss of accuracy. The small BNFs allow the DSP chip to cache more audio features while the main application processor is suspended, thereby reducing the overall battery usage. Our presented system is able to reduce the footprint of standard, fixed point DSP spectral features by a factor of 10 without any loss in word error rate (WER) and by a factor of 64 with only a 5.8% relative increase in WER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alexandre Riviello|AUTHOR Alexandre Riviello]], [[Jean-Pierre David|AUTHOR Jean-Pierre David]]
</p><p class="cpabstractcardaffiliationlist">Polytechnique Montréal, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3460–3464&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Keyword spotting is a classification task which aims to detect a specific set of spoken words. In general, this type of task runs on a power-constrained device such as a smartphone. One method to reduce the power consumption of a keyword spotting algorithm (typically a neural network) is to reduce the precision of the network weights and activations. In this paper, we propose a new representation of speech features which is more adapted to low-precision networks and compatible with binary/ternary neural networks. The new representation is based on the log-Mel spectrogram and models the variation of power over time. Tested on a ResNet, this representation produces results nearly as accurate as full-precision MFCCs, which are traditionally used in speech recognition applications.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Steffen Schneider|AUTHOR Steffen Schneider]], [[Alexei Baevski|AUTHOR Alexei Baevski]], [[Ronan Collobert|AUTHOR Ronan Collobert]], [[Michael Auli|AUTHOR Michael Auli]]
</p><p class="cpabstractcardaffiliationlist">Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3465–3469&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We explore unsupervised pre-training for speech recognition by learning representations of raw audio. wav2vec is trained on large amounts of unlabeled audio data and the resulting representations are then used to improve acoustic model training. We pre-train a simple multi-layer convolutional neural network optimized via a noise contrastive binary classification task. Our experiments on WSJ reduce the WER of a strong character-based log-mel filterbank baseline by up to 36% when only a few hours of transcribed data are available. Our approach achieves 2.43% WER on the nov92 test set. This outperforms Deep Speech 2, the best reported character-based system in the literature, while using three orders of magnitude less labeled training data.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sunghye Cho|AUTHOR Sunghye Cho]]^^1^^, [[Mark Liberman|AUTHOR Mark Liberman]]^^1^^, [[Yong-cheol Lee|AUTHOR Yong-cheol Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Pennsylvania, USA; ^^2^^Cheongju University, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3470–3474&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Focus, which is usually modulated by prosodic prominence, highlights a particular element within a sentence for emphasis or contrast. Despite its importance in communication, it has received little attention in the field of speech recognition. This paper develops an automatic detection system for prosodic focus in American English, using telephone-number strings. Our data were 100 10-digit phone number strings read by 5 speakers (3 females and 2 males). We extracted 18 prosodic features from each digit within the strings, plus one categorical variable, and trained a Random Forest model to detect where the focused digit is within a given string. We also compared the model performance to human judgment rates from a perception experiment with 67 native speakers of American English. Our final model shows 92% accuracy in detecting the location of prosodic focus, which is slightly lower than human perception (97.2%) but much better than chance (10%). We discuss the predictive features in our model and potential features to add in a future study.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Raghav Menon|AUTHOR Raghav Menon]]^^1^^, [[Herman Kamper|AUTHOR Herman Kamper]]^^1^^, [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]]^^1^^, [[John Quinn|AUTHOR John Quinn]]^^2^^, [[Thomas Niesler|AUTHOR Thomas Niesler]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Stellenbosch University, South Africa; ^^2^^United Nations Global Pulse, Uganda</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3475–3479&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We compare features for dynamic time warping (DTW) when used to bootstrap keyword spotting (KWS) in an almost zero-resource setting. Such quickly-deployable systems aim to support United Nations (UN) humanitarian relief efforts in parts of Africa with severely under-resourced languages. Our objective is to identify acoustic features that provide acceptable KWS performance in such environments. As a supervised resource, we restrict ourselves to a small, easily acquired and independently compiled set of isolated keywords. For feature extraction, a multilingual bottleneck feature (BNF) extractor, trained on well-resourced out-of-domain languages, is integrated with a correspondence autoencoder (CAE) trained on extremely sparse in-domain data. On their own, BNFs and CAE features are shown to achieve a more than 2% absolute performance improvement over baseline MFCCs. However, by using BNFs as input to the CAE, even better performance is achieved, with a more than 11% absolute improvement in ROC AUC over MFCCs and more than twice as many top-10 retrievals for the two evaluated languages, English and Luganda. We conclude that integrating BNFs with the CAE allows both large out-of-domain and sparse in-domain resources to be exploited for improved ASR-free keyword spotting.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]
</p><p class="cpabstractcardaffiliationlist">University of Edinburgh, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3480–3484&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We investigate the problem of direct waveform modelling using parametric kernel-based filters in a convolutional neural network (CNN) framework, building on SincNet, a CNN employing the cardinal sine (sinc) function to implement learnable bandpass filters. To this end, the general problem of learning a filterbank consisting of modulated kernel-based baseband filters is studied. Compared to standard CNNs, such models have fewer parameters, learn faster, and require less training data. They are also more amenable to human interpretation, paving the way to embedding some perceptual prior knowledge in the architecture. We have investigated the replacement of the rectangular filters of SincNet with triangular, gammatone and Gaussian filters, resulting in higher model flexibility and a reduction to the phone error rate. We also explore the properties of the learned filters learned for TIMIT phone recognition from both perceptual and statistical standpoints. We find that the filters in the first layer, which directly operate on the waveform, are in accord with the prior knowledge utilised in designing and engineering standard filters such as mel-scale triangular filters. That is, the networks learn to pay more attention to perceptually significant spectral neighbourhoods where the data centroid is located, and the variance and Shannon entropy are highest.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lyan Verwimp|AUTHOR Lyan Verwimp]]^^1^^, [[Jerome R. Bellegarda|AUTHOR Jerome R. Bellegarda]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Katholieke Universiteit Leuven, Belgium; ^^2^^Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3485–3489&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Natural language processing (NLP) tasks tend to suffer from a paucity of suitably annotated training data, hence the recent success of transfer learning across a wide variety of them. The typical recipe involves: (i) training a deep, possibly bidirectional, neural network with an objective related to language modeling, for which training data is plentiful; and (ii) using the trained network to derive contextual representations that are far richer than standard linear word embeddings such as word2vec, and thus result in important gains. In this work, we wonder whether the opposite perspective is also true: can contextual representations trained for different NLP tasks improve language modeling itself? Since language models (LMs) are predominantly locally optimized, other NLP tasks may help them make better predictions based on the entire semantic fabric of a document. We test the performance of several types of pre-trained embeddings in neural LMs, and we investigate whether it is possible to make the LM more aware of global semantic information through embeddings pre-trained with a domain classification model. Initial experiments suggest that as long as the proper objective criterion is used during training, pre-trained embeddings are likely to be beneficial for neural language modeling.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sandy Ritchie|AUTHOR Sandy Ritchie]]^^1^^, [[Richard Sproat|AUTHOR Richard Sproat]]^^2^^, [[Kyle Gorman|AUTHOR Kyle Gorman]]^^2^^, [[Daan van Esch|AUTHOR Daan van Esch]]^^2^^, [[Christian Schallhart|AUTHOR Christian Schallhart]]^^1^^, [[Nikos Bampounis|AUTHOR Nikos Bampounis]]^^1^^, [[Beno^ıt Brard|AUTHOR Beno^ıt Brard]]^^1^^, [[Jonas Fromseier Mortensen|AUTHOR Jonas Fromseier Mortensen]]^^1^^, [[Millie Holt|AUTHOR Millie Holt]]^^1^^, [[Eoin Mahon|AUTHOR Eoin Mahon]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Google, UK; ^^2^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3530–3534&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We describe a new approach to converting written tokens to their spoken form, which can be shared by automatic speech recognition (ASR) and text-to-speech synthesis (TTS) systems. Both ASR and TTS need to map from the written to the spoken domain, and we present an approach that enables us to share verbalization grammars between the two systems while exploiting linguistic commonalities to provide simple default verbalizations. We also describe improvements to an induction system for number names grammars. Between these shared ASR/TTS verbalizers and the improved induction system for number names grammars, we achieve significant gains in development time and scalability across languages.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dravyansh Sharma|AUTHOR Dravyansh Sharma]]^^1^^, [[Melissa Wilson|AUTHOR Melissa Wilson]]^^2^^, [[Antoine Bruguier|AUTHOR Antoine Bruguier]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Carnegie Mellon University, USA; ^^2^^University of Oklahoma, USA; ^^3^^Google, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3535–3539&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Prediction of morphological forms is a well-studied problem and can lead to better speech systems either directly by rescoring models for correcting morphology, or indirectly by more accurate dialog systems with improved natural language generation and understanding. This includes both lemmatization, i.e. deriving the lemma or root word from a given surface form as well as morphological inflection, i.e. deriving surface forms from the lemma. We train and evaluate various language-agnostic end-to-end neural sequence-to-sequence models for these tasks and compare their effectiveness. We further augment our models with pronunciation information which is typically available in speech systems to further improve the accuracies of the same tasks. We present the results across both morphologically modest and rich languages to show robustness of our approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Zhehuai Chen|AUTHOR Zhehuai Chen]]^^1^^, [[Mahaveer Jain|AUTHOR Mahaveer Jain]]^^2^^, [[Yongqiang Wang|AUTHOR Yongqiang Wang]]^^2^^, [[Michael L. Seltzer|AUTHOR Michael L. Seltzer]]^^2^^, [[Christian Fuegen|AUTHOR Christian Fuegen]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Shanghai Jiao Tong University, China; ^^2^^Facebook, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3490–3494&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>End-to-end approaches to automatic speech recognition, such as Listen-Attend-Spell (LAS), blend all components of a traditional speech recognizer into a unified model. Although this simplifies training and decoding pipelines, a unified model is hard to adapt when mismatch exists between training and test data, especially if this information is dynamically changing. The Contextual LAS (CLAS) framework tries to solve this problem by encoding contextual entities into fixed-dimensional embeddings and utilizing an attention mechanism to model the probabilities of seeing these entities. In this work, we improve the CLAS approach by proposing several new strategies to extract embeddings for the contextual entities. We compare these embedding extractors based on graphemic and phonetic input and/or output sequences and show that an encoder-decoder model trained jointly towards graphemes and phonemes outperforms other approaches. Leveraging phonetic information obtains better discrimination for similarly written graphemic sequences and also helps the model generalize better to graphemic sequences unseen in training. We show significant improvements over the original CLAS approach and also demonstrate that the proposed method scales much better to a large number of contextual entities across multiple domains.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Chang Liu|AUTHOR Chang Liu]]^^1^^, [[Zhen Zhang|AUTHOR Zhen Zhang]]^^2^^, [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]]^^1^^, [[Yonghong Yan|AUTHOR Yonghong Yan]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Chinese Academy of Sciences, China; ^^2^^CNCERT, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3495–3499&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Uyghur and Turkish are two typical agglutinative languages, which suffer heavily from the data sparsity problem. Due to this, we first apply a statistical morphological segmentation and change the number of morphs to get a better sub-word level automatic speech recognition (ASR) system. The best systems, which yield 2.03% and 1.65% absolute WER reductions from the word level systems for Uyghur and Turkish respectively, are used for further n-best rescoring. To further alleviate the data sparsity problem, we use both convolutional neural network (CNN) based and bi-directional long short-term memory (BLSTM) based character-aware language models on the two languages. In order to alleviate the information missing of the middle steps of the BLSTM based character aware language model, we propose to use the weighted average of each time-steps’ outputs. The proposed weighting methods can be divided into three categories: decay based, position-based and attention-based. Results show that the decay based weighting method leads to the most significant WER reductions, which are 2.38% and 1.96%, compared with the sub-word level 1-pass ASR system for Uyghur and Turkish respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ernest Pusateri|AUTHOR Ernest Pusateri]], [[Christophe Van Gysel|AUTHOR Christophe Van Gysel]], [[Rami Botros|AUTHOR Rami Botros]], [[Sameer Badaskar|AUTHOR Sameer Badaskar]], [[Mirko Hannemann|AUTHOR Mirko Hannemann]], [[Youssef Oualil|AUTHOR Youssef Oualil]], [[Ilya Oparin|AUTHOR Ilya Oparin]]
</p><p class="cpabstractcardaffiliationlist">Apple, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3500–3504&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this work, we uncover a theoretical connection between two language model interpolation techniques, count merging and Bayesian interpolation. We compare these techniques as well as linear interpolation in three scenarios with abundant training data per component model. Consistent with prior work, we show that both count merging and Bayesian interpolation outperform linear interpolation. We include the first (to our knowledge) published comparison of count merging and Bayesian interpolation, showing that the two techniques perform similarly. Finally, we argue that other considerations will make Bayesian interpolation the preferred approach in most circumstances.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yerbolat Khassanov|AUTHOR Yerbolat Khassanov]]^^1^^, [[Zhiping Zeng|AUTHOR Zhiping Zeng]]^^2^^, [[Van Tung Pham|AUTHOR Van Tung Pham]]^^3^^, [[Haihua Xu|AUTHOR Haihua Xu]]^^2^^, [[Eng Siong Chng|AUTHOR Eng Siong Chng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NTU, Singapore; ^^2^^Temasek Laboratories @ NTU, Singapore; ^^3^^NTU, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3505–3509&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The neural language models (NLM) achieve strong generalization capability by learning the dense representation of words and using them to estimate probability distribution function. However, learning the representation of rare words is a challenging problem causing the NLM to produce unreliable probability estimates. To address this problem, we propose a method to enrich representations of rare words in pre-trained NLM and consequently improve its probability estimation performance. The proposed method augments the word embedding matrices of pre-trained NLM while keeping other parameters unchanged. Specifically, our method updates the embedding vectors of rare words using embedding vectors of other semantically and syntactically similar words. To evaluate the proposed method, we enrich the rare street names in the pre-trained NLM and use it to rescore 100-best hypotheses output from the Singapore English speech recognition system. The enriched NLM reduces the word error rate by 6% relative and improves the recognition accuracy of the rare words by 16% absolute as compared to the baseline NLM.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jianwei Yu|AUTHOR Jianwei Yu]]^^1^^, [[Max W.Y. Lam|AUTHOR Max W.Y. Lam]]^^1^^, [[Shoukang Hu|AUTHOR Shoukang Hu]]^^1^^, [[Xixin Wu|AUTHOR Xixin Wu]]^^1^^, [[Xu Li|AUTHOR Xu Li]]^^1^^, [[Yuewen Cao|AUTHOR Yuewen Cao]]^^1^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^CUHK, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3510–3514&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Recurrent neural network language models (RNNLMs) have shown superior performance across a range of tasks, including speech recognition. The hidden layer of RNNLMs plays a vital role in learning the suitable representation of contexts for word prediction. However, the deterministic model parameters and fixed hidden vectors in conventional RNNLMs have limited power in modeling the uncertainty over hidden representations. In order to address this issue, in this paper, a comparative study of parametric and hidden representation uncertainty modeling approaches based on Bayesian gates and variational RNNLMs respectively is investigated on long short-term memory (LSTM) and gated recurrent units (GRU) LMs. Experimental results are presented on two tasks: PennTreebank (PTB) corpus, Switchboard conversational telephone speech (SWBD). Consistent performance improvements were obtained over conventional RNNLMs in terms of both perplexity and word error rate.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wiehan Agenbag|AUTHOR Wiehan Agenbag]], [[Thomas Niesler|AUTHOR Thomas Niesler]]
</p><p class="cpabstractcardaffiliationlist">Stellenbosch University, South Africa</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3515–3519&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a method of improving the performance of automatically induced lexicons for highly agglutinating languages. Our previous work demonstrated the feasibility of using automatic sub-word unit discovery and lexicon induction to enable ASR for under-resourced languages. However, a particularly challenging case for such approaches is found in agglutinating languages, which have large vocabularies of infrequently used words. In this study, we address the unfavorable vocabulary distribution of such languages by performing data-driven morphological segmentation of the orthography prior to lexicon induction. We apply this novel step to a corpus of recorded radio broadcasts in Luganda, which is a highly agglutinating and severely under-resourced language. The intervention leads to a 10% (relative) reduction in WER, which puts the resulting ASR performance on par with an expert lexicon. When context is added to the morphological segments prior to lexicon induction, a further 1% WER reduction is achieved. This demonstrates that it is feasible to perform ASR in an under-resourced setting using an automatically induced lexicon even in the case of a highly agglutinating language.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Alejandro Coucheiro-Limeres|AUTHOR Alejandro Coucheiro-Limeres]], [[Fernando Fernández-Martínez|AUTHOR Fernando Fernández-Martínez]], [[Rubén San-Segundo|AUTHOR Rubén San-Segundo]], [[Javier Ferreiros-López|AUTHOR Javier Ferreiros-López]]
</p><p class="cpabstractcardaffiliationlist">Universidad Politécnica de Madrid, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3520–3524&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We propose three architectures for a word vector prediction system (WVPS) built with LSTMs that consider both past and future contexts of a word for predicting a vector in an embedded space where its surrounding area is semantically related to the considered word. We introduce an attention mechanism in one of the architectures so the system is able to assess the specific contribution of each context word to the prediction. All the architectures are trained under the same conditions and the same training material, following a curricular-learning fashion in the presentation of the data. For the inputs, we employ pre-trained word embeddings. We evaluate the systems after the same number of training steps, over two different corpora composed of ground-truth speech transcriptions in Spanish language from TCSTAR and TV recordings used in the Search on Speech Challenge of IberSPEECH 2018. The results show that we are able to reach significant differences between the architectures, consistently across both corpora. The attention-based architecture achieves the best results, suggesting its adequacy for the task. Also, we illustrate the usefulness of the systems for resolving out-of-vocabulary (OOV) regions marked by an ASR system capable of detecting OOV occurrences.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yingying Gao|AUTHOR Yingying Gao]], [[Junlan Feng|AUTHOR Junlan Feng]], [[Ying Liu|AUTHOR Ying Liu]], [[Leijing Hou|AUTHOR Leijing Hou]], [[Xin Pan|AUTHOR Xin Pan]], [[Yong Ma|AUTHOR Yong Ma]]
</p><p class="cpabstractcardaffiliationlist">China Mobile, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3525–3529&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Code-switching has become a common linguistic phenomenon. Comparing to monolingual ASR tasks, insufficient data is a major challenge for code-switching speech recognition. In this paper, we propose an approach to compositionally employ the Bidirectional Encoder Representations from Transformers (Bert) model and Generative Adversarial Net (GAN) model for code-switching text data generation. It improves upon previous work by (1) applying Bert as a masked language model to predict the mixed-in foreign words and (2) basing on the GAN framework with Bert for both the generator and discriminator to further assure the generated sentences similar enough to the natural examples. We evaluate the effectiveness of the generated data by its contribution to ASR. Experiments show our approach can reduce the English word error rate by 1.5% with the Mandarin-English code-switching spontaneous speech corpus OC16-CE80.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anke Sennema|AUTHOR Anke Sennema]]^^1^^, [[Silke Hamann|AUTHOR Silke Hamann]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Wien, Austria; ^^2^^Universiteit van Amsterdam, The Netherlands</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3540–3543&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous observations from didactic studies have indicated that Vietnamese learners of German as a foreign language often fail to realize consonantal clusters in German [1, 2, 3]. The present study investigated whether this problem occurs already at the level of perception, i.e., whether Vietnamese learners find it difficult to perceive the difference between a cluster and a single consonant. We focused on the discrimination between the German cluster /ʃt/ and the single consonants /t/ and /ʃ/, both in onset and coda position. Due to different phonotactic restrictions on coda consonants in Vietnamese, we expected the coda position to pose a bigger challenge for correct discrimination than the onset position. With an AX discrimination task, we tested how 83 university students from Hanoi perceived these contrasts. Our findings show that only the distinction between /ʃt/-/ʃ/ in coda position posed a real challenge to our listeners. We attribute this difficulty to the weak and non-native auditory cues for the plosive in this position. For all other contrasts our participants performed surprisingly well. We propose that this is due to the influence of English as first L2 that facilitates the acquisition of phonological contrasts in German as an L3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bogdan Ludusan|AUTHOR Bogdan Ludusan]]^^1^^, [[Annett Jorschick|AUTHOR Annett Jorschick]]^^1^^, [[Reiko Mazuka|AUTHOR Reiko Mazuka]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Bielefeld, Germany; ^^2^^RIKEN, Japan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3584–3588&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Infant-directed speech (IDS) is thought to play a facilitating role in language acquisition, by simplifying the input infants receive. In particular, the hypothesis that the acoustic level is enhanced to make the input more clear for infants, has been extensively studied in the case of vowels, but less so in the case of consonants. An investigation into how nasal consonants can be discriminated in infant- compared to adult-directed speech (ADS) was performed, on a corpus of Japanese mother-infant spontaneous conversations, by examining all bilabial and alveolar nasals occurring in intervocalic position. The Pearson correlation between corresponding spectrum slices of nasal consonants, in identical vowel contexts, was employed as similarity measure and a statistical model was fit using this information. It revealed a decrease in similarity between the nasal classes, in IDS compared to ADS, although the effect was not statistically significant. We confirmed these results, using an unsupervised machine learning algorithm to discriminate between the two nasal classes, obtaining similar classification performance in IDS and ADS. We discuss our findings in the context of the current literature on infant-directed speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ellen Marklund|AUTHOR Ellen Marklund]], [[Johan Sjons|AUTHOR Johan Sjons]], [[Lisa Gustavsson|AUTHOR Lisa Gustavsson]], [[Elísabet Eir Cortes|AUTHOR Elísabet Eir Cortes]]
</p><p class="cpabstractcardaffiliationlist">Stockholm University, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3589–3593&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Distributional learning is a perceptual process hypothesized to underlie the phenomena of phonetic recalibration and selective adaptation, as well as infant speech sound category learning. However, in order to be conclusively tied to the earliest stages of speech sound category development, that is, the formation of novel perceptual categories, distributional learning must be shown to operate on stimuli for which there are no pre-existing categories. We investigated this in a previous study, finding no evidence of distributional learning in adults from unattended listening to non-speech. Since attention to stimuli impacts distributional learning, the present study focused on distributional learning from attended listening to non-speech. The same paradigm was used as in the previous study, except that participants’ attention was directed towards stimuli by means of a cover task. Non-speech stimuli were spectrally rotated vowels and the mismatch negativity was used to measure perceptual categorization. No distributional learning was found, that is, no effect of attention on distributional learning was demonstrated. This could mean that the distributional learning process does not operate on stimuli where perceptual categories do not already exist, or that the mismatch negativity measure does not capture the earliest stages of perceptual category development.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Okko Räsänen|AUTHOR Okko Räsänen]], [[Khazar Khorrami|AUTHOR Khazar Khorrami]]
</p><p class="cpabstractcardaffiliationlist">Tampere University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3594–3598&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Earlier research has suggested that human infants might use statistical dependencies between speech and non-linguistic multimodal input to bootstrap their language learning before they know how to segment words from running speech. However, feasibility of this hypothesis in terms of real-world infant experiences has remained unclear. This paper presents a step towards a more realistic test of the multimodal bootstrapping hypothesis by describing a neural network model that can learn word segments and their meanings from referentially ambiguous acoustic input. The model is tested on recordings of real infant-caregiver interactions using utterance-level labels for concrete visual objects that were attended by the infant when caregiver spoke an utterance containing the name of the object, and using random visual labels for utterances during absence of attention. The results show that beginnings of lexical knowledge may indeed emerge from individually ambiguous learning scenarios. In addition, the hidden layers of the network show gradually increasing selectivity to phonetic categories as a function of layer depth, resembling models trained for phone recognition in a supervised manner.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dan Du|AUTHOR Dan Du]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]]
</p><p class="cpabstractcardaffiliationlist">BLCU, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3599–3603&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Previous studies have shown that learners with different native language backgrounds have common difficulties in learning Chinese affricates but demonstrate in various patterns. While few studies investigated this issue of native Urdu speakers. To address the production of Chinese affricates /ts/ and /ts^^h^^/ by native Urdu speakers, speech materials, produced by two groups of subjects with different Chinese proficiency, were selected from the BLCU-SAIT speech corpus. The error rate and error types of their production of Chinese affricates /ts/ and /ts^^h^^/ have been discussed after transcription and data analysis. The results show that though there are no counterparts of Chinese affricates /ts/ and /ts^^h^^/ in Urdu, the error and the acquisition pattern of these two affricates, to some extent, affected by individual differences of their roles in Urdu except universal similarities and differences between two languages. The findings of this study shed some light on second language learning and teaching.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Scott Lewis|AUTHOR Scott Lewis]], [[Adib Mehrabi|AUTHOR Adib Mehrabi]], [[Esther de Leeuw|AUTHOR Esther de Leeuw]]
</p><p class="cpabstractcardaffiliationlist">Queen Mary University of London, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3544–3548&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This study explores L2 acquisition of socially conditioned phonetic variation in 13 German-English sequential bilinguals residing in London, UK. The phonetic variable analysed is GOOSE-fronting, i.e. the more front pronunciation of /u/ in words like ‘goose’, acoustically manifested through an increased F2 frequency. In the South of England, GOOSE-fronting is a sound change considered to be led by young females. We investigated whether bilinguals adhered to this pattern, e.g. whether younger female German-English bilinguals exhibited a relatively higher F2 frequency in words like ‘goose’ than other bilinguals. The bilinguals’ English /u/ productions were compared against their German /u/ (lower F2 as more back) and /y/ (higher F2 as more front) to determine the degree of GOOSE-fronting and whether their F2 values were closer to /y/ than /u/. Normalised formant values were considered in relation to lingual measurements obtained using ultrasound tongue imaging. The acoustic and articulatory results revealed that female bilinguals indeed produced more front English /u/ vowels than their male counterparts. Within female speakers, age and length of residence in the UK were found to be significant, with younger speakers who had lived in the UK longer than five years displaying the greatest degree of GOOSE-fronting.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sabrina Jenne|AUTHOR Sabrina Jenne]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]
</p><p class="cpabstractcardaffiliationlist">Universität Stuttgart, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3549–3553&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Articulation-based pronunciation error detection is concerned with the task of diagnosing mispronounced segments in non-native speech on the level of broad phonological properties, such as place of articulation or voicing. Using acoustic features and visual spectrograms extracted from native English utterances, we train several neural classifiers that deduce articulatory properties from segments extracted from non-native English utterances. Visual cues are thereby processed by convolutional neural networks, whereas acoustic cues are processed by recurrent neural networks.

We show that combining both modalities increases performance over using models in isolation, with important implications for user satisfaction. Furthermore, we test the impact of alignment quality on model performance by comparing results on manually corrected segments and force-aligned segments, showing that the proposed pipeline can dispense with manual correction.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Anouschka Foltz|AUTHOR Anouschka Foltz]]^^1^^, [[Sarah Cooper|AUTHOR Sarah Cooper]]^^2^^, [[Tamsin M. McKelvey|AUTHOR Tamsin M. McKelvey]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Universität Graz, Austria; ^^2^^Bangor University, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3554–3558&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Native speakers of a language can use prosodic phrasing to disambiguate syntactically ambiguous sentences [1]. The current paper explores whether prosodic phrasing can help learners determine within-clause word order differences in a new language. Unlike many previous studies, we did not train participants in an artificial language, but exploited word order differences that occur in German. Native English speakers with no knowledge of German were trained with simple main clause sentences as well as complex sentences containing a subordinate clause. During training, prosodic phrasing of complex sentences either aligned or did not align with the sentences’ clause structure. The results from two experiments showed that the non-aligned prosodic phrasing helps learners discover clause internal word order differences in German, but only if syntactic variability in the test sessions is low. Overall, the results suggest that learners can exploit prosodic structure to learn word order alternations in certain contexts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ann R. Bradlow|AUTHOR Ann R. Bradlow]]
</p><p class="cpabstractcardaffiliationlist">Northwestern University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3559–3563&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Using a corpus of multilingual recordings of a standard text (the North Wind and the Sun passage, NWS) in 11 languages, speaking rate (SR, syllables/second) and information density (ID, number of syllables for the NWS text) were examined in first-language (L1) and second-language (L2) speech. Replicating prior work, cross-language comparison of L1 speech showed a trade-off between SR and ID such that relatively low density languages (many syllables for the NWS text) tended to be produced at relatively fast rates, and vice versa. Furthermore, L2 English was characterized by both slower rate and lower ID than L1 English. That is, L2 English involved more syllables than L1 English for the same NWS text. A comparison of the number of acoustic syllables (i.e. amplitude peaks) with the number of orthographic syllables (i.e. dictionary-based syllable counts for the NWS text) indicated that L1 speech involved substantial syllable reduction (fewer acoustic than orthographic syllables) while L2 speech involved substantial syllable epenthesis (more acoustic than orthographic syllables). These findings suggest that L2 speech production involves temporal restructuring beyond increased segment, syllable and word durations, and that the resultant information rate (information bits transmitted/second) of L2 speech diverges substantially from that of L1 speech.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Calbert Graham|AUTHOR Calbert Graham]], [[Francis Nolan|AUTHOR Francis Nolan]]
</p><p class="cpabstractcardaffiliationlist">University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3564–3568&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Automated evaluation of non-native pronunciation provides a consistent and more cost-efficient alternative to human evaluation. To that end, there is considerable interest in deriving metrics that are based on the cues human listeners use to judge pronunciation. Previous research reported the use of phonetic features such as vowel characteristics in automated spoken language evaluation. The present study extends this line of work on the significance of phonetic features in automated evaluation of L2 speech (both assessment and feedback). Predictive modelling techniques examined the relationship between various articulation rate metrics one the one hand, and the proficiency and L1 background of non-native English speakers on the other. It was found that the optimal predictive model was one in which the phonetic details of phoneme articulation were factored in the analysis of articulation rate. Model performance varied also according to the L1 background of speakers. The implications for assessment and feedback are discussed.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haiyang Xu|AUTHOR Haiyang Xu]]^^1^^, [[Hui Zhang|AUTHOR Hui Zhang]]^^1^^, [[Kun Han|AUTHOR Kun Han]]^^2^^, [[Yun Wang|AUTHOR Yun Wang]]^^3^^, [[Yiping Peng|AUTHOR Yiping Peng]]^^1^^, [[Xiangang Li|AUTHOR Xiangang Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^DiDi Chuxing, China; ^^2^^DiDi Research America, USA; ^^3^^Peking University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3569–3573&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech emotion recognition is a challenging problem because human convey emotions in subtle and complex ways. For emotion recognition on human speech, one can either extract emotion related features from audio signals or employ speech recognition techniques to generate text from speech and then apply natural language processing to analyze the sentiment. Further, emotion recognition will be beneficial from using audio-textual multimodal information, it is not trivial to build a system to learn from multimodality. One can build models for two input sources separately and combine them in a decision level, but this method ignores the interaction between speech and text in the temporal domain. In this paper, we propose to use an attention mechanism to learn the alignment between speech frames and text words, aiming to produce more accurate multimodal feature representations. The aligned multimodal features are fed into a sequential model for emotion recognition. We evaluate the approach on the IEMOCAP dataset and the experimental results show the proposed approach achieves the state-of-the-art performance on the dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sharon Peperkamp|AUTHOR Sharon Peperkamp]], [[Monica Hegde|AUTHOR Monica Hegde]], [[Maria Julia Carbajal|AUTHOR Maria Julia Carbajal]]
</p><p class="cpabstractcardaffiliationlist">LSCP (UMR 8554), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3574–3578&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In spoken language, words can have different surface realizations due to the application of language-specific phonological rules. Young children must acquire these rules in order to be able to undo their effects and recognize the intended words during language processing. Evidence so far suggests that they achieve this early on, but the learning mechanisms that they exploit are unknown. As a first step in examining this question, it is necessary to know to what extent phonological rules occur in their input. Here, we investigate the occurrence of liquid deletion, i.e. the optional deletion of the liquid in word-final obstruent-liquid clusters, in French child-directed speech. Analyzing a corpus from the Childes database that contains video recordings, we find that words finishing in obstruent-liquid clusters occur on average once every 13 utterances, and that in more than half of the cases the liquid is deleted. As in adult-directed speech, deletion applies more often before consonants than before vowels and pauses. Furthermore, pairs of tokens of the same word with and without deletion tend to cluster together, with a median distance of 49 seconds of speech. This clustering could be a powerful cue in the process of the acquisition of liquid deletion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Amanda Seidl|AUTHOR Amanda Seidl]]^^1^^, [[Anne S. Warlaumont|AUTHOR Anne S. Warlaumont]]^^2^^, [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]]^^3^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Purdue University, USA; ^^2^^University of California at Los Angeles, USA; ^^3^^LSCP (UMR 8554), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3579–3583&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Theoretical, empirical, and intervention research requires access to a large, unbiased, annotated dataset of infant vocalizations for training speech technology to detect and differentiate consonant-vowel (canonical) syllables in infants’ vocalizations from less mature vocalizations. Citizen scientists could help us to achieve the goal of this dataset, if classification is accurate regardless of coders’ native language and training and can be completed on clips short enough to avoid revealing personal identifying information. Three groups of coders participated in an experiment: trained native, semi-trained native, and minimally-trained foreign. When vocalizations were presented whole, reliability was highest across the trained coders, with little difference between the semi-trained and minimally-trained coders. Among minimally-trained coders, reliability for 400ms-long clips was very similar to that found for full clips, with lower values for 200 and 600ms clips. Finally, error rates were minimized when 400ms-long clips were used. In sum, minimally-trained coders can achieve fairly reliable and accurate results, even when their native language does not match infants’ target language and when provided with very short clips. Since shorter clips protect the identity of the child and her family, this manner of data annotation may provide us with a way of building a large, unbiased dataset of infant vocalizations.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinyu Li|AUTHOR Xinyu Li]], [[Venkata Chebiyyam|AUTHOR Venkata Chebiyyam]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]
</p><p class="cpabstractcardaffiliationlist">Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3604–3608&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Environmental sound classification systems often do not perform robustly across different sound classification tasks and audio signals of varying temporal structures. We introduce a multi-stream convolutional neural network with temporal attention that addresses these problems. The network relies on three input streams consisting of raw audio and spectral features and utilizes a temporal attention function computed from energy changes over time. Training and classification utilizes decision fusion and data augmentation techniques that incorporate uncertainty. We evaluate this network on three commonly used datasets for environmental sound and audio scene classification and achieve new state-of-the-art performance without any changes in network architecture or front-end preprocessing, thus demonstrating better generalizability.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shilei Zhang|AUTHOR Shilei Zhang]], [[Yong Qin|AUTHOR Yong Qin]], [[Kewei Sun|AUTHOR Kewei Sun]], [[Yonghua Lin|AUTHOR Yonghua Lin]]
</p><p class="cpabstractcardaffiliationlist">IBM, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3649–3653&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Few-shot learning is a very promising and challenging field of machine learning as it aims to understand new concepts from very few labeled examples. In this paper, we propose attentional framework to extend recently proposed few-shot learning with graph neural network [1] in audio classification scenario. The objective of proposed attentional framework is to introduce a flexible framework to implement selectively concentration procedure on support examples for each query process. we also present an empirical study on confidence measure for few-shot learning application by combining posterior probability with normalized entropy of the network’s probability output. The efficiency of the proposed method is demonstrated with experiments on balanced training set of Audio set for training and a 5-way test set composed of about 5-hour audio data for testing.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Kangkang Lu|AUTHOR Kangkang Lu]], [[Chuan-Sheng Foo|AUTHOR Chuan-Sheng Foo]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]], [[Vijay Ramaseshan Chandrasekhar|AUTHOR Vijay Ramaseshan Chandrasekhar]]
</p><p class="cpabstractcardaffiliationlist">A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3654–3658&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Consistency-based semi-supervised learning methods such as the Mean Teacher method are state-of-the-art on image datasets, but have yet to be applied to audio data. Such methods encourage model predictions to be consistent on perturbed input data. In this paper, we incorporate audio-specific perturbations into the Mean Teacher algorithm and demonstrate the effectiveness of the resulting method on audio classification tasks. Specifically, we perturb audio inputs by mixing in other environmental audio clips, and leverage other training examples as sources of noise. Experiments on the Google Speech Command Dataset and UrbanSound8K Dataset show that the method can achieve comparable performance to a purely supervised approach while using only a fraction of the labels.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gianmarco Cerutti|AUTHOR Gianmarco Cerutti]], [[Rahul Prasad|AUTHOR Rahul Prasad]], [[Alessio Brutti|AUTHOR Alessio Brutti]], [[Elisabetta Farella|AUTHOR Elisabetta Farella]]
</p><p class="cpabstractcardaffiliationlist">FBK, Italy</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3609–3613&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In most classification tasks, wide and deep neural networks perform and generalize better than their smaller counterparts, in particular when they are exposed to large and heterogeneous training sets. However, in the emerging field of Internet of Things memory footprint and energy budget pose severe limits on the size and complexity of the neural models that can be implemented on embedded devices. The Student-Teacher approach is an attractive strategy to distill knowledge from a large network into smaller ones, that can fit on low-energy low-complexity embedded IoT platforms. In this paper, we consider the outdoor sound event detection task as a use case. Building upon the VGGish network, we investigate different distillation strategies to substantially reduce the classifier’s size and computational cost with minimal performance losses. Experiments on the  UrbanSound8K dataset show that extreme compression factors (up to 4.2 · 10^^-4^^ for parameters and 1.2 · 10^^-3^^ for operations with respect to VGGish) can be achieved, limiting the accuracy degradation from 75% to 70%. Finally, we compare different embedded platforms to analyze the trade-off between available resources and achievable accuracy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xugang Lu|AUTHOR Xugang Lu]]^^1^^, [[Peng Shen|AUTHOR Peng Shen]]^^1^^, [[Sheng Li|AUTHOR Sheng Li]]^^1^^, [[Yu Tsao|AUTHOR Yu Tsao]]^^2^^, [[Hisashi Kawai|AUTHOR Hisashi Kawai]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NICT, Japan; ^^2^^Academia Sinica, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3614–3618&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Designing good feature extraction and classifier models is essential for obtaining high performances of acoustic event detection (AED) systems. Current state-of-the-art algorithms are based on deep neural network models that jointly learn the feature representation and classifier models. As a typical pipeline in these algorithms, several network layers with nonlinear transforms are stacked for feature extraction, and a classifier layer with a softmax transform is applied on top of these extracted features to obtain normalized probability outputs. This pipeline is directly connected to a final goal for class discrimination without explicitly considering how the features should be distributed for inter-class and intra-class samples. In this paper, we explicitly add a distance metric constraint on feature extraction process with a goal to reduce intra-class sample distances and increase inter-class sample distances. Rather than estimating the pair-wise distances of samples, the distances are efficiently calculated between samples and class cluster centroids. With this constraint, the learned features have a good property for improving the generalization of the classification models. AED experiments on an urban sound classification task were carried out to test the algorithm. Results showed that the proposed algorithm efficiently improved the performance on the current state-of-the-art deep learning algorithms.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xue Bai|AUTHOR Xue Bai]]^^1^^, [[Jun Du|AUTHOR Jun Du]]^^1^^, [[Zi-Rui Wang|AUTHOR Zi-Rui Wang]]^^1^^, [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^USTC, China; ^^2^^Georgia Tech, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3619–3623&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>For the acoustic scenes classification, the main challenge is distinguishing similar acoustic segments between different scenes. To solve this problem, many deep learning based approaches have been proposed without considering the relevance of different acoustic scenes. In this paper, we propose a novel acoustic segment model (ASM) for acoustic scene classification. ASM aims at giving finer segmentation and covering all acoustic scenes through searching for the underlying phoneme like acoustic units. Furthermore, acoustic segments are modeled by Hidden Markov Models (HMMs) and each audio is decoded into ASM sequences without prior linguistic knowledge. Similar to the term vector of a text document, these ASM sequences are converted into co-occurrence statistics feature vectors and SVM/DNN is used as classifier back-end. Validated on the DCASE 2018 task, the proposed approach can achieve a competitive performance with single model and no data augment. By using visualization analysis, we excavate the potential similar units hidden in auditory sense.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ke-Xin He|AUTHOR Ke-Xin He]], [[Yu-Han Shen|AUTHOR Yu-Han Shen]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]
</p><p class="cpabstractcardaffiliationlist">Tsinghua University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3624–3628&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sound event detection with weakly labeled data is considered as a problem of multi-instance learning. And the choice of pooling function is the key to solving this problem. In this paper, we proposed a hierarchical pooling structure to improve the performance of weakly labeled sound event detection system. Proposed pooling structure has made remarkable improvements on three types of pooling function without adding any parameters. Moreover, our system has achieved competitive performance on Task 4 of Detection and Classification of Acoustic Scenes and Events (DCASE) 2017 Challenge using hierarchical pooling structure.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Wei Xia|AUTHOR Wei Xia]]^^1^^, [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Texas at Dallas, USA; ^^2^^Microsoft, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3629–3633&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we introduce a convolutional time-frequency-channel “Squeeze and Excitation” (tfc-SE) module to explicitly model inter-dependencies between the time-frequency domain and multiple channels. The tfc-SE module consists of two parts: tf-SE block and c-SE block which are designed to provide attention on time-frequency and channel domain, respectively, for adaptively recalibrating the input feature map. The proposed tfc-SE module, together with a popular Convolutional Recurrent Neural Network (CRNN) model, are evaluated on a multi-channel sound event detection task with overlapping audio sources: the training and test data are synthesized TUT Sound Events 2018 datasets, recorded with microphone arrays. We show that the tfc-SE module can be incorporated into the CRNN model at a small additional computational cost and bring significant improvements on sound event detection accuracy. We also perform detailed ablation studies by analyzing various factors that may influence the performance of the SE blocks. We show that with the best tfc-SE block, error rate (ER) decreases from 0.2538 to 0.2026, relative 20.17% reduction of ER, and 5.72% improvement of F1 score. The results indicate that the learned acoustic embeddings with the tfc-SE module efficiently strengthen time-frequency and channel-wise feature representations to improve the discriminative performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lam Pham|AUTHOR Lam Pham]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Huy Phan|AUTHOR Huy Phan]], [[Ramaswamy Palaniappan|AUTHOR Ramaswamy Palaniappan]]
</p><p class="cpabstractcardaffiliationlist">University of Kent, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3634–3638&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic scene classification (ASC) using front-end time-frequency features and back-end neural network classifiers has demonstrated good performance in recent years. However a profusion of systems has arisen to suit different tasks and datasets, utilising different feature and classifier types. This paper aims at a robust framework that can explore and utilise a range of different time-frequency features and neural networks, either singly or merged, to achieve good classification performance. In particular, we exploit three different types of front-end time-frequency feature; log energy Mel filter, Gammatone filter and constant Q transform. At the back-end we evaluate effective a two-stage model that exploits a Convolutional Neural Network for pre-trained feature extraction, followed by Deep Neural Network classifiers as a post-trained feature adaptation model and classifier. We also explore the use of a data augmentation technique for these features that effectively generates a variety of intermediate data, reinforcing model learning abilities, particularly for marginal cases. We assess performance on the DCASE2016 dataset, demonstrating good classification accuracies exceeding 90%, significantly outperforming the DCASE2016 baseline and highly competitive compared to state-of-the-art systems.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Bowen Shi|AUTHOR Bowen Shi]]^^1^^, [[Ming Sun|AUTHOR Ming Sun]]^^2^^, [[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]]^^2^^, [[Viktor Rozgic|AUTHOR Viktor Rozgic]]^^2^^, [[Spyros Matsoukas|AUTHOR Spyros Matsoukas]]^^2^^, [[Chao Wang|AUTHOR Chao Wang]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^TTIC, USA; ^^2^^Amazon, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3639–3643&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Acoustic Event Detection (AED), aiming at detecting categories of events based on audio signals, has found application in many intelligent systems. Recently deep neural network significantly advances this field and reduces detection errors to a large scale. However how to efficiently execute deep models in AED has received much less attention. Meanwhile state-of-the-art AED models are based on large deep models, which are computational demanding and challenging to deploy on devices with constrained computational resources. In this paper, we present a simple yet effective compression approach which jointly leverages  knowledge distillation and  quantization to compress larger network (teacher model) into compact network (student model). Experimental results show proposed technique not only lowers error rate of original compact network by 15% through distillation but also further reduces its model size to a large extent (2% of teacher, 12% of full-precision student) through quantization.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jiaxu Chen|AUTHOR Jiaxu Chen]], [[Jing Hao|AUTHOR Jing Hao]], [[Kai Chen|AUTHOR Kai Chen]], [[Di Xie|AUTHOR Di Xie]], [[Shicai Yang|AUTHOR Shicai Yang]], [[Shiliang Pu|AUTHOR Shiliang Pu]]
</p><p class="cpabstractcardaffiliationlist">Hikvision, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3644–3648&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Audio classification can distinguish different kinds of sounds, which is helpful for intelligent applications in daily life. However, it remains a challenging task since the sound events in an audio clip is probably multiple, even overlapping. This paper introduces an end-to-end audio classification system based on raw waveforms and mix-training strategy. Compared to human-designed features which have been widely used in existing research, raw waveforms contain more complete information and are more appropriate for multi-label classification. Taking raw waveforms as input, our network consists of two variants of ResNet structure which can learn a discriminative representation. To explore the information in intermediate layers, a multi-level prediction with attention structure is applied in our model. Furthermore, we design a mix-training strategy to break the performance limitation caused by the amount of training data. Experiments show that the mean average precision of the proposed audio classification system on Audio Set dataset is 37.2%. Without using extra training data, our system exceeds the state-of-the-art multi-level attention model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jan Mizgajski|AUTHOR Jan Mizgajski]], [[Adrian Szymczak|AUTHOR Adrian Szymczak]], [[Robert Głowski|AUTHOR Robert Głowski]], [[Piotr Szymański|AUTHOR Piotr Szymański]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Łukasz Augustyniak|AUTHOR Łukasz Augustyniak]], [[Mikołaj Morzy|AUTHOR Mikołaj Morzy]], [[Yishay Carmiel|AUTHOR Yishay Carmiel]], [[Jeff Hodson|AUTHOR Jeff Hodson]], [[Łukasz Wójciak|AUTHOR Łukasz Wójciak]], [[Daniel Smoczyk|AUTHOR Daniel Smoczyk]], [[Adam Wróbel|AUTHOR Adam Wróbel]], [[Bartosz Borowik|AUTHOR Bartosz Borowik]], [[Adam Artajew|AUTHOR Adam Artajew]], [[Marcin Baran|AUTHOR Marcin Baran]], [[Cezary Kwiatkowski|AUTHOR Cezary Kwiatkowski]], [[Marzena Żyła-Hoppe|AUTHOR Marzena Żyła-Hoppe]]
</p><p class="cpabstractcardaffiliationlist">Avaya, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3659–3660&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Avaya Conversational Intelligence (ACI) is an end-to-end, cloud-based solution for real-time Spoken Language Understanding for call centers. It combines large vocabulary, real-time speech recognition, transcript refinement, and entity and intent recognition in order to convert live audio into a rich, actionable stream of structured events. These events can be further leveraged with a business rules engine, thus serving as a foundation for real-time supervision and assistance applications. After the ingestion, calls are enriched with unsupervised keyword extraction, abstractive summarization, and business-defined attributes, enabling offline use cases, such as business intelligence, topic mining, full-text search, quality assurance, and agent training. ACI comes with a pretrained, configurable library of hundreds of intents and a robust intent training environment that allows for efficient, cost-effective creation and customization of customer-specific intents.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shounan An|AUTHOR Shounan An]], [[Youngsoo Kim|AUTHOR Youngsoo Kim]], [[Hu Xu|AUTHOR Hu Xu]], [[Jinwoo Lee|AUTHOR Jinwoo Lee]], [[Myungwoo Lee|AUTHOR Myungwoo Lee]], [[Insoo Oh|AUTHOR Insoo Oh]]
</p><p class="cpabstractcardaffiliationlist">Netmarble, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3661–3662&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present an effective method to solve a small-footprint keyword spotting (KWS) task via deep neural network for mobile game. Our goal is to improve the accuracy of KWS in various environments. To this end, we propose a new neural network layer named recycle-pooling. Extensive experiments indicate that our recycle-pooling based convolutional neural network (RP-CNN) indeed improves the performance of KWS in both clean and noisy data for mobile game. We will perform live demonstration of RP-CNN based KWS integrated into a full-sized, production-quality mobile game  A3: Still Alive, which is one of the major games from Netmarble this year and will be available on market soon.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Adam Chýlek|AUTHOR Adam Chýlek]], [[Luboš Šmídl|AUTHOR Luboš Šmídl]], [[Jan Švec|AUTHOR Jan Švec]]
</p><p class="cpabstractcardaffiliationlist">University of West Bohemia, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3663–3664&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present a multimodal dialog system capable of information retrieval from the large audiovisual archive MALACH of Holocaust testimonies. The users can use spoken natural language queries to search the archive. A graphical user interface allows the users to quickly view footage with the answers and explore their context. The dialog was deployed in two languages — English and Czech. The system uses automatic speech recognition and natural language processing for knowledge base construction and for processing of the user’s input.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sarfaraz Jelil|AUTHOR Sarfaraz Jelil]]^^1^^, [[Abhishek Shrivastava|AUTHOR Abhishek Shrivastava]]^^1^^, [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]]^^2^^, [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]^^1^^, [[Rohit Sinha|AUTHOR Rohit Sinha]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^IIT Guwahati, India; ^^2^^NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3665–3666&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This work describes a multi-level speaker verification (SV) framework that is accessible via a graphical user interface (GUI) with attendance as an application. This framework has three different modalities of SV system, namely, voice-password, text-dependent and text-independent. The decision for attendance marking can be taken from each of the modalities or by fusion. There are two operating modes of the developed GUI, which are user and debug modes. The user mode is for general users to mark attendance, whereas the debug mode is to study the behavior of the three modalities from deployment point of view. The speech waveforms, different plots and scores can be analyzed in the debug mode for analysis. The system has been deployed successfully for regular attendance marking among a closed group in a laboratory environment.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jibin Wu|AUTHOR Jibin Wu]]^^1^^, [[Zihan Pan|AUTHOR Zihan Pan]]^^1^^, [[Malu Zhang|AUTHOR Malu Zhang]]^^1^^, [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]]^^1^^, [[Yansong Chua|AUTHOR Yansong Chua]]^^2^^, [[Haizhou Li|AUTHOR Haizhou Li]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^NUS, Singapore; ^^2^^A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3667–3668&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Humans perform remarkably well at sound classification that is used as cues to support high-level cognitive functions. Inspired by the anatomical structure of human cochlea and auditory attention mechanism, we present a novel neuromorphic sound recognition system that integrates an event-driven auditory front-end and a biologically plausible spiking neural network classifier (SNN) for robust sound and speech recognition. Due to its event-driven nature, the SNN classifier is several orders of magnitude more energy efficient than deep learning classifier, therefore, it is suitable for many applications in wearable devices.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Shoukang Hu|AUTHOR Shoukang Hu]]^^1^^, [[Shansong Liu|AUTHOR Shansong Liu]]^^1^^, [[Heng Fai Chang|AUTHOR Heng Fai Chang]]^^2^^, [[Mengzhe Geng|AUTHOR Mengzhe Geng]]^^1^^, [[Jiani Chen|AUTHOR Jiani Chen]]^^1^^, [[Lau Wing Chung|AUTHOR Lau Wing Chung]]^^1^^, [[To Ka Hei|AUTHOR To Ka Hei]]^^1^^, [[Jianwei Yu|AUTHOR Jianwei Yu]]^^1^^, [[Ka Ho Wong|AUTHOR Ka Ho Wong]]^^1^^, [[Xunying Liu|AUTHOR Xunying Liu]]^^1^^, [[Helen Meng|AUTHOR Helen Meng]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^CUHK, China; ^^2^^University of Cambridge, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3669–3670&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Speech disorders affect many people around the world and introduce a negative impact on their quality of life. Dysarthria is a neural-motor speech disorder that obstructs the normal production of speech. Current automatic speech recognition (ASR) systems are developed for normal speech. They are not suitable for accurate recognition of disordered speech. To the best of our knowledge, the majority of disordered speech recognition systems developed to date are for English. In this paper, we present two disordered speech recognition systems for both English and Cantonese. Both systems demonstrate competitive performance when compared with the Google speech recognition API and human recognition results.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Florian Schiel|AUTHOR Florian Schiel]], [[Thomas Kisler|AUTHOR Thomas Kisler]]
</p><p class="cpabstractcardaffiliationlist">LMU München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3671–3672&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this Show&Tell contribution we will demonstrate two new public web services provided by the CLARIN centre Bavarian Archive for Speech Signals at the university of Munich. ‘Subtitle’ is a service that allows users to automatically create and add a subtitle track to video recordings; ‘Anonymizer’ can be applied to media files and their respective annotations in order to mask user-defined spoken terms in the signal as well as in the annotation. Both services are accessible via a RESTful API or a user-friendly web-interface. In the demo we will demonstrate both services independently and in combination (anonymizing subtitles) using the web interface.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jana Voße|AUTHOR Jana Voße]], [[Petra Wagner|AUTHOR Petra Wagner]]
</p><p class="cpabstractcardaffiliationlist">Universität Bielefeld, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3673–3674&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Annotating prominence phenomena in speech corpora is a challenging task, as it requires many resources. Therefore, several approaches have emerged in the past decades to automatise the process of detecting and annotating prominence. Among these, [1] propose a fully automatically operating acoustic prominence detection and annotation tool that yields promising results. The present work aims at making this tool accessible to a broader community and more inviting in the manipulation of features. To do so, we re-implemented the prominence annotation approach of [1] in the programming language of the software Praat [2], which is commonly used for speech analysis purposes within several areas of research. By implementing a user-friendly interface, the Praat-based prominence detection and annotation tool can be controlled without any source code interaction, which makes it accessible to users with differing levels of programming experience. More experienced users have the option to directly work with the comprehensively commented and documented source code to manipulate or add features within the prominence detection and annotation process. Providing a more accessible and easier to manipulate re-implementation of the tool of [1], we want to contribute to further developments in the area of automatic prominence detection and annotation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mónica Domínguez|AUTHOR Mónica Domínguez]], [[Patrick Louis Rohrer|AUTHOR Patrick Louis Rohrer]], [[Juan Soler-Company|AUTHOR Juan Soler-Company]]
</p><p class="cpabstractcardaffiliationlist">Universitat Pompeu Fabra, Spain</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3675–3676&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>PyToBI is introduced as a user-friendly toolkit for the automatic annotation of intonation contours using the Tones and Breaks Indexes convention, known as ToBI.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Golan Levy|AUTHOR Golan Levy]], [[Raquel Sitman|AUTHOR Raquel Sitman]], [[Ido Amir|AUTHOR Ido Amir]], [[Eduard Golshtein|AUTHOR Eduard Golshtein]], [[Ran Mochary|AUTHOR Ran Mochary]], [[Eilon Reshef|AUTHOR Eilon Reshef]], [[Roi Reichart|AUTHOR Roi Reichart]], [[Omri Allouche|AUTHOR Omri Allouche]]
</p><p class="cpabstractcardaffiliationlist">Gong.io, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3677–3678&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>With the dramatic improvement in automated speech recognition (ASR) accuracy, a variety of machine learning (ML) and natural language processing (NLP) algorithms are designed for human conversation data. Supervised machine learning and particularly deep neural networks (DNNs) require large annotated datasets in order to train high quality models. In this paper we describe Gecko, a tool for annotation of speech and language features of conversations. Gecko allows efficient and effective segmentation of the voice signal by speaker as well as annotation of the linguistic content of the conversation. A key feature of Gecko is the presentation of the output of automatic segmentation and transcription systems in an intuitive user interface for editing. Gecko allows annotation of Voice Activity Detection (VAD), Diarization, Speaker Identification and ASR outputs on a large scale. Both annotators and data scientists have reported improvement in the speed and accuracy of work. Gecko is publicly available for the benefit of the community at https://github.com/gong-io/gecko.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Roger Yu-Hsiang Lo|AUTHOR Roger Yu-Hsiang Lo]], [[Kathleen Currie Hall|AUTHOR Kathleen Currie Hall]]
</p><p class="cpabstractcardaffiliationlist">University of British Columbia, Canada</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3679–3680&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the features of a free, open-source software tool,  Sign Language Phonetic Annotator+Analyzer (SLP-AA), which is designed to facilitate phonetic/phonological transcription and analysis on sign languages.

The software supports two modes: the Annotator mode allows the user to build phonetically transcribed corpora of sign languages, and the Analyzer mode lets the user perform phonological searches or analyses on the built corpora. We give a detailed description of one type of phonological search — the extended finger search function — and point out a potential application of this function with respect to sign language research.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Xinjian Li|AUTHOR Xinjian Li]], [[Zhong Zhou|AUTHOR Zhong Zhou]], [[Siddharth Dalmia|AUTHOR Siddharth Dalmia]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Florian Metze|AUTHOR Florian Metze]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3681–3682&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>While low resource speech recognition has attracted a lot of attention from the speech community, there are a few tools available to facilitate low resource speech collection. In this work, we present SANTLR: Speech Annotation Toolkit for Low Resource Languages. It is a web-based toolkit which allows researchers to easily collect and annotate a corpus of speech in a low resource language. Annotators may use this toolkit for two purposes: transcription or recording. In transcription, annotators would transcribe audio files provided by the researchers; in recording, annotators would record their voice by reading provided texts. We highlight two properties of this toolkit. First, SANTLR has a very user-friendly User Interface (UI). Both researchers and annotators may use this simple web interface to interact. There is no requirement for the annotators to have any expertise in audio or text processing. The toolkit would handle all preprocessing and postprocessing steps. Second, we employ a multi-step ranking mechanism facilitate the annotation process. In particular, the toolkit would give higher priority to utterances which are easier to annotate and are more beneficial to achieving the goal of the annotation, e.g. quickly training an acoustic model.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Martin Grůber|AUTHOR Martin Grůber]], [[Jakub Vít|AUTHOR Jakub Vít]], [[Jindřich Matoušek|AUTHOR Jindřich Matoušek]]
</p><p class="cpabstractcardaffiliationlist">University of West Bohemia, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3683–3684&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents a web-based GUI frontend for a backend TTS system, including an editor of the synthesized speech. The tool allows synthesizing speech from general texts using all available synthesis methods with both modifications within the speech synthesis process and subsequent modifications of the synthesized speech targeting for instance speech prolongation, shortening, pitch or volume increasing or decreasing, etc.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Olivier Perrotin|AUTHOR Olivier Perrotin]]^^1^^, [[Ian McLoughlin|AUTHOR Ian McLoughlin]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^GIPSA-lab (UMR 5216), France; ^^2^^University of Kent, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3685–3686&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This article introduces  GFM-Voc, a new system that allows high-quality and real-time voice modification, including both vocalic formants shifting, and voice quality manipulation. In particular, the system is based on the implementation of a newly developed source-filter decomposition method, called GFM-IAIF, that allows the extraction of both vocal tract and glottis spectral envelopes as a compact set of filter parameters. The latter are then controllable through a GUI, before re-synthesis of the speech with the modified parameters. The system requires no training, and operates on any voice, male or female, without tuning. Given the close link between spectral parameters and speech perception, this system provides an intuitive way to independently manipulate the vocalic formants and the spectral shape of the glottal flow that is responsible for voice quality perception. Additionally, rules have been implemented to link the glottis parameters to high-level voice quality parameters such as vocal force and tenseness. Examples of applications for this system include expressive speech synthesis, by adding the system at the end of a speech synthesiser pipeline, auditory feedback perturbation to study a speaker’s response to modified speech, and speech therapy.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Éva Székely|AUTHOR Éva Székely]], [[Gustav Eje Henter|AUTHOR Gustav Eje Henter]], [[Jonas Beskow|AUTHOR Jonas Beskow]], [[Joakim Gustafson|AUTHOR Joakim Gustafson]]
</p><p class="cpabstractcardaffiliationlist">KTH, Sweden</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3687–3688&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Extemporaneous speech is a delivery type in public speaking which uses a structured outline but is otherwise delivered conversationally, off the cuff. This demo uses a natural-sounding spontaneous conversational speech synthesiser to simulate this delivery style. We resynthesised the beginnings of two Interspeech keynote speeches with TTS that produces multiple different versions of each utterance that vary in fluency and filled-pause placement. The platform allows the user to mark the samples according to any perceptual aspect of interest, such as certainty, authenticity, confidence, etc. During the speech delivery, they can decide on the fly which realisation to play, addressing their audience in a connected, conversational fashion. Our aim is to use this platform to explore speech synthesis evaluation options from a production perspective and in situational contexts.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Lucas Kessler|AUTHOR Lucas Kessler]], [[Cecilia Ovesdotter Alm|AUTHOR Cecilia Ovesdotter Alm]], [[Reynold Bailey|AUTHOR Reynold Bailey]]
</p><p class="cpabstractcardaffiliationlist">RIT, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3689–3690&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Utilizing a existing neural text-to-speech synthesis architecture to generate person names and comparing them to reference names read aloud in a formal context, we explore how bias resulting from training data impacts the synthesis of person names, focusing on frequency and origin of names. Long-term, we aim to apply voice conversion of person names to aid the effective reading aloud of such names in celebratory ceremonies.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Luís Bernardo|AUTHOR Luís Bernardo]]^^1^^, [[Mathieu Giquel|AUTHOR Mathieu Giquel]]^^1^^, [[Sebastião Quintas|AUTHOR Sebastião Quintas]]^^2^^, [[Paulo Dimas|AUTHOR Paulo Dimas]]^^1^^, [[Helena Moniz|AUTHOR Helena Moniz]]^^1^^, [[Isabel Trancoso|AUTHOR Isabel Trancoso]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Unbabel, Portugal; ^^2^^Universidade de Lisboa, Portugal</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3691–3692&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Unbabel Talk is a speech-to-speech translation application that provides human certified translations for voice instant messaging (IM) in multilingual scenarios. By combining Unbabel’s translation pipeline with state-of-the-art automatic speech recognition (ASR) and text-to-speech (TTS) models, Unbabel Talk can be used to send a voice message in a language of choice through popular messaging platforms. The app further ensures that translations have high quality, either by certifying them through Unbabel’s own quality estimation (QE) tool and/or through Unbabel’s community of translators. There are two versions of the app. On version 1, the app synthesizes audio that can be delivered with male or female standard voices. Version 2 has features that are currently being developed, such as voice morphing and transcription correction through Unbabel’s community.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Azam Rabiee|AUTHOR Azam Rabiee]], [[Tae-Ho Kim|AUTHOR Tae-Ho Kim]], [[Soo-Young Lee|AUTHOR Soo-Young Lee]]
</p><p class="cpabstractcardaffiliationlist">KAIST, Korea</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 3693–3694&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Emotion is not limited to discrete categories of happy, sad, angry, fear, disgust, surprise, and so on. Instead, each emotion category is projected into a set of nearly independent dimensions, named pleasure (or valence), arousal, and dominance, known as PAD. The value of each dimension varies from -1 to 1, such that the neutral emotion is in the center with all-zero values. Training an emotional continuous text-to-speech (TTS) synthesizer on the independent dimensions provides the possibility of emotional speech synthesis with unlimited emotion categories. Our end-to-end neural speech synthesizer is based on the well-known Tacotron. Empirically, we have found the optimum network architecture for injecting the 3D PADs. Moreover, the PAD values are adjusted for the speech synthesis purpose.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Björn W. Schuller|AUTHOR Björn W. Schuller]]^^1^^, [[Anton Batliner|AUTHOR Anton Batliner]]^^2^^, [[Christian Bergler|AUTHOR Christian Bergler]]^^3^^, [[Florian B. Pokorny|AUTHOR Florian B. Pokorny]]^^4^^, [[Jarek Krajewski|AUTHOR Jarek Krajewski]]^^5^^, [[Margaret Cychosz|AUTHOR Margaret Cychosz]]^^6^^, [[Ralf Vollmann|AUTHOR Ralf Vollmann]]^^7^^, [[Sonja-Dana Roelen|AUTHOR Sonja-Dana Roelen]]^^8^^, [[Sebastian Schnieder|AUTHOR Sebastian Schnieder]]^^8^^, [[Elika Bergelson|AUTHOR Elika Bergelson]]^^9^^, [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]]^^10^^, [[Amanda Seidl|AUTHOR Amanda Seidl]]^^11^^, [[Anne S. Warlaumont|AUTHOR Anne S. Warlaumont]]^^12^^, [[Lisa Yankowitz|AUTHOR Lisa Yankowitz]]^^13^^, [[Elmar Nöth|AUTHOR Elmar Nöth]]^^3^^, [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]]^^2^^, [[Simone Hantke|AUTHOR Simone Hantke]]^^2^^, [[Maximilian Schmitt|AUTHOR Maximilian Schmitt]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Imperial College London, UK; ^^2^^Universität Augsburg, Germany; ^^3^^FAU Erlangen-Nürnberg, Germany; ^^4^^Medizinische Universität Graz, Austria; ^^5^^Bergische Universität Wuppertal, Germany; ^^6^^University of California at Berkeley, USA; ^^7^^Universität Graz, Austria; ^^8^^Rheinische Fachhochschule Köln, Germany; ^^9^^Duke University, USA; ^^10^^LSCP (UMR 8554), France; ^^11^^Purdue University, USA; ^^12^^University of California at Los Angeles, USA; ^^13^^University of Pennsylvania, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2378–2382&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The INTERSPEECH 2019 Computational Paralinguistics Challenge addresses four different problems for the first time in a research competition under well-defined conditions: In the  Styrian Dialects Sub-Challenge, three types of Austrian-German dialects have to be classified; in the  Continuous Sleepiness Sub-Challenge, the sleepiness of a speaker has to be assessed as regression problem; in the  Baby Sound Sub-Challenge, five types of infant sounds have to be classified; and in the  Orca Activity Sub-Challenge, orca sounds have to be detected. We describe the Sub-Challenges and baseline feature extraction and classifiers, which include data-learnt (supervised) feature representations by the ‘usual’ ComParE and BoAWfeatures, and deep unsupervised representation learning using the  auDeep toolkit.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Dominik Schiller|AUTHOR Dominik Schiller]], [[Tobias Huber|AUTHOR Tobias Huber]], [[Florian Lingenfelser|AUTHOR Florian Lingenfelser]], [[Michael Dietz|AUTHOR Michael Dietz]], [[Andreas Seiderer|AUTHOR Andreas Seiderer]], [[Elisabeth André|AUTHOR Elisabeth André]]
</p><p class="cpabstractcardaffiliationlist">Universität Augsburg, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2423–2427&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Underwater sounds provide essential information for marine researchers to study sea mammals. During long-term studies large amounts of sound signals are being recorded using hydrophones. To facilitate the time consuming process of manually evaluating the recorded data, computational systems are often employed. Recent approaches utilize Convolutional Neural Networks (CNNs) to analyze spectrograms extracted from the audio signal. In this paper we explore the potential of relevance analysis to enhance the performance of existing CNN approaches. For this purpose, we present a fusion system that utilizes intermediate outputs of three state of the art CNNs, which are fine tuned to recognize whale sounds in spectrograms. Hereby we use Explainable Artificial Intelligence (XAI) to asses the relevance of each feature within the obtained representations. Based on those relevance values, we create novel masking algorithms to extract significant subsets of respective representations. These subsets are used to train an ensemble of classification systems that are serving as input for the final fusion step. We observe that a classification system can benefit from the inclusion of Relevance-based Feature Masking in terms of improved performance and reduced input dimensionality. The presented work is part of the INTERSPEECH 2019 Computational Paralinguistics Challenge.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Marie-José Caraty|AUTHOR Marie-José Caraty]], [[Claude Montacié|AUTHOR Claude Montacié]]
</p><p class="cpabstractcardaffiliationlist">STIH (EA 4509), France</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2428–2432&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The INTERSPEECH 2019 Orca Activity Challenge consists in the detection of the Orca sounds from underwater audio signal. Orca can produce a wide variety of sounds categorized in clicks, whistles and pulsed calls. Clicks are useful for echolocation, whistles and pulsed calls are used as social signals. Experiments were conducted on DeepAL Fieldwork Data (DLFD). Underwater sounds were recorded in northern British Columbia by a hydrophones array. Recordings were labeled by marine biologists in Orca sounds or Noise. We have investigated multiresolution analysis according to the three main relevant acoustic levels: spatial, temporal and spectral. For this purpose, we studied the beamforming array analysis, the multitemporal resolution and the multilevel wavelet decomposition. For the spatial level, a beamforming algorithm was used for denoising the underwater audio signal. For the temporal level, two sets of multitemporal three-level features were extracted using pyramidal representation. For the spectral level, in order to detect transient sound, wavelet analysis was computed using various wavelet families. At last, an Orca Activity detector was designed combining ComParE set with multitemporal and multilevel wavelet features. Experiments on the Test set have shown a significant improvement of 0.051, compared to the baseline performance of the Challenge (0.866).</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Haiwei Wu|AUTHOR Haiwei Wu]], [[Weiqing Wang|AUTHOR Weiqing Wang]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2433–2437&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper introduces our approaches for the orca activity and continuous sleepiness tasks in the Interspeech ComParE Challenge 2019. For the orca activity detection task, we extract deep embeddings using several deep convolutional neural networks, followed by the Support Vector Machine (SVM) based back end classifier. Both STFT spectrogram and log mel-spectrogram are explored as input features. To increase the size of training data and deal with the data imbalance, we propose four kinds of data augmentation. We also investigate the different ways of fusion for multi-channel input data. Besides the official baseline system, to better evaluate the performance of our deep embedding system, we employ the Fisher Vector (FV) encoding on various kinds of acoustic features as an alternative baseline. Experimental results show that our proposed methods significantly outperform the baselines and achieve 0.948 AUC and 0.365 Spearman’s Correlation Coefficient on the orca activity and continuous sleepiness evaluation data, respectively.</p></div>
\rules except wikilink

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[S. Pavankumar Dubagunta|AUTHOR S. Pavankumar Dubagunta]], [[Mathew Magimai-Doss|AUTHOR Mathew Magimai-Doss]]
</p><p class="cpabstractcardaffiliationlist">Idiap Research Institute, Switzerland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2383–2387&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper addresses the Styrian Dialect sub-challenge of the INTERSPEECH 2019 Computational Paralinguistics Challenge. We treat this challenge as dialect identification with no linguistic resources/knowledge and with limited acoustic resources, and develop end-to-end raw waveform modelling based methods that incorporate knowledge related to speech production. In this direction, we investigate two methods: (a) modelling the signals after source system decomposition and (b) transferring knowledge from articulatory feature models trained on English language. Our investigations show that the proposed approaches on the ComParE 2019 Styrian dialect data yield systems that perform better than low level descriptor-based and bag-of-audio-word representation based approaches and comparable to sequence-to-sequence auto-encoder based approach.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Daniel Elsner|AUTHOR Daniel Elsner]], [[Stefan Langer|AUTHOR Stefan Langer]], [[Fabian Ritz|AUTHOR Fabian Ritz]], [[Robert Mueller|AUTHOR Robert Mueller]], [[Steffen Illium|AUTHOR Steffen Illium]]
</p><p class="cpabstractcardaffiliationlist">LMU München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2388–2392&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Detecting sleepiness from spoken language is an ambitious task, which is addressed by the Interspeech 2019 Computational Paralinguistics Challenge (ComParE). We propose an end-to-end deep learning approach to detect and classify patterns reflecting sleepiness in the human voice. Our approach is based solely on a moderately complex deep neural network architecture. It may be applied directly on the audio data without requiring any specific feature engineering, thus remaining transferable to other audio classification tasks. Nevertheless, our approach performs similar to state-of-the-art machine learning models.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Thomas Kisler|AUTHOR Thomas Kisler]], [[Raphael Winkelmann|AUTHOR Raphael Winkelmann]], [[Florian Schiel|AUTHOR Florian Schiel]]
</p><p class="cpabstractcardaffiliationlist">LMU München, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2393–2397&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Many classifiers struggle when confronted with a high dimensional feature space like in the data sets provided for the Interspeech ComParE challenge. This is because most features do not significantly contribute to the prediction. To alleviate this problem, we propose a feature selection based on a Genetic Algorithm (GA) that uses an SVM as the fitness function. We show that this yields a reduced subset (1) which results in an Unweighted Average Recall (UAR) that beats the challenge baseline on the development set for the 3-class classification problem. Further, we extract an additional per-phoneme feature set, where the features are inspired by the ComParE features. On this set the same GA-based feature selection is performed and the resulting set is used for training in isolation (2) and in combination with the aforementioned reduced challenge features (3). Five classifiers were tested on the three subsets, namely SVMs, DNNs, GBMs, RFs, and regularized regression. All classifiers achieved a UAR above the baseline on all three sets. The best performance on set (1) was achieved by an SVM using an RBF kernel and on sets (2) and (3) by a fusion of classifiers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sung-Lin Yeh|AUTHOR Sung-Lin Yeh]]^^1^^, [[Gao-Yi Chao|AUTHOR Gao-Yi Chao]]^^1^^, [[Bo-Hao Su|AUTHOR Bo-Hao Su]]^^1^^, [[Yu-Lin Huang|AUTHOR Yu-Lin Huang]]^^1^^, [[Meng-Han Lin|AUTHOR Meng-Han Lin]]^^1^^, [[Yin-Chun Tsai|AUTHOR Yin-Chun Tsai]]^^1^^, [[Yu-Wen Tai|AUTHOR Yu-Wen Tai]]^^1^^, [[Zheng-Chi Lu|AUTHOR Zheng-Chi Lu]]^^1^^, [[Chieh-Yu Chen|AUTHOR Chieh-Yu Chen]]^^2^^, [[Tsung-Ming Tai|AUTHOR Tsung-Ming Tai]]^^2^^, [[Chiu-Wang Tseng|AUTHOR Chiu-Wang Tseng]]^^2^^, [[Cheng-Kuang Lee|AUTHOR Cheng-Kuang Lee]]^^2^^, [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^National Tsing Hua University, Taiwan; ^^2^^NVIDIA, Taiwan</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2398–2402&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this study, we present extensive attention-based networks with data augmentation methods to participate in the INTERSPEECH 2019 ComPareE Challenge, specifically the three Sub-challenges: Styrian Dialect Recognition, Continuous Sleepiness Regression, and Baby Sound Classification. For Styrian Dialect Sub-challenge, these dialects are classified into Northern Styrian (NorthernS), Urban Sytrian (UrbanS), and Eastern Styrian (EasternS). Our proposed model achieves an UAR 49.5% on the test set, which is 2.5% higher than the baseline. For Continuous Sleepiness Sub-challenge, it is defined as a regression task with score range from 1 (extremely alert) to 9 (very sleepy). In this work, our proposed architecture achieves a Spearman correlation 0.369 on the test set, which surpasses the baseline model by 0.026. For Baby Sound Sub-challenge, the infant sounds are classified into canonical babbling, non-canonical babbling, crying, laughing and junk/other, and our proposed augmentation framework achieves an UAR of 62.39% on the test set, which outperforms the baseline by about 3.7%. Overall, our analyses demonstrate that by fusing attention network models with conventional support vector machine benefits the test set robustness, and the recognition rates of these paralinguistic attributes generally improve when performing data augmentation.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Peter Wu|AUTHOR Peter Wu]], [[SaiKrishna Rallabandi|AUTHOR SaiKrishna Rallabandi]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Eric Nyberg|AUTHOR Eric Nyberg]]
</p><p class="cpabstractcardaffiliationlist">Carnegie Mellon University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2403–2407&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we present our submission to the INTERSPEECH 2019 ComParE Sleepiness challenge. By nature, the given speech dataset is an archetype of one with relatively limited samples, a complex underlying data distribution, and subjective ordinal labels. We propose a novel approach termed ordinal triplet loss (OTL) that can be readily added to any deep architecture in order to address the above data constraints. Ordinal triplet loss implicitly maps inputs into a space where similar samples are closer to each other than different ones. We demonstrate the efficacy of our approach on the aforementioned task.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Vijay Ravi|AUTHOR Vijay Ravi]], [[Soo Jin Park|AUTHOR Soo Jin Park]], [[Amber Afshan|AUTHOR Amber Afshan]], [[Abeer Alwan|AUTHOR Abeer Alwan]]
</p><p class="cpabstractcardaffiliationlist">University of California at Los Angeles, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2408–2412&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Sleepiness monitoring and prediction has many potential applications, such as being a safety feature in driver-assistance systems. In this study, we address the ComparE 2019 Continuous Sleepiness task of estimating the degree of sleepiness from voice data. The voice quality feature set was proposed to capture the acoustic characteristics related to the degree of sleepiness of a speaker, and between-frame entropy was proposed as an instantaneous measure of the speaking rate. An outlier elimination on the training data using between-frame entropy enhanced the system robustness in all conditions. This was followed by a regression system to predict the degree of sleepiness. Utterances were represented using i-vectors computed from voice quality features. Similar systems were also developed using mel-frequency cepstral coefficients and the ComParE16 feature set. These three systems were combined using score-level fusion. Results suggested complementarity between these feature sets. The complete system outperformed the baseline system which used the ComParE16 feature set. A relative improvement of 19.5% and 5.4% was achieved on the development and the test datasets, respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]
</p><p class="cpabstractcardaffiliationlist">MTA-SZTE RGAI, Hungary</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2413–2417&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The 2019 INTERSPEECH Computational Paralinguistics Challenge (ComParE) consists of four Sub-Challenges, where the tasks are to identify different German (Austrian) dialects, estimate how sleepy the speaker is, what type of sound a given baby uttered, and whether there is a sound of an orca (killer whale) present in the recording. Following our team’s last year entry, we continue our research by looking for feature set types that might be employed on a wide variety of tasks without alteration. This year, besides the standard 6373-sized ComParE functionals, we experimented with the Fisher vector representation along with the Bag-of-Audio-Words technique. To adapt Fisher vectors from the field of image processing, we utilized them on standard MFCC features instead of the originally intended SIFT attributes (which describe local objects found in the image). Our results indicate that using these feature representation techniques was indeed beneficial, as we could outperform the baseline values in three of the four Sub-Challenges; the performance of our approach seems to be even higher if we consider that the baseline scores were obtained by combining different methods as well.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]
</p><p class="cpabstractcardaffiliationlist">NUS, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2418–2422&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The orca activity detection is a challenging task that prevails in underwater acoustics. Signal level discrimination of orca activity to that of noise signal is minimum, hence a topic of interest. The orca activity detection is a subtask of Computational Paralinguistics Challenge (ComParE) 2019. In this work, we study a few novel acoustic cues based on phase and long-term information to capture the artifacts from signal to detect orca activity. The phase of signal possesses definite signal characteristics which is completely random in case of noise signal. In this regard, we investigate instantaneous phase as an artifact for orca activity detection. Additionally, we believe that the long-term features can be more helpful to detect such artifacts than the conventional short-term acoustic features. We explore these two directions along with the state-of-the-art baselines on ComParE functionals, bag-of-audio-words and auDeep features for ComParE 2019. The studies reveal that the instantaneous phase as a single feature can perform better than the fusion of three baselines given as a benchmark for the challenge. Further, we perform a score level fusion of the acoustic features and the three baselines that further enhances the performance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]]^^1^^, [[Julien van Hout|AUTHOR Julien van Hout]]^^1^^, [[Colleen Richey|AUTHOR Colleen Richey]]^^1^^, [[Mitchell McLaren|AUTHOR Mitchell McLaren]]^^1^^, [[Maria A. Barrios|AUTHOR Maria A. Barrios]]^^2^^, [[Aaron Lawson|AUTHOR Aaron Lawson]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^SRI International, USA; ^^2^^Lab41, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>The VOiCES from a Distance Challenge 2019 was designed to foster research in the area of speaker recognition and automatic speech recognition (ASR) with a special focus on single-channel distant/far-field audio under various noisy conditions. The challenge was based on the recently released VOiCES corpus, with 60 international teams involved, of which 24 teams participated in the evaluation. In this paper, we separately present the challenge’s speaker recognition and ASR tasks. For each task, we outline the training, development, and test data, as well as the evaluation metrics. Then, we report and discuss the results in light of the participant-provided system descriptions, to highlight the major factors contributing to high performance in distant speech processing.

This paper also appears in session Wed-SS-7-3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yulong Liang|AUTHOR Yulong Liang]], [[Lin Yang|AUTHOR Lin Yang]], [[Xuyang Wang|AUTHOR Xuyang Wang]], [[Yingjie Li|AUTHOR Yingjie Li]], [[Chen Jia|AUTHOR Chen Jia]], [[Junjie Wang|AUTHOR Junjie Wang]]
</p><p class="cpabstractcardaffiliationlist">Lenovo, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2483–2487&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes our submission to the “VOiCES from a Distance Challenge 2019”, which is designed to foster research in the area of speaker recognition and automatic speech recognition (ASR) with a special focus on single channel distant/far-field audio under noisy conditions. We focused on the ASR task under a fixed condition in which the training data was clean and small, but the development data and test data were noisy and unmatched. Thus we developed the following major technical points for our system, which included data augmentation, weighted-prediction-error based speech enhancement, acoustic models based on different networks, TDNN or LSTM based language model rescore, and ROVER. Experiments on the development set and the evaluation set showed that the front-end processing, data augmentation and system fusion made the main contributions for the performance increasing, and the final word error rate results based on our system scored 15.91% and 19.6% respectively.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yiming Wang|AUTHOR Yiming Wang]], [[David Snyder|AUTHOR David Snyder]], [[Hainan Xu|AUTHOR Hainan Xu]], [[Vimal Manohar|AUTHOR Vimal Manohar]], [[Phani Sankar Nidadavolu|AUTHOR Phani Sankar Nidadavolu]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2488–2492&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the system developed by the JHU team for automatic speech recognition (ASR) of the VOiCES from a Distance Challenge 2019, focusing on single channel distant/farfield audio under noisy conditions. We participated in the Fixed Condition track, where the systems are only trained on an 80-hour subset of the Librispeech corpus provided by the organizer. The training data was first augmented with both background noises and simulated reverberation. We then trained factorized TDNN acoustic models that differed only in their use of i-vectors for adaptation. Both systems utilized RNN language models trained on original and reversed text for rescoring. We submitted three systems: the system using i-vectors with WER 19.4% on the development set, the system without i-vectors that achieved WER 19.0%, and the their lattice-level fusion with WER 17.8%. On the evaluation set, our best system achieves 23.9% WER.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Danwei Cai|AUTHOR Danwei Cai]], [[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Weicheng Cai|AUTHOR Weicheng Cai]], [[Ming Li|AUTHOR Ming Li]]
</p><p class="cpabstractcardaffiliationlist">Duke Kunshan University, China</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2493–2497&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper, we present the DKU system for the speaker recognition task of the VOiCES from a distance challenge 2019. We investigate the whole system pipeline for the far-field speaker verification, including data pre-processing, short-term spectral feature representation, utterance-level speaker modeling, backend scoring, and score normalization. Our best single system employs a residual neural network trained with angular softmax loss. Also, the weighted prediction error algorithms can further improve performance. It achieves 0.3668 minDCF and 5.58% EER on the evaluation set by using a simple cosine similarity scoring. Finally, the submitted primary system obtains 0.3532 minDCF and 4.96% EER on the evaluation set.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sergey Novoselov|AUTHOR Sergey Novoselov]]^^1^^, [[Aleksei Gusev|AUTHOR Aleksei Gusev]]^^1^^, [[Artem Ivanov|AUTHOR Artem Ivanov]]^^1^^, [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]]^^1^^, [[Andrey Shulipa|AUTHOR Andrey Shulipa]]^^2^^, [[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]]^^1^^, [[Vladimir Volokhov|AUTHOR Vladimir Volokhov]]^^1^^, [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^ITMO University, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>This paper presents the Speech Technology Center (STC) speaker recognition (SR) systems submitted to the VOiCES From a Distance challenge 2019. The challenge’s SR task is focused on the problem of speaker recognition in single channel distant/far-field audio under noisy conditions. In this work we investigate different deep neural networks architectures for speaker embedding extraction to solve the task. We show that deep networks with residual frame level connections outperform more shallow architectures. Simple energy based speech activity detector (SAD) and automatic speech recognition (ASR) based SAD are investigated in this work. We also address the problem of data preparation for robust embedding extractors training. The reverberation for the data augmentation was performed using automatic room impulse response generator. In our systems we used discriminatively trained cosine similarity metric learning model as embedding backend. Scores normalization procedure was applied for each individual subsystem we used. Our final submitted systems were based on the fusion of different subsystems. The results obtained on the VOiCES development and evaluation sets demonstrate effectiveness and robustness of the proposed systems when dealing with distant/far-field audio under noisy conditions.

This paper also appears in session Wed-SS-7-3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Pavel Matějka|AUTHOR Pavel Matějka]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Hossein Zeinali|AUTHOR Hossein Zeinali]], [[Ladislav Mošner|AUTHOR Ladislav Mošner]], [[Anna Silnova|AUTHOR Anna Silnova]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Ondřej Novotný|AUTHOR Ondřej Novotný]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]]
</p><p class="cpabstractcardaffiliationlist">Brno University of Technology, Czech Republic</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is a post-evaluation analysis of our efforts in VOiCES 2019 Speaker Recognition challenge. All systems in the fixed condition are based on x-vectors with different features and DNN topologies. The single best system reaches minDCF of 0.38 (5.25% EER) and a fusion of 3 systems yields minDCF of 0.34 (4.87% EER).We also analyze how speaker verification (SV) systems evolved in last few years and show results also on SITW 2016 Challenge. EER on the core-core condition of the SITW 2016 challenge dropped from 5.85% to 1.65% for system fusions submitted for SITW 2016 and VOiCES 2019, respectively. The less restrictive open condition allowed us to use external data for PLDA adaptation and achieve additional small performance improvement. In our submission to open condition, we used three x-vector systems and also one system based on i-vectors.

This paper also appears in session Wed-SS-7-3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Ivan Medennikov|AUTHOR Ivan Medennikov]]^^1^^, [[Yuri Khokhlov|AUTHOR Yuri Khokhlov]]^^1^^, [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]]^^2^^, [[Ivan Sorokin|AUTHOR Ivan Sorokin]]^^1^^, [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]]^^1^^, [[Vladimir Bataev|AUTHOR Vladimir Bataev]]^^1^^, [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]]^^1^^, [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]]^^1^^, [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]]^^1^^, [[Oleg Petrov|AUTHOR Oleg Petrov]]^^3^^, [[Alexander Zatvornitskiy|AUTHOR Alexander Zatvornitskiy]]^^4^^
</p><p class="cpabstractcardaffiliationlist">^^1^^STC-innovations, Russia; ^^2^^ITMO University, Russia; ^^3^^ITMO University, Russia; ^^4^^Speech Technology Center, Russia</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is a description of the Speech Technology Center (STC) automatic speech recognition (ASR) system for the “VOiCES from a Distance Challenge 2019”. We participated in the Fixed condition of the ASR task, which means that the only training data available was an 80-hour subset of the LibriSpeech corpus. The main difficulty of the challenge is a mismatch between clean training data and distant noisy development/ evaluation data. In order to tackle this, we applied room acoustics simulation and weighted prediction error (WPE) dereverberation. We also utilized well-known speaker adaptation using x-vector speaker embeddings, as well as novel room acoustics adaptation with R-vector room impulse response (RIR) embeddings. The system used a lattice-level combination of 6 acoustic models based on different pronunciation dictionaries and input features. N-best hypotheses were rescored with 3 neural network language models (NNLMs) trained on both words and sub-word units. NNLMs were also explored for out-of-vocabulary (OOV) words handling by means of artificial texts generation. The final system achieved Word Error Rate (WER) of 14.7% on the evaluation data, which is the best result in the challenge.

This paper also appears in session Wed-SS-7-3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Tze Yuang Chong|AUTHOR Tze Yuang Chong]], [[Kye Min Tan|AUTHOR Kye Min Tan]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Chang Huai You|AUTHOR Chang Huai You]], [[Hanwu Sun|AUTHOR Hanwu Sun]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]
</p><p class="cpabstractcardaffiliationlist">A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the development of the automatic speech recognition (ASR) system for the submission to the VOiCES from a Distance Challenge 2019. In this challenge, we focused on the fixed condition, where the task is to recognize reverberant and noisy speech based on a limited amount of clean training data. In our system, the mismatch between the training and testing conditions was reduced by using multi-style training where the training data was artificially contaminated with different reverberation and noise sources. Also, the Weighted Prediction Error (WPE) algorithm was used to reduce the reverberant effect in the evaluation data. To boost the system performance, acoustic models of different neural network architectures were trained and the respective systems were fused to give the final output. Moreover, an LSTM language model was used to rescore the lattice to compensate the weak n-gram model trained from only the transcription text. Evaluated on the development set, our system showed an average word error rate (WER) of 27.04%.

This paper also appears in session Wed-SS-7-3.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Arindam Jati|AUTHOR Arindam Jati]], [[Raghuveer Peri|AUTHOR Raghuveer Peri]], [[Monisankha Pal|AUTHOR Monisankha Pal]], [[Tae Jin Park|AUTHOR Tae Jin Park]], [[Naveen Kumar|AUTHOR Naveen Kumar]], [[Ruchir Travadi|AUTHOR Ruchir Travadi]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]
</p><p class="cpabstractcardaffiliationlist">University of Southern California, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2463–2467&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>The paper aims to address the task of speaker verification with single-channel, noisy and far-field speech by learning an embedding or feature representation that is invariant to different acoustic environments. We approach from two different directions. First, we adopt a newly proposed discriminative model that hybridizes Deep Neural Network (DNN) and Total Variability Model (TVM) with the goal of integrating their strengths. DNN helps learning a unique variable length representation of the feature sequence while TVM accumulates them into a fixed dimensional vector. Second, we propose a multitask training scheme with cross entropy and triplet losses in order to obtain good classification performance as well as distinctive speaker embeddings. The multi-task training is applied on both the DNN-TVM model and state-of-the-art x-vector system. The results on the development and evaluation sets of the  VOiCES challenge reveal that the proposed multi-task training helps improving models that are solely based on cross entropy, and it works better with DNN-TVM architecture than x-vector for the current task. Moreover, the multi-task models tend to show complementary relationship with cross entropy models, and thus improved performance is observed after fusion.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[David Snyder|AUTHOR David Snyder]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Nanxin Chen|AUTHOR Nanxin Chen]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Najim Dehak|AUTHOR Najim Dehak]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]
</p><p class="cpabstractcardaffiliationlist">Johns Hopkins University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2468–2472&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes the systems developed by the JHU team for the speaker recognition track of the 2019 VOiCES from a Distance Challenge. On this far-field task, we achieved good performance using systems based on state-of-the-art deep neural network (DNN) embeddings. In this paradigm, a DNN maps variable-length speech segments to speaker embeddings, called x-vectors, that are then classified using probabilistic linear discriminant analysis (PLDA). Our submissions were composed of three x-vector-based systems that differed primarily in the DNN architecture, temporal pooling mechanism, and training objective function. On the evaluation set, our best single-system submission used an extended time-delay architecture, and achieved 0.435 in actual DCF, the primary evaluation metric. A fusion of all three x-vector systems was our primary submission, and it obtained an actual DCF of 0.362.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jonathan Huang|AUTHOR Jonathan Huang]]^^1^^, [[Tobias Bocklet|AUTHOR Tobias Bocklet]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Intel, USA; ^^2^^Intel, Germany</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2473–2477&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper describes Intel’s speaker recognition systems for the VOiCES from a Distance Challenge 2019. Our submission consists of a Resnet50, and four Xvector systems trained with different data augmentation and input features. Our novel contributions include the use of additive margin softmax loss function and the use of invariant representation learning for some of our systems. To our knowledge, this has not been proposed for speaker recognition. We found that such complementary subsystems greatly improved the performance on the development set by late fusion on score level based on linear logistic regression. After fusion our system achieved on the development set EER, minDCF and actDCF of 2.2%, 0.27 and 0.27; and on the evaluation set 6.08%, 0.451 and 0.458, respectively. We discuss our results and give some insight on accuracy with respect to recording distance.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Hanwu Sun|AUTHOR Hanwu Sun]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Ivan Kukanov|AUTHOR Ivan Kukanov]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]
</p><p class="cpabstractcardaffiliationlist">A*STAR, Singapore</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2478–2482&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>This paper is about the I2R’s submission to the VOiCES from a distance speaker recognition challenge 2019. The submissions were based on the fusion of two x-vectors and two i-vectors subsystems. Main efforts have been focused on the frontend de-reverberation processing, PLDA backend design, score normalization and fusion studies in order to improve the system performance on single channel distant/far-field audio, under noisy conditions. We contribute to the fixed condition task under specific training and development data set. The experimental results showed that the de-reverberation approach can achieve 5% to 10% relative improvement on both EER and DCF for all subsystems and more than 10% improvement in the final fusion system on the Dev dataset and more than 15% relative improvement on the final evaluation dataset. Our final fusion system achieved about 2% EER rate and 0.240  minDCF on the Development Dataset.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Yermiyahu Hauptman|AUTHOR Yermiyahu Hauptman]]^^1^^, [[Ruth Aloni-Lavi|AUTHOR Ruth Aloni-Lavi]]^^1^^, [[Itshak Lapidot|AUTHOR Itshak Lapidot]]^^1^^, [[Tanya Gurevich|AUTHOR Tanya Gurevich]]^^2^^, [[Yael Manor|AUTHOR Yael Manor]]^^2^^, [[Stav Naor|AUTHOR Stav Naor]]^^2^^, [[Noa Diamant|AUTHOR Noa Diamant]]^^2^^, [[Irit Opher|AUTHOR Irit Opher]]^^1^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Afeka College, Israel; ^^2^^Ichilov, Israel</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2498–2502&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>In this paper we try to identify spectral and acoustic features that are distinctive of Parkinson’s disease patients’ speech. We investigate the contribution of several features’ families to a simple classification task that distinguishes between two balanced groups — patients with Parkinson’s disease and their age and gender matched group of Healthy Controls, both uttering sustained vowels. We achieve over 75% correct classification using a combination of acoustic and spectral features. We show that combining a few statistical functionals of these features yields very good results. This can be explained by two reasons: the first is that the statistics of Parkinson’s disease patients’ speech defer from those of Healthy people’s speech; the second and more important one is the gradual nature of the Parkinsonian speech that is manifested by the changes within an utterance. We speculate that the feature families that most contribute to the classification task are the most distinctive for detecting the disease and suggest testing this hypothesis by performing long-term analysis of both patient and healthy control subjects. Similar accuracy is obtained when analyzing spontaneous speech where each utterance is represented by a single normalized i-vector.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Carlo Drioli|AUTHOR Carlo Drioli]]^^1^^, [[Philipp Aichinger|AUTHOR Philipp Aichinger]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Università di Udine, Italy; ^^2^^Medizinische Universität Wien, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2503–2507&nbsp;&nbsp;&nbsp;&nbsp;
<a href="./IS2019/MEDIA/2338" class="externallinkbutton" target="_blank">{{$:/causal/Multimedia Button}}</a>
</span></p></div>

<div class="cpabstractcardabstract"><p>We discuss the representation of anterior-posterior (A-P) phase differences in vocal cord oscillations through a numerical biomechanical model involving lumped elements as well as distributed elements, i.e., delay lines. A dynamic glottal source model is illustrated in which the fold displacement along the vertical and the longitudinal dimensions is explicitly modeled by numerical waveguide components representing the propagation on the fold cover tissue. In contrast to other models of the same class, in which the reproduction of longitudinal phase differences are intrinsically impossible (e.g., in two-mass models) or not easy to control explicitly (e.g., in 3D 16-mass and multi-mass models in general), the one proposed here provides direct control over the amount of phase delay between folds oscillations at the posterior and anterior side of the glottis, while keeping the dynamic model simple and computationally efficient. The model is assessed by addressing the reproduction of typical oscillatory patterns observed in high-speed videoendoscopic data, in which A-P phase differences are observed. Experimental results are provided which demonstrate the ability of the approach to effectively reproduce different oscillatory patterns of the vocal folds.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sudarsana Reddy Kadiri|AUTHOR Sudarsana Reddy Kadiri]], [[Paavo Alku|AUTHOR Paavo Alku]]
</p><p class="cpabstractcardaffiliationlist">Aalto University, Finland</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2508–2512&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Voice source characteristics in different phonation types vary due to the tension of laryngeal muscles along with the respiratory effort. This study investigates the use of mel-frequency cepstral coefficients (MFCCs) derived from voice source waveforms for classification of phonation types in speech. The cepstral coefficients are computed using two source waveforms: (1) glottal flow waveforms estimated by the quasi-closed phase (QCP) glottal inverse filtering method and (2) approximate voice source waveforms obtained using the zero frequency filtering (ZFF) method. QCP estimates voice source waveforms based on the source-filter decomposition while ZFF yields source waveforms without explicitly computing the source-filter decomposition. Experiments using MFCCs computed from the two source waveforms show improved accuracy in classification of phonation types compared to the existing voice source features and conventional MFCC features. Further, it is observed that the proposed features have complimentary information to the existing features.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Sunghye Cho|AUTHOR Sunghye Cho]]^^1^^, [[Mark Liberman|AUTHOR Mark Liberman]]^^1^^, [[Neville Ryant|AUTHOR Neville Ryant]]^^1^^, [[Meredith Cola|AUTHOR Meredith Cola]]^^2^^, [[Robert T. Schultz|AUTHOR Robert T. Schultz]]^^2^^, [[Julia Parish-Morris|AUTHOR Julia Parish-Morris]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^University of Pennsylvania, USA; ^^2^^Children’s Hospital of Philadelphia, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2513–2517&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Autism Spectrum Disorder (ASD) is increasingly prevalent [1], but long waitlists hinder children’s access to expedient diagnosis and treatment. To begin addressing this problem, we developed an automated system to detect ASD using acoustic and text features drawn from short, unstructured conversations with naïve conversation partners (confederates). Seventy children (35 with ASD and 35 typically developing (TD)) discussed a range of generic topics (e.g., pets, family, hobbies, and sports) with confederates for approximately 5 minutes. A total of 624 features (352 acoustic + 272 text) were incorporated into a Gradient Boosting Model. To reduce dimensionality and avoid overfitting, we dropped insignificant features and applied feature reduction using Principal Component Analysis. Our final model was accurate substantially above chance levels. Predictive features were both acoustic-phonetic and lexical, from both participants and confederates. The goal of this project is to develop an automatic detection system for ASD that relies on very brief, generic, and natural conversations, which can eventually be used for ASD prescreening and triage in real-world settings such as doctor’s offices and schools.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Jean Schoentgen|AUTHOR Jean Schoentgen]]^^1^^, [[Philipp Aichinger|AUTHOR Philipp Aichinger]]^^2^^
</p><p class="cpabstractcardaffiliationlist">^^1^^Université libre de Bruxelles, Belgium; ^^2^^Medizinische Universität Wien, Austria</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2518–2522&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Perturbations of the strict periodicity of the glottal vibrations are relevant features of the voice quality of normophonic and dysphonic speakers. Vocal perturbations in healthy speakers are assigned different names according to the range of the typical perturbation frequencies. The objective of the presentation is to model jitter and flutter, which are in the > 20Hz and 10Hz – 20Hz range respectively, via a simulation of the fluctuations of the tension of the thyro-arytenoid muscle and compare simulated perturbations to jitter and flutter observed in vowels sustained by normophonic speakers. Perturbations of the strict periodicity of the glottal vibrations are relevant features of the voice quality of normophonic and dysphonic speakers. Vocal perturbations in healthy speakers are assigned different names according to the range of the typical perturbation frequencies. The objective of the presentation is to model jitter and flutter, which are in the > 20Hz and 10Hz – 20Hz range respectively, via a simulation of the fluctuations of the tension of the thyro-arytenoid muscle and compare simulated perturbations to jitter and flutter observed in vowels sustained by normophonic speakers.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Felix Schaeffler|AUTHOR Felix Schaeffler]], [[Stephen Jannetts|AUTHOR Stephen Jannetts]], [[Janet Beck|AUTHOR Janet Beck]]
</p><p class="cpabstractcardaffiliationlist">Queen Margaret University, UK</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2523–2527&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>Smartphones have become powerful tools for data capture due to their computational power, internet connectivity, high quality sensors and user-friendly interfaces. This also makes them attractive for the recording of voice data that can be analysed for clinical or other voice health purposes. This however requires detailed assessment of the reliability of voice parameters extracted from smartphone recordings. In a previous study we analysed reliability of measures of periodicity and periodicity deviation, with very mixed results across parameters. In the present study we extended this analysis to measures of added noise and spectral tilt. We analysed systematic and random error for six frequently used acoustic parameters in clinical acoustic voice quality analysis. 22 speakers recorded sustained [a] and a short passage with a studio microphone and four popular smartphones simultaneously. Acoustic parameters were extracted with Praat and smartphone recordings were compared to the studio microphone. Results indicate a small systematic error for almost all parameters and smartphones. Random errors differed substantially between parameters. Our results suggest that extraction of acoustic voice parameters with mobile phones is not without problems and different parameters show substantial differences in reliability. Careful individual assessment of parameters is therefore recommended before use in practice.</p></div>
\rules except wikilink
<div class="cpabstractcardauthorarea"><p class="cpabstractcardauthornames">[[Meredith Moore|AUTHOR Meredith Moore]], [[Michael Saxon|AUTHOR Michael Saxon]], [[Hemanth Venkateswara|AUTHOR Hemanth Venkateswara]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Sethuraman Panchanathan|AUTHOR Sethuraman Panchanathan]]
</p><p class="cpabstractcardaffiliationlist">Arizona State University, USA</p></div>

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{||$:/causal/View PDF File Button}}&nbsp;&nbsp;&nbsp;&nbsp;page 2528–2532&nbsp;&nbsp;&nbsp;&nbsp;
</span></p></div>

<div class="cpabstractcardabstract"><p>We present a new metadataset which provides insight into where and how two ASR systems make errors on several different speech datasets. By making this data readily available to researchers, we hope to stimulate research in the area of WER estimation models, in order to gain a deeper understanding of how intelligibility is encoded in speech. Using this dataset, we attempt to estimate intelligibility using a state-of-the-art model for speech quality estimation and found that this model did not work to model speech intelligibility. This finding sheds light on the relationship between how speech quality is encoded in acoustic features and how intelligibility is encoded. It shows that we have a lot more to learn in how to effectively model intelligibility. It is our hope that the metadataset we present will stimulate research into creating systems that more effectively model intelligibility.</p></div>
\rules except wikilink

<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Current Session Button}}{{||$:/causal/Preceding Paper Button}}
&nbsp;<span class="cpprevnextanchortext">PAPER</span>
&nbsp;{{||$:/causal/Next Paper Button}}
</p>
<p class="lineheightforbuttons"><span class="cpabscardpdfandmediabutton">
{{$:/causal/NO-PDF Marker}}&nbsp;</span></p></div>

\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Author Index Button}}
</p></div>

|cpborderless|k
|cptablecelltopbottomspace2|k
|cpsessionlisttable|k
|^<div class="cpsessionlistsessioncode">[[Mon-K-1|SESSION Mon-K-1 — ISCA Medal 2019 Keynote Speech]]</div> |^<div class="cpsessionlistsessionname">ISCA Medal 2019 Keynote Speech</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-SS-1-6|SESSION Mon-SS-1-6 — Spoken Language Processing for Children’s Speech]]</div> |^<div class="cpsessionlistsessionname">Spoken Language Processing for Children&#8217;s Speech</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-SS-2-6|SESSION Mon-SS-2-6 — Dynamics of Emotional Speech Exchanges in Multimodal Communication]]</div> |^<div class="cpsessionlistsessionname">Dynamics of Emotional Speech Exchanges in Multimodal Communication</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-1-1|SESSION Mon-O-1-1 — End-to-End Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">End-to-End Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-1-2|SESSION Mon-O-1-2 — Speech Enhancement: Multi-Channel]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement: Multi-Channel</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-1-3|SESSION Mon-O-1-3 — Speech Production: Individual Differences and the Brain]]</div> |^<div class="cpsessionlistsessionname">Speech Production: Individual Differences and the Brain</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-1-4|SESSION Mon-O-1-4 — Speech Signal Characterization 1]]</div> |^<div class="cpsessionlistsessionname">Speech Signal Characterization 1</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-1-5|SESSION Mon-O-1-5 — Neural Waveform Generation]]</div> |^<div class="cpsessionlistsessionname">Neural Waveform Generation</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-2-1|SESSION Mon-O-2-1 — Attention Mechanism for Speaker State Recognition]]</div> |^<div class="cpsessionlistsessionname">Attention Mechanism for Speaker State Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-2-2|SESSION Mon-O-2-2 — ASR Neural Network Training — 1]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Training &#8212; 1</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-2-3|SESSION Mon-O-2-3 — Zero-Resource ASR]]</div> |^<div class="cpsessionlistsessionname">Zero-Resource ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-2-4|SESSION Mon-O-2-4 — Sociophonetics]]</div> |^<div class="cpsessionlistsessionname">Sociophonetics</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-O-2-5|SESSION Mon-O-2-5 — Resources – Annotation – Evaluation]]</div> |^<div class="cpsessionlistsessionname">Resources &#8211; Annotation &#8211; Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-1-A|SESSION Mon-P-1-A — Speaker Recognition and Diarization]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition and Diarization</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-1-B|SESSION Mon-P-1-B — ASR for Noisy and Far-Field Speech]]</div> |^<div class="cpsessionlistsessionname">ASR for Noisy and Far-Field Speech</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-1-C|SESSION Mon-P-1-C — Social Signals Detection and Speaker Traits Analysis]]</div> |^<div class="cpsessionlistsessionname">Social Signals Detection and Speaker Traits Analysis</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-1-D|SESSION Mon-P-1-D — Applications of Language Technologies]]</div> |^<div class="cpsessionlistsessionname">Applications of Language Technologies</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-1-E|SESSION Mon-P-1-E — Speech and Audio Characterization and Segmentation]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Characterization and Segmentation</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-2-A|SESSION Mon-P-2-A — Neural Techniques for Voice Conversion and Waveform Generation]]</div> |^<div class="cpsessionlistsessionname">Neural Techniques for Voice Conversion and Waveform Generation</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-2-B|SESSION Mon-P-2-B — Model Adaptation for ASR]]</div> |^<div class="cpsessionlistsessionname">Model Adaptation for ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-2-C|SESSION Mon-P-2-C — Dialogue Speech Understanding]]</div> |^<div class="cpsessionlistsessionname">Dialogue Speech Understanding</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-2-D|SESSION Mon-P-2-D — Speech Production and Silent Interfaces]]</div> |^<div class="cpsessionlistsessionname">Speech Production and Silent Interfaces</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-P-2-E|SESSION Mon-P-2-E — Speech Signal Characterization 2]]</div> |^<div class="cpsessionlistsessionname">Speech Signal Characterization 2</div> |
|^<div class="cpsessionlistsessioncode">[[Mon-S&T-1|SESSION Mon-S&T-1 — Applications in Language Learning and Healthcare]]</div> |^<div class="cpsessionlistsessionname">Applications in Language Learning and Healthcare</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-K-2|SESSION Tue-K-2 — Keynote 2: Tanja Schultz]]</div> |^<div class="cpsessionlistsessionname">Keynote 2: Tanja Schultz</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-SS-3-6|SESSION Tue-SS-3-6 — The Second DIHARD Speech Diarization Challenge (DIHARD II)]]</div> |^<div class="cpsessionlistsessionname">The Second DIHARD Speech Diarization Challenge (DIHARD II)</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-SS-4-4|SESSION Tue-SS-4-4 — The 2019 Automatic Speaker Verification Spoofing and Countermeasures Challenge: ASVspoof Challenge — O]]</div> |^<div class="cpsessionlistsessionname">The 2019 Automatic Speaker Verification Spoofing and Countermeasures Challenge: ASVspoof Challenge &#8212; O</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-SS-4-A|SESSION Tue-SS-4-A — The 2019 Automatic Speaker Verification Spoofing and Countermeasures Challenge: ASVspoof Challenge — P]]</div> |^<div class="cpsessionlistsessionname">The 2019 Automatic Speaker Verification Spoofing and Countermeasures Challenge: ASVspoof Challenge &#8212; P</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-SS-5-6|SESSION Tue-SS-5-6 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div> |^<div class="cpsessionlistsessionname">The Zero Resource Speech Challenge 2019: TTS Without T</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-3-1|SESSION Tue-O-3-1 — Speech Translation]]</div> |^<div class="cpsessionlistsessionname">Speech Translation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-3-2|SESSION Tue-O-3-2 — Speaker Recognition 1]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition 1</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-3-3|SESSION Tue-O-3-3 — Dialogue Understanding]]</div> |^<div class="cpsessionlistsessionname">Dialogue Understanding</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-3-4|SESSION Tue-O-3-4 — Speech in the Brain]]</div> |^<div class="cpsessionlistsessionname">Speech in the Brain</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-3-5|SESSION Tue-O-3-5 — Far-Field Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Far-Field Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-4-1|SESSION Tue-O-4-1 — Speaker and Language Recognition 1]]</div> |^<div class="cpsessionlistsessionname">Speaker and Language Recognition 1</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-4-2|SESSION Tue-O-4-2 — Speech Synthesis: Towards End-to-End]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Towards End-to-End</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-4-3|SESSION Tue-O-4-3 — Semantic Analysis and Classification]]</div> |^<div class="cpsessionlistsessionname">Semantic Analysis and Classification</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-4-5|SESSION Tue-O-4-5 — Speech and Audio Source Separation and Scene Analysis 1]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Source Separation and Scene Analysis 1</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-5-1|SESSION Tue-O-5-1 — Speech Intelligibility]]</div> |^<div class="cpsessionlistsessionname">Speech Intelligibility</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-5-2|SESSION Tue-O-5-2 — ASR Neural Network Architectures 1]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Architectures 1</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-5-3|SESSION Tue-O-5-3 — Speech and Language Analytics for Mental Health]]</div> |^<div class="cpsessionlistsessionname">Speech and Language Analytics for Mental Health</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-5-4|SESSION Tue-O-5-4 — Dialogue Modelling]]</div> |^<div class="cpsessionlistsessionname">Dialogue Modelling</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-O-5-5|SESSION Tue-O-5-5 — Speaker Recognition Evaluation]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-3-A|SESSION Tue-P-3-A — Speech Synthesis: Data and Evaluation]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Data and Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-3-B|SESSION Tue-P-3-B — Model Training for ASR]]</div> |^<div class="cpsessionlistsessionname">Model Training for ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-3-C|SESSION Tue-P-3-C — Network Architectures for Emotion and Paralinguistics Recognition]]</div> |^<div class="cpsessionlistsessionname">Network Architectures for Emotion and Paralinguistics Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-3-D|SESSION Tue-P-3-D — Acoustic Phonetics]]</div> |^<div class="cpsessionlistsessionname">Acoustic Phonetics</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-3-E|SESSION Tue-P-3-E — Speech Enhancement: Noise Attenuation]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement: Noise Attenuation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-4-B|SESSION Tue-P-4-B — Language Learning and Databases]]</div> |^<div class="cpsessionlistsessionname">Language Learning and Databases</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-4-C|SESSION Tue-P-4-C — Emotion and Personality in Conversation]]</div> |^<div class="cpsessionlistsessionname">Emotion and Personality in Conversation</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-4-D|SESSION Tue-P-4-D — Voice Quality, Speech Perception, and Prosody]]</div> |^<div class="cpsessionlistsessionname">Voice Quality, Speech Perception, and Prosody</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-4-E|SESSION Tue-P-4-E — Speech Signal Characterization 3]]</div> |^<div class="cpsessionlistsessionname">Speech Signal Characterization 3</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-5-A|SESSION Tue-P-5-A — Speech Synthesis: Pronunciation, Multilingual, and Low Resource]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Pronunciation, Multilingual, and Low Resource</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-5-B|SESSION Tue-P-5-B — Cross-Lingual and Multilingual ASR]]</div> |^<div class="cpsessionlistsessionname">Cross-Lingual and Multilingual ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-5-C|SESSION Tue-P-5-C — Spoken Term Detection, Confidence Measure, and End-to-End Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Spoken Term Detection, Confidence Measure, and End-to-End Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-5-D|SESSION Tue-P-5-D — Speech Perception]]</div> |^<div class="cpsessionlistsessionname">Speech Perception</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-P-5-E|SESSION Tue-P-5-E — Topics in Speech and Audio Signal Processing]]</div> |^<div class="cpsessionlistsessionname">Topics in Speech and Audio Signal Processing</div> |
|^<div class="cpsessionlistsessioncode">[[Tue-S&T-2|SESSION Tue-S&T-2 — Speech Processing and Analysis]]</div> |^<div class="cpsessionlistsessionname">Speech Processing and Analysis</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-K-3|SESSION Wed-K-3 — Keynote 3: Manfred Kaltenbacher]]</div> |^<div class="cpsessionlistsessionname">Keynote 3: Manfred Kaltenbacher</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-6-4|SESSION Wed-SS-6-4 — The Interspeech 2019 Computational Paralinguistics Challenge (ComParE)]]</div> |^<div class="cpsessionlistsessionname">The Interspeech 2019 Computational Paralinguistics Challenge (ComParE)</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-7-3|SESSION Wed-SS-7-3 — The VOiCES from a Distance Challenge — O]]</div> |^<div class="cpsessionlistsessionname">The VOiCES from a Distance Challenge &#8212; O</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-7-A|SESSION Wed-SS-7-A — The VOiCES from a Distance Challenge — P]]</div> |^<div class="cpsessionlistsessionname">The VOiCES from a Distance Challenge &#8212; P</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-SS-8-6|SESSION Wed-SS-8-6 — Voice Quality Characterization for Clinical Voice Assessment: Voice Production, Acoustics, and Auditory Perception]]</div> |^<div class="cpsessionlistsessionname">Voice Quality Characterization for Clinical Voice Assessment: Voice Production, Acoustics, and Auditory Perception</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-6-1|SESSION Wed-O-6-1 — Prosody]]</div> |^<div class="cpsessionlistsessionname">Prosody</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-6-2|SESSION Wed-O-6-2 — Speech and Audio Classification 1]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Classification 1</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-6-3|SESSION Wed-O-6-3 — Singing and Multimodal Synthesis]]</div> |^<div class="cpsessionlistsessionname">Singing and Multimodal Synthesis</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-6-5|SESSION Wed-O-6-5 — ASR Neural Network Training — 2]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Training &#8212; 2</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-7-1|SESSION Wed-O-7-1 — Bilingualism, L2, and Non-Nativeness]]</div> |^<div class="cpsessionlistsessionname">Bilingualism, L2, and Non-Nativeness</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-7-2|SESSION Wed-O-7-2 — Spoken Term Detection]]</div> |^<div class="cpsessionlistsessionname">Spoken Term Detection</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-7-4|SESSION Wed-O-7-4 — Speech and Audio Source Separation and Scene Analysis 2]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Source Separation and Scene Analysis 2</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-7-5|SESSION Wed-O-7-5 — Speech Enhancement: Single Channel 2]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement: Single Channel 2</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-8-1|SESSION Wed-O-8-1 — Multimodal ASR]]</div> |^<div class="cpsessionlistsessionname">Multimodal ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-8-2|SESSION Wed-O-8-2 — ASR Neural Network Architectures 2]]</div> |^<div class="cpsessionlistsessionname">ASR Neural Network Architectures 2</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-8-3|SESSION Wed-O-8-3 — Training Strategy for Speech Emotion Recognition]]</div> |^<div class="cpsessionlistsessionname">Training Strategy for Speech Emotion Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-8-4|SESSION Wed-O-8-4 — Voice Conversion for Style, Accent, and Emotion]]</div> |^<div class="cpsessionlistsessionname">Voice Conversion for Style, Accent, and Emotion</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-O-8-5|SESSION Wed-O-8-5 — Speaker Recognition 2]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition 2</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-6-A|SESSION Wed-P-6-A — Speaker Recognition and Anti-Spoofing]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition and Anti-Spoofing</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-6-B|SESSION Wed-P-6-B — Rich Transcription and ASR Systems]]</div> |^<div class="cpsessionlistsessionname">Rich Transcription and ASR Systems</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-6-C|SESSION Wed-P-6-C — Speech and Language Analytics for Medical Applications]]</div> |^<div class="cpsessionlistsessionname">Speech and Language Analytics for Medical Applications</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-6-D|SESSION Wed-P-6-D — Speech Perception in Adverse Listening Conditions]]</div> |^<div class="cpsessionlistsessionname">Speech Perception in Adverse Listening Conditions</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-6-E|SESSION Wed-P-6-E — Speech Enhancement: Single Channel 1]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement: Single Channel 1</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-7-B|SESSION Wed-P-7-B — Speech Recognition and Beyond]]</div> |^<div class="cpsessionlistsessionname">Speech Recognition and Beyond</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-7-C|SESSION Wed-P-7-C — Emotion Modeling and Analysis]]</div> |^<div class="cpsessionlistsessionname">Emotion Modeling and Analysis</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-7-D|SESSION Wed-P-7-D — Articulatory Phonetics]]</div> |^<div class="cpsessionlistsessionname">Articulatory Phonetics</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-7-E|SESSION Wed-P-7-E — Speech and Audio Classification 2]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Classification 2</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-8-A|SESSION Wed-P-8-A — Speech Coding and Evaluation]]</div> |^<div class="cpsessionlistsessionname">Speech Coding and Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-8-B|SESSION Wed-P-8-B — Feature Extraction for ASR]]</div> |^<div class="cpsessionlistsessionname">Feature Extraction for ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-8-C|SESSION Wed-P-8-C — Lexicon and Language Model for Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Lexicon and Language Model for Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-8-D|SESSION Wed-P-8-D — First and Second Language Acquisition]]</div> |^<div class="cpsessionlistsessionname">First and Second Language Acquisition</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-P-8-E|SESSION Wed-P-8-E — Speech and Audio Classification 3]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Classification 3</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-S&T-3|SESSION Wed-S&T-3 — Speech and Speaker Recognition]]</div> |^<div class="cpsessionlistsessionname">Speech and Speaker Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-S&T-4|SESSION Wed-S&T-4 — Speech Annotation and Labelling]]</div> |^<div class="cpsessionlistsessionname">Speech Annotation and Labelling</div> |
|^<div class="cpsessionlistsessioncode">[[Wed-S&T-5|SESSION Wed-S&T-5 — Speech Synthesis]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-K-4|SESSION Thu-K-4 — Keynote 4: Mirella Lapata]]</div> |^<div class="cpsessionlistsessionname">Keynote 4: Mirella Lapata</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-SS-9-6|SESSION Thu-SS-9-6 — Privacy in Speech and Audio Interfaces]]</div> |^<div class="cpsessionlistsessionname">Privacy in Speech and Audio Interfaces</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-SS-10-5|SESSION Thu-SS-10-5 — Speech Technologies for Code-Switching in Multilingual Communities]]</div> |^<div class="cpsessionlistsessionname">Speech Technologies for Code-Switching in Multilingual Communities</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-9-1|SESSION Thu-O-9-1 — Speech Synthesis: Articulatory and Physical Approaches]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Articulatory and Physical Approaches</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-9-2|SESSION Thu-O-9-2 — Sequence-to-Sequence Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Sequence-to-Sequence Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-9-3|SESSION Thu-O-9-3 — Search Methods for Speech Recognition]]</div> |^<div class="cpsessionlistsessionname">Search Methods for Speech Recognition</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-9-4|SESSION Thu-O-9-4 — Audio Signal Characterization]]</div> |^<div class="cpsessionlistsessionname">Audio Signal Characterization</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-9-5|SESSION Thu-O-9-5 — Speech and Voice Disorders 1]]</div> |^<div class="cpsessionlistsessionname">Speech and Voice Disorders 1</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-10-1|SESSION Thu-O-10-1 — Neural Networks for Language Modeling]]</div> |^<div class="cpsessionlistsessionname">Neural Networks for Language Modeling</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-10-2|SESSION Thu-O-10-2 — Representation Learning of Emotion and Paralinguistics]]</div> |^<div class="cpsessionlistsessionname">Representation Learning of Emotion and Paralinguistics</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-10-3|SESSION Thu-O-10-3 — World’s Languages and Varieties]]</div> |^<div class="cpsessionlistsessionname">World&#8217;s Languages and Varieties</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-O-10-4|SESSION Thu-O-10-4 — Adaptation and Accommodation in Conversation]]</div> |^<div class="cpsessionlistsessionname">Adaptation and Accommodation in Conversation</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-9-A|SESSION Thu-P-9-A — Speaker and Language Recognition 2]]</div> |^<div class="cpsessionlistsessionname">Speaker and Language Recognition 2</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-9-B|SESSION Thu-P-9-B — Medical Applications and Visual ASR]]</div> |^<div class="cpsessionlistsessionname">Medical Applications and Visual ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-9-C|SESSION Thu-P-9-C — Turn Management in Dialogue]]</div> |^<div class="cpsessionlistsessionname">Turn Management in Dialogue</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-9-D|SESSION Thu-P-9-D — Corpus Annotation and Evaluation]]</div> |^<div class="cpsessionlistsessionname">Corpus Annotation and Evaluation</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-9-E|SESSION Thu-P-9-E — Speech Enhancement: Multi-Channel and Intelligibility]]</div> |^<div class="cpsessionlistsessionname">Speech Enhancement: Multi-Channel and Intelligibility</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-10-A|SESSION Thu-P-10-A — Speaker Recognition 3]]</div> |^<div class="cpsessionlistsessionname">Speaker Recognition 3</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-10-B|SESSION Thu-P-10-B — NN Architectures for ASR]]</div> |^<div class="cpsessionlistsessionname">NN Architectures for ASR</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-10-C|SESSION Thu-P-10-C — Speech Synthesis: Text Processing, Prosody, and Emotion]]</div> |^<div class="cpsessionlistsessionname">Speech Synthesis: Text Processing, Prosody, and Emotion</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-10-D|SESSION Thu-P-10-D — Speech and Voice Disorders 2]]</div> |^<div class="cpsessionlistsessionname">Speech and Voice Disorders 2</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-P-10-E|SESSION Thu-P-10-E — Speech and Audio Source Separation and Scene Analysis 3]]</div> |^<div class="cpsessionlistsessionname">Speech and Audio Source Separation and Scene Analysis 3</div> |
|^<div class="cpsessionlistsessioncode">[[Thu-S&T-6|SESSION Thu-S&T-6 — Speech-to-Text and Speech Assessment]]</div> |^<div class="cpsessionlistsessionname">Speech-to-Text and Speech Assessment</div> |
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|09:30–10:30, Monday 16 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Mon-K-1|PAPER Mon-K-1 — Statistical Approach to Speech Synthesis: Past, Present and Future]]</div>|<div class="cpsessionviewpapertitle">Statistical Approach to Speech Synthesis: Past, Present and Future</div><div class="cpsessionviewpaperauthor">[[Keiichi Tokuda|AUTHOR Keiichi Tokuda]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Brian Mak, Florian Metze|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Mon-O-1-1-1|PAPER Mon-O-1-1-1 — Survey Talk: Modeling in Automatic Speech Recognition: Beyond Hidden Markov Models]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Modeling in Automatic Speech Recognition: Beyond Hidden Markov Models</div><div class="cpsessionviewpaperauthor">[[Ralf Schlüter|AUTHOR Ralf Schlüter]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192702.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-1-2|PAPER Mon-O-1-1-2 — Very Deep Self-Attention Networks for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Very Deep Self-Attention Networks for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ngoc-Quan Pham|AUTHOR Ngoc-Quan Pham]], [[Thai-Son Nguyen|AUTHOR Thai-Son Nguyen]], [[Jan Niehues|AUTHOR Jan Niehues]], [[Markus Müller|AUTHOR Markus Müller]], [[Alex Waibel|AUTHOR Alex Waibel]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191819.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-1-3|PAPER Mon-O-1-1-3 — Jasper: An End-to-End Convolutional Neural Acoustic Model]]</div>|<div class="cpsessionviewpapertitle">Jasper: An End-to-End Convolutional Neural Acoustic Model</div><div class="cpsessionviewpaperauthor">[[Jason Li|AUTHOR Jason Li]], [[Vitaly Lavrukhin|AUTHOR Vitaly Lavrukhin]], [[Boris Ginsburg|AUTHOR Boris Ginsburg]], [[Ryan Leary|AUTHOR Ryan Leary]], [[Oleksii Kuchaiev|AUTHOR Oleksii Kuchaiev]], [[Jonathan M. Cohen|AUTHOR Jonathan M. Cohen]], [[Huyen Nguyen|AUTHOR Huyen Nguyen]], [[Ravi Teja Gadde|AUTHOR Ravi Teja Gadde]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-1-4|PAPER Mon-O-1-1-4 — Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Unidirectional Neural Network Architectures for End-to-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Niko Moritz|AUTHOR Niko Moritz]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192599.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-1-5|PAPER Mon-O-1-1-5 — Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Analyzing Phonetic and Graphemic Representations in End-to-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Yonatan Belinkov|AUTHOR Yonatan Belinkov]], [[Ahmed Ali|AUTHOR Ahmed Ali]], [[James Glass|AUTHOR James Glass]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Hong-Goo Kang, Ina Kodrasi|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-2-1|PAPER Mon-O-1-2-1 — Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Multi-Channel Speech Enhancement Using Time-Domain Convolutional Denoising Autoencoder</div><div class="cpsessionviewpaperauthor">[[Naohiro Tawara|AUTHOR Naohiro Tawara]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]], [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192751.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-2-2|PAPER Mon-O-1-2-2 — On Nonlinear Spatial Filtering in Multichannel Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">On Nonlinear Spatial Filtering in Multichannel Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Kristina Tesch|AUTHOR Kristina Tesch]], [[Robert Rehr|AUTHOR Robert Rehr]], [[Timo Gerkmann|AUTHOR Timo Gerkmann]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192244.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-2-3|PAPER Mon-O-1-2-3 — Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation]]</div>|<div class="cpsessionviewpapertitle">Multi-Channel Block-Online Source Extraction Based on Utterance Adaptation</div><div class="cpsessionviewpaperauthor">[[Juan M. Martín-Doñas|AUTHOR Juan M. Martín-Doñas]], [[Jens Heitkaemper|AUTHOR Jens Heitkaemper]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]], [[Angel M. Gomez|AUTHOR Angel M. Gomez]], [[Antonio M. Peinado|AUTHOR Antonio M. Peinado]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-2-4|PAPER Mon-O-1-2-4 — Exploiting Multi-Channel Speech Presence Probability in Parametric Multi-Channel Wiener Filter]]</div>|<div class="cpsessionviewpapertitle">Exploiting Multi-Channel Speech Presence Probability in Parametric Multi-Channel Wiener Filter</div><div class="cpsessionviewpaperauthor">[[Saeed Bagheri|AUTHOR Saeed Bagheri]], [[Daniele Giacobello|AUTHOR Daniele Giacobello]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191220.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-2-5|PAPER Mon-O-1-2-5 — Variational Bayesian Multi-Channel Speech Dereverberation Under Noisy Environments with Probabilistic Convolutive Transfer Function]]</div>|<div class="cpsessionviewpapertitle">Variational Bayesian Multi-Channel Speech Dereverberation Under Noisy Environments with Probabilistic Convolutive Transfer Function</div><div class="cpsessionviewpaperauthor">[[Masahito Togami|AUTHOR Masahito Togami]], [[Tatsuya Komatsu|AUTHOR Tatsuya Komatsu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-2-6|PAPER Mon-O-1-2-6 — Simultaneous Denoising and Dereverberation for Low-Latency Applications Using Frame-by-Frame Online Unified Convolutional Beamformer]]</div>|<div class="cpsessionviewpapertitle">Simultaneous Denoising and Dereverberation for Low-Latency Applications Using Frame-by-Frame Online Unified Convolutional Beamformer</div><div class="cpsessionviewpaperauthor">[[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Samuel Silva, Tanja Schultz|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-3-1|PAPER Mon-O-1-3-1 — Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices]]</div>|<div class="cpsessionviewpapertitle">Individual Variation in Cognitive Processing Style Predicts Differences in Phonetic Imitation of Device and Human Voices</div><div class="cpsessionviewpaperauthor">[[Cathryn Snyder|AUTHOR Cathryn Snyder]], [[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-3-2|PAPER Mon-O-1-3-2 — An Investigation on Speaker Specific Articulatory Synthesis with Speaker Independent Articulatory Inversion]]</div>|<div class="cpsessionviewpapertitle">An Investigation on Speaker Specific Articulatory Synthesis with Speaker Independent Articulatory Inversion</div><div class="cpsessionviewpaperauthor">[[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-3-3|PAPER Mon-O-1-3-3 — Individual Difference of Relative Tongue Size and its Acoustic Effects]]</div>|<div class="cpsessionviewpapertitle">Individual Difference of Relative Tongue Size and its Acoustic Effects</div><div class="cpsessionviewpaperauthor">[[Xiaohan Zhang|AUTHOR Xiaohan Zhang]], [[Chongke Bi|AUTHOR Chongke Bi]], [[Kiyoshi Honda|AUTHOR Kiyoshi Honda]], [[Wenhuan Lu|AUTHOR Wenhuan Lu]], [[Jianguo Wei|AUTHOR Jianguo Wei]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191376.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-3-4|PAPER Mon-O-1-3-4 — Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/]]</div>|<div class="cpsessionviewpapertitle">Individual Differences of Airflow and Sound Generation in the Vocal Tract of Sibilant /s/</div><div class="cpsessionviewpaperauthor">[[Tsukasa Yoshinaga|AUTHOR Tsukasa Yoshinaga]], [[Kazunori Nozaki|AUTHOR Kazunori Nozaki]], [[Shigeo Wada|AUTHOR Shigeo Wada]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193269.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-3-5|PAPER Mon-O-1-3-5 — Hush-Hush Speak: Speech Reconstruction Using Silent Videos]]</div>|<div class="cpsessionviewpapertitle">Hush-Hush Speak: Speech Reconstruction Using Silent Videos</div><div class="cpsessionviewpaperauthor">[[Shashwat Uttam|AUTHOR Shashwat Uttam]], [[Yaman Kumar|AUTHOR Yaman Kumar]], [[Dhruva Sahrawat|AUTHOR Dhruva Sahrawat]], [[Mansi Aggarwal|AUTHOR Mansi Aggarwal]], [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]], [[Debanjan Mahata|AUTHOR Debanjan Mahata]], [[Amanda Stent|AUTHOR Amanda Stent]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-3-6|PAPER Mon-O-1-3-6 — SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning]]</div>|<div class="cpsessionviewpapertitle">SPEAK YOUR MIND! Towards Imagined Speech Recognition with Hierarchical Deep Learning</div><div class="cpsessionviewpaperauthor">[[Pramit Saha|AUTHOR Pramit Saha]], [[Muhammad Abdul-Mageed|AUTHOR Muhammad Abdul-Mageed]], [[Sidney Fels|AUTHOR Sidney Fels]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Koichi Shinoda, Shrikanth Narayanan|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191473.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-4-1|PAPER Mon-O-1-4-1 — An Unsupervised Autoregressive Model for Speech Representation Learning]]</div>|<div class="cpsessionviewpapertitle">An Unsupervised Autoregressive Model for Speech Representation Learning</div><div class="cpsessionviewpaperauthor">[[Yu-An Chung|AUTHOR Yu-An Chung]], [[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[Hao Tang|AUTHOR Hao Tang]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191327.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-4-2|PAPER Mon-O-1-4-2 — Harmonic-Aligned Frame Mask Based on Non-Stationary Gabor Transform with Application to Content-Dependent Speaker Comparison]]</div>|<div class="cpsessionviewpapertitle">Harmonic-Aligned Frame Mask Based on Non-Stationary Gabor Transform with Application to Content-Dependent Speaker Comparison</div><div class="cpsessionviewpaperauthor">[[Feng Huang|AUTHOR Feng Huang]], [[Peter Balazs|AUTHOR Peter Balazs]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-4-3|PAPER Mon-O-1-4-3 — Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual]]</div>|<div class="cpsessionviewpapertitle">Glottal Closure Instants Detection from Speech Signal by Deep Features Extracted from Raw Speech and Linear Prediction Residual</div><div class="cpsessionviewpaperauthor">[[Gurunath Reddy M.|AUTHOR Gurunath Reddy M.]], [[K. Sreenivasa Rao|AUTHOR K. Sreenivasa Rao]], [[Partha Pratim Das|AUTHOR Partha Pratim Das]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-4-4|PAPER Mon-O-1-4-4 — Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks]]</div>|<div class="cpsessionviewpapertitle">Learning Problem-Agnostic Speech Representations from Multiple Self-Supervised Tasks</div><div class="cpsessionviewpaperauthor">[[Santiago Pascual|AUTHOR Santiago Pascual]], [[Mirco Ravanelli|AUTHOR Mirco Ravanelli]], [[Joan Serrà|AUTHOR Joan Serrà]], [[Antonio Bonafonte|AUTHOR Antonio Bonafonte]], [[Yoshua Bengio|AUTHOR Yoshua Bengio]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-4-5|PAPER Mon-O-1-4-5 — Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech]]</div>|<div class="cpsessionviewpapertitle">Excitation Source and Vocal Tract System Based Acoustic Features for Detection of Nasals in Continuous Speech</div><div class="cpsessionviewpaperauthor">[[Bhanu Teja Nellore|AUTHOR Bhanu Teja Nellore]], [[Sri Harsha Dumpala|AUTHOR Sri Harsha Dumpala]], [[Karan Nathwani|AUTHOR Karan Nathwani]], [[Suryakanth V. Gangashetty|AUTHOR Suryakanth V. Gangashetty]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192561.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-4-6|PAPER Mon-O-1-4-6 — Data Augmentation Using GANs for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Data Augmentation Using GANs for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Aggelina Chatziagapi|AUTHOR Aggelina Chatziagapi]], [[Georgios Paraskevopoulos|AUTHOR Georgios Paraskevopoulos]], [[Dimitris Sgouropoulos|AUTHOR Dimitris Sgouropoulos]], [[Georgios Pantazopoulos|AUTHOR Georgios Pantazopoulos]], [[Malvina Nikandrou|AUTHOR Malvina Nikandrou]], [[Theodoros Giannakopoulos|AUTHOR Theodoros Giannakopoulos]], [[Athanasios Katsamanis|AUTHOR Athanasios Katsamanis]], [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Hema Murthy, Alan W. Black|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191705.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-5-1|PAPER Mon-O-1-5-1 — High Quality, Lightweight and Adaptable TTS Using LPCNet]]</div>|<div class="cpsessionviewpapertitle">High Quality, Lightweight and Adaptable TTS Using LPCNet</div><div class="cpsessionviewpaperauthor">[[Zvi Kons|AUTHOR Zvi Kons]], [[Slava Shechtman|AUTHOR Slava Shechtman]], [[Alex Sorin|AUTHOR Alex Sorin]], [[Carmel Rabinovitz|AUTHOR Carmel Rabinovitz]], [[Ron Hoory|AUTHOR Ron Hoory]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191424.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-5-2|PAPER Mon-O-1-5-2 — Towards Achieving Robust Universal Neural Vocoding]]</div>|<div class="cpsessionviewpapertitle">Towards Achieving Robust Universal Neural Vocoding</div><div class="cpsessionviewpaperauthor">[[Jaime Lorenzo-Trueba|AUTHOR Jaime Lorenzo-Trueba]], [[Thomas Drugman|AUTHOR Thomas Drugman]], [[Javier Latorre|AUTHOR Javier Latorre]], [[Thomas Merritt|AUTHOR Thomas Merritt]], [[Bartosz Putrycz|AUTHOR Bartosz Putrycz]], [[Roberto Barra-Chicote|AUTHOR Roberto Barra-Chicote]], [[Alexis Moinet|AUTHOR Alexis Moinet]], [[Vatsal Aggarwal|AUTHOR Vatsal Aggarwal]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193099.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-5-3|PAPER Mon-O-1-5-3 — Expediting TTS Synthesis with Adversarial Vocoding]]</div>|<div class="cpsessionviewpapertitle">Expediting TTS Synthesis with Adversarial Vocoding</div><div class="cpsessionviewpaperauthor">[[Paarth Neekhara|AUTHOR Paarth Neekhara]], [[Chris Donahue|AUTHOR Chris Donahue]], [[Miller Puckette|AUTHOR Miller Puckette]], [[Shlomo Dubnov|AUTHOR Shlomo Dubnov]], [[Julian McAuley|AUTHOR Julian McAuley]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-5-4|PAPER Mon-O-1-5-4 — Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding]]</div>|<div class="cpsessionviewpapertitle">Analysis by Adversarial Synthesis — A Novel Approach for Speech Vocoding</div><div class="cpsessionviewpaperauthor">[[Ahmed Mustafa|AUTHOR Ahmed Mustafa]], [[Arijit Biswas|AUTHOR Arijit Biswas]], [[Christian Bergler|AUTHOR Christian Bergler]], [[Julia Schottenhamml|AUTHOR Julia Schottenhamml]], [[Andreas Maier|AUTHOR Andreas Maier]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-5-5|PAPER Mon-O-1-5-5 — Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation]]</div>|<div class="cpsessionviewpapertitle">Quasi-Periodic WaveNet Vocoder: A Pitch Dependent Dilated Convolution Model for Parametric Speech Generation</div><div class="cpsessionviewpaperauthor">[[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191514.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-1-5-6|PAPER Mon-O-1-5-6 — A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data]]</div>|<div class="cpsessionviewpapertitle">A Speaker-Dependent WaveNet for Voice Conversion with Non-Parallel Data</div><div class="cpsessionviewpaperauthor">[[Xiaohai Tian|AUTHOR Xiaohai Tian]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Carlos Busso, Chi-Chun Lee|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Mon-O-2-1-1|PAPER Mon-O-2-1-1 — Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: When Attention Meets Speech Applications: Speech & Speaker Recognition Perspective</div><div class="cpsessionviewpaperauthor">[[Kyu J. Han|AUTHOR Kyu J. Han]], [[Ramon Prieto|AUTHOR Ramon Prieto]], [[Tao Ma|AUTHOR Tao Ma]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191649.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-1-2|PAPER Mon-O-2-1-2 — Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Attention-Enhanced Connectionist Temporal Classification for Discrete Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Ziping Zhao|AUTHOR Ziping Zhao]], [[Zhongtian Bao|AUTHOR Zhongtian Bao]], [[Zixing Zhang|AUTHOR Zixing Zhang]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Haishuai Wang|AUTHOR Haishuai Wang]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192044.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-1-3|PAPER Mon-O-2-1-3 — Attentive to Individual: A Multimodal Emotion Recognition Network with Personalized Attention Profile]]</div>|<div class="cpsessionviewpapertitle">Attentive to Individual: A Multimodal Emotion Recognition Network with Personalized Attention Profile</div><div class="cpsessionviewpaperauthor">[[Jeng-Lin Li|AUTHOR Jeng-Lin Li]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191603.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-1-4|PAPER Mon-O-2-1-4 — A Saliency-Based Attention LSTM Model for Cognitive Load Classification from Speech]]</div>|<div class="cpsessionviewpapertitle">A Saliency-Based Attention LSTM Model for Cognitive Load Classification from Speech</div><div class="cpsessionviewpaperauthor">[[Ascensión Gallardo-Antolín|AUTHOR Ascensión Gallardo-Antolín]], [[Juan Manuel Montero|AUTHOR Juan Manuel Montero]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-1-5|PAPER Mon-O-2-1-5 — A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews]]</div>|<div class="cpsessionviewpapertitle">A Hierarchical Attention Network-Based Approach for Depression Detection from Transcribed Clinical Interviews</div><div class="cpsessionviewpaperauthor">[[Adria Mallol-Ragolta|AUTHOR Adria Mallol-Ragolta]], [[Ziping Zhao|AUTHOR Ziping Zhao]], [[Lukas Stappen|AUTHOR Lukas Stappen]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Mark Gales|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-2-1|PAPER Mon-O-2-2-1 — Untranscribed Web Audio for Low Resource Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Untranscribed Web Audio for Low Resource Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Andrea Carmantini|AUTHOR Andrea Carmantini]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191780.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-2-2|PAPER Mon-O-2-2-2 — RWTH ASR Systems for LibriSpeech: Hybrid vs Attention]]</div>|<div class="cpsessionviewpapertitle">RWTH ASR Systems for LibriSpeech: Hybrid vs Attention</div><div class="cpsessionviewpaperauthor">[[Christoph Lüscher|AUTHOR Christoph Lüscher]], [[Eugen Beck|AUTHOR Eugen Beck]], [[Kazuki Irie|AUTHOR Kazuki Irie]], [[Markus Kitza|AUTHOR Markus Kitza]], [[Wilfried Michel|AUTHOR Wilfried Michel]], [[Albert Zeyer|AUTHOR Albert Zeyer]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-2-3|PAPER Mon-O-2-2-3 — Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Auxiliary Interference Speaker Loss for Target-Speaker Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Ryoichi Takashima|AUTHOR Ryoichi Takashima]], [[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-2-4|PAPER Mon-O-2-2-4 — Speaker Adaptation for Attention-Based End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Speaker Adaptation for Attention-Based End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhong Meng|AUTHOR Zhong Meng]], [[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-2-5|PAPER Mon-O-2-2-5 — Large Margin Training for Attention Based End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Large Margin Training for Attention Based End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Peidong Wang|AUTHOR Peidong Wang]], [[Jia Cui|AUTHOR Jia Cui]], [[Chao Weng|AUTHOR Chao Weng]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192641.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-2-6|PAPER Mon-O-2-2-6 — Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Large-Scale Mixed-Bandwidth Deep Neural Network Acoustic Modeling for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Khoi-Nguyen C. Mac|AUTHOR Khoi-Nguyen C. Mac]], [[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[Wei Zhang|AUTHOR Wei Zhang]], [[Michael Picheny|AUTHOR Michael Picheny]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Jan Černocký, Marco Siniscalchi|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-3-1|PAPER Mon-O-2-3-1 — SparseSpeech: Unsupervised Acoustic Unit Discovery with Memory-Augmented Sequence Autoencoders]]</div>|<div class="cpsessionviewpapertitle">SparseSpeech: Unsupervised Acoustic Unit Discovery with Memory-Augmented Sequence Autoencoders</div><div class="cpsessionviewpaperauthor">[[Benjamin Milde|AUTHOR Benjamin Milde]], [[Chris Biemann|AUTHOR Chris Biemann]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192224.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-3-2|PAPER Mon-O-2-3-2 — Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery]]</div>|<div class="cpsessionviewpapertitle">Bayesian Subspace Hidden Markov Model for Acoustic Unit Discovery</div><div class="cpsessionviewpaperauthor">[[Lucas Ondel|AUTHOR Lucas Ondel]], [[Hari Krishna Vydana|AUTHOR Hari Krishna Vydana]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Jan Černocký|AUTHOR Jan Černocký]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-3-3|PAPER Mon-O-2-3-3 — Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages]]</div>|<div class="cpsessionviewpapertitle">Speaker Adversarial Training of DPGMM-Based Feature Extractor for Zero-Resource Languages</div><div class="cpsessionviewpaperauthor">[[Yosuke Higuchi|AUTHOR Yosuke Higuchi]], [[Naohiro Tawara|AUTHOR Naohiro Tawara]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]], [[Tetsuji Ogawa|AUTHOR Tetsuji Ogawa]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191775.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-3-4|PAPER Mon-O-2-3-4 — Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data]]</div>|<div class="cpsessionviewpapertitle">Building Large-Vocabulary ASR Systems for Languages Without Any Audio Training Data</div><div class="cpsessionviewpaperauthor">[[Manasa Prasad|AUTHOR Manasa Prasad]], [[Daan van Esch|AUTHOR Daan van Esch]], [[Sandy Ritchie|AUTHOR Sandy Ritchie]], [[Jonas Fromseier Mortensen|AUTHOR Jonas Fromseier Mortensen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191718.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-3-5|PAPER Mon-O-2-3-5 — Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio]]</div>|<div class="cpsessionviewpapertitle">Towards Bilingual Lexicon Discovery From Visually Grounded Speech Audio</div><div class="cpsessionviewpaperauthor">[[Emmanuel Azuh|AUTHOR Emmanuel Azuh]], [[David Harwath|AUTHOR David Harwath]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191338.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-3-6|PAPER Mon-O-2-3-6 — Improving Unsupervised Subword Modeling via Disentangled Speech Representation Learning and Transformation]]</div>|<div class="cpsessionviewpapertitle">Improving Unsupervised Subword Modeling via Disentangled Speech Representation Learning and Transformation</div><div class="cpsessionviewpaperauthor">[[Siyuan Feng|AUTHOR Siyuan Feng]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Barbara Schuppler, Marcin Włodarczak|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191865.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-4-1|PAPER Mon-O-2-4-1 — Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts]]</div>|<div class="cpsessionviewpapertitle">Listeners’ Ability to Identify the Gender of Preadolescent Children in Different Linguistic Contexts</div><div class="cpsessionviewpaperauthor">[[Shawn Nissen|AUTHOR Shawn Nissen]], [[Sharalee Blunck|AUTHOR Sharalee Blunck]], [[Anita Dromey|AUTHOR Anita Dromey]], [[Christopher Dromey|AUTHOR Christopher Dromey]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-4-2|PAPER Mon-O-2-4-2 — Sibilant Variation in New Englishes: A Comparative Sociophonetic Study of Trinidadian and American English /s(tr)/-Retraction]]</div>|<div class="cpsessionviewpapertitle">Sibilant Variation in New Englishes: A Comparative Sociophonetic Study of Trinidadian and American English /s(tr)/-Retraction</div><div class="cpsessionviewpaperauthor">[[Wiebke Ahlers|AUTHOR Wiebke Ahlers]], [[Philipp Meer|AUTHOR Philipp Meer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192115.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-4-3|PAPER Mon-O-2-4-3 — Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis]]</div>|<div class="cpsessionviewpapertitle">Tracking the New Zealand English NEAR/SQUARE Merger Using Functional Principal Components Analysis</div><div class="cpsessionviewpaperauthor">[[Michele Gubian|AUTHOR Michele Gubian]], [[Jonathan Harrington|AUTHOR Jonathan Harrington]], [[Mary Stevens|AUTHOR Mary Stevens]], [[Florian Schiel|AUTHOR Florian Schiel]], [[Paul Warren|AUTHOR Paul Warren]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-4-4|PAPER Mon-O-2-4-4 — Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments]]</div>|<div class="cpsessionviewpapertitle">Phonetic Accommodation in a Wizard-of-Oz Experiment: Intonation and Segments</div><div class="cpsessionviewpaperauthor">[[Iona Gessinger|AUTHOR Iona Gessinger]], [[Bernd Möbius|AUTHOR Bernd Möbius]], [[Bistra Andreeva|AUTHOR Bistra Andreeva]], [[Eran Raveh|AUTHOR Eran Raveh]], [[Ingmar Steiner|AUTHOR Ingmar Steiner]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-4-5|PAPER Mon-O-2-4-5 — PASCAL and DPA: A Pilot Study on Using Prosodic Competence Scores to Predict Communicative Skills for Team Working and Public Speaking]]</div>|<div class="cpsessionviewpapertitle">PASCAL and DPA: A Pilot Study on Using Prosodic Competence Scores to Predict Communicative Skills for Team Working and Public Speaking</div><div class="cpsessionviewpaperauthor">[[Oliver Niebuhr|AUTHOR Oliver Niebuhr]], [[Jan Michalsky|AUTHOR Jan Michalsky]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-4-6|PAPER Mon-O-2-4-6 — Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks]]</div>|<div class="cpsessionviewpapertitle">Towards the Prosody of Persuasion in Competitive Negotiation. The Relationship Between f0 and Negotiation Success in Same Sex Sales Tasks</div><div class="cpsessionviewpaperauthor">[[Jan Michalsky|AUTHOR Jan Michalsky]], [[Heike Schoormann|AUTHOR Heike Schoormann]], [[Thomas Schultze|AUTHOR Thomas Schultze]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Martine Adda-Decker, Michael Picheny|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-5-1|PAPER Mon-O-2-5-1 — VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English]]</div>|<div class="cpsessionviewpapertitle">VESUS: A Crowd-Annotated Database to Study Emotion Production and Perception in Spoken English</div><div class="cpsessionviewpaperauthor">[[Jacob Sager|AUTHOR Jacob Sager]], [[Ravi Shankar|AUTHOR Ravi Shankar]], [[Jacob Reinhold|AUTHOR Jacob Reinhold]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191525.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-5-2|PAPER Mon-O-2-5-2 — Building the Singapore English National Speech Corpus]]</div>|<div class="cpsessionviewpapertitle">Building the Singapore English National Speech Corpus</div><div class="cpsessionviewpaperauthor">[[Jia Xin Koh|AUTHOR Jia Xin Koh]], [[Aqilah Mislan|AUTHOR Aqilah Mislan]], [[Kevin Khoo|AUTHOR Kevin Khoo]], [[Brian Ang|AUTHOR Brian Ang]], [[Wilson Ang|AUTHOR Wilson Ang]], [[Charmaine Ng|AUTHOR Charmaine Ng]], [[Ying-Ying Tan|AUTHOR Ying-Ying Tan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191907.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-5-3|PAPER Mon-O-2-5-3 — Challenging the Boundaries of Speech Recognition: The MALACH Corpus]]</div>|<div class="cpsessionviewpapertitle">Challenging the Boundaries of Speech Recognition: The MALACH Corpus</div><div class="cpsessionviewpaperauthor">[[Michael Picheny|AUTHOR Michael Picheny]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[George Saon|AUTHOR George Saon]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192061.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-5-4|PAPER Mon-O-2-5-4 — NITK Kids’ Speech Corpus]]</div>|<div class="cpsessionviewpapertitle">NITK Kids’ Speech Corpus</div><div class="cpsessionviewpaperauthor">[[Pravin Bhaskar Ramteke|AUTHOR Pravin Bhaskar Ramteke]], [[Sujata Supanekar|AUTHOR Sujata Supanekar]], [[Pradyoth Hegde|AUTHOR Pradyoth Hegde]], [[Hanna Nelson|AUTHOR Hanna Nelson]], [[Venkataraja Aithal|AUTHOR Venkataraja Aithal]], [[Shashidhar G. Koolagudi|AUTHOR Shashidhar G. Koolagudi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-5-5|PAPER Mon-O-2-5-5 — Towards Variability Resistant Dialectal Speech Evaluation]]</div>|<div class="cpsessionviewpapertitle">Towards Variability Resistant Dialectal Speech Evaluation</div><div class="cpsessionviewpaperauthor">[[Ahmed Ali|AUTHOR Ahmed Ali]], [[Salam Khalifa|AUTHOR Salam Khalifa]], [[Nizar Habash|AUTHOR Nizar Habash]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-O-2-5-6|PAPER Mon-O-2-5-6 — How to Annotate 100 Hours in 45 Minutes]]</div>|<div class="cpsessionviewpapertitle">How to Annotate 100 Hours in 45 Minutes</div><div class="cpsessionviewpaperauthor">[[Per Fallgren|AUTHOR Per Fallgren]], [[Zofia Malisz|AUTHOR Zofia Malisz]], [[Jens Edlund|AUTHOR Jens Edlund]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Suwon Shon|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192813.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-1|PAPER Mon-P-1-A-1 — Bayesian HMM Based x-Vector Clustering for Speaker Diarization]]</div>|<div class="cpsessionviewpapertitle">Bayesian HMM Based x-Vector Clustering for Speaker Diarization</div><div class="cpsessionviewpaperauthor">[[Mireia Diez|AUTHOR Mireia Diez]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Jan Černocký|AUTHOR Jan Černocký]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-2|PAPER Mon-P-1-A-2 — Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration]]</div>|<div class="cpsessionviewpapertitle">Unleashing the Unused Potential of i-Vectors Enabled by GPU Acceleration</div><div class="cpsessionviewpaperauthor">[[Ville Vestman|AUTHOR Ville Vestman]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Tomi H. Kinnunen|AUTHOR Tomi H. Kinnunen]], [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-3|PAPER Mon-P-1-A-3 — MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation]]</div>|<div class="cpsessionviewpapertitle">MCE 2018: The 1st Multi-Target Speaker Detection and Identification Challenge Evaluation</div><div class="cpsessionviewpaperauthor">[[Suwon Shon|AUTHOR Suwon Shon]], [[Najim Dehak|AUTHOR Najim Dehak]], [[Douglas Reynolds|AUTHOR Douglas Reynolds]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-4|PAPER Mon-P-1-A-4 — Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System]]</div>|<div class="cpsessionviewpapertitle">Improving Aggregation and Loss Function for Better Embedding Learning in End-to-End Speaker Verification System</div><div class="cpsessionviewpaperauthor">[[Zhifu Gao|AUTHOR Zhifu Gao]], [[Yan Song|AUTHOR Yan Song]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Pengcheng Li|AUTHOR Pengcheng Li]], [[Yiheng Jiang|AUTHOR Yiheng Jiang]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191388.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-5|PAPER Mon-P-1-A-5 — LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization]]</div>|<div class="cpsessionviewpapertitle">LSTM Based Similarity Measurement with Spectral Clustering for Speaker Diarization</div><div class="cpsessionviewpaperauthor">[[Qingjian Lin|AUTHOR Qingjian Lin]], [[Ruiqing Yin|AUTHOR Ruiqing Yin]], [[Ming Li|AUTHOR Ming Li]], [[Hervé Bredin|AUTHOR Hervé Bredin]], [[Claude Barras|AUTHOR Claude Barras]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193116.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-6|PAPER Mon-P-1-A-6 — Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings]]</div>|<div class="cpsessionviewpapertitle">Who Said That?: Audio-Visual Speaker Diarisation of Real-World Meetings</div><div class="cpsessionviewpaperauthor">[[Joon Son Chung|AUTHOR Joon Son Chung]], [[Bong-Jin Lee|AUTHOR Bong-Jin Lee]], [[Icksang Han|AUTHOR Icksang Han]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192961.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-7|PAPER Mon-P-1-A-7 — Multi-PLDA Diarization on Children’s Speech]]</div>|<div class="cpsessionviewpapertitle">Multi-PLDA Diarization on Children’s Speech</div><div class="cpsessionviewpaperauthor">[[Jiamin Xie|AUTHOR Jiamin Xie]], [[Leibny Paola García-Perera|AUTHOR Leibny Paola García-Perera]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192912.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-8|PAPER Mon-P-1-A-8 — Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings]]</div>|<div class="cpsessionviewpapertitle">Speaker Diarization Using Leave-One-Out Gaussian PLDA Clustering of DNN Embeddings</div><div class="cpsessionviewpaperauthor">[[Alan McCree|AUTHOR Alan McCree]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192756.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-9|PAPER Mon-P-1-A-9 — Speaker-Corrupted Embeddings for Online Speaker Diarization]]</div>|<div class="cpsessionviewpapertitle">Speaker-Corrupted Embeddings for Online Speaker Diarization</div><div class="cpsessionviewpaperauthor">[[Omid Ghahabi|AUTHOR Omid Ghahabi]], [[Volker Fischer|AUTHOR Volker Fischer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191947.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-10|PAPER Mon-P-1-A-10 — Speaker Diarization with Lexical Information]]</div>|<div class="cpsessionviewpapertitle">Speaker Diarization with Lexical Information</div><div class="cpsessionviewpaperauthor">[[Tae Jin Park|AUTHOR Tae Jin Park]], [[Kyu J. Han|AUTHOR Kyu J. Han]], [[Jing Huang|AUTHOR Jing Huang]], [[Xiaodong He|AUTHOR Xiaodong He]], [[Bowen Zhou|AUTHOR Bowen Zhou]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191943.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-11|PAPER Mon-P-1-A-11 — Joint Speech Recognition and Speaker Diarization via Sequence Transduction]]</div>|<div class="cpsessionviewpapertitle">Joint Speech Recognition and Speaker Diarization via Sequence Transduction</div><div class="cpsessionviewpaperauthor">[[Laurent El Shafey|AUTHOR Laurent El Shafey]], [[Hagen Soltau|AUTHOR Hagen Soltau]], [[Izhak Shafran|AUTHOR Izhak Shafran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-12|PAPER Mon-P-1-A-12 — Normal Variance-Mean Mixtures for Unsupervised Score Calibration]]</div>|<div class="cpsessionviewpapertitle">Normal Variance-Mean Mixtures for Unsupervised Score Calibration</div><div class="cpsessionviewpaperauthor">[[Sandro Cumani|AUTHOR Sandro Cumani]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191508.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-13|PAPER Mon-P-1-A-13 — Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding]]</div>|<div class="cpsessionviewpapertitle">Speaker Augmentation and Bandwidth Extension for Deep Speaker Embedding</div><div class="cpsessionviewpaperauthor">[[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Koji Okabe|AUTHOR Koji Okabe]], [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191399.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-14|PAPER Mon-P-1-A-14 — Large-Scale Speaker Diarization of Radio Broadcast Archives]]</div>|<div class="cpsessionviewpapertitle">Large-Scale Speaker Diarization of Radio Broadcast Archives</div><div class="cpsessionviewpaperauthor">[[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Adem Derinel|AUTHOR Adem Derinel]], [[Kun Zhou|AUTHOR Kun Zhou]], [[Henk van den Heuvel|AUTHOR Henk van den Heuvel]], [[Niko Brummer|AUTHOR Niko Brummer]], [[Haizhou Li|AUTHOR Haizhou Li]], [[David A. van Leeuwen|AUTHOR David A. van Leeuwen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-A-15|PAPER Mon-P-1-A-15 — Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams]]</div>|<div class="cpsessionviewpapertitle">Toeplitz Inverse Covariance Based Robust Speaker Clustering for Naturalistic Audio Streams</div><div class="cpsessionviewpaperauthor">[[Harishchandra Dubey|AUTHOR Harishchandra Dubey]], [[Abhijeet Sangwan|AUTHOR Abhijeet Sangwan]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Ozlem Kalinli|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-1|PAPER Mon-P-1-B-1 — Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Examining the Combination of Multi-Band Processing and Channel Dropout for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[György Kovács|AUTHOR György Kovács]], [[László Tóth|AUTHOR László Tóth]], [[Dirk Van Compernolle|AUTHOR Dirk Van Compernolle]], [[Marcus Liwicki|AUTHOR Marcus Liwicki]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-2|PAPER Mon-P-1-B-2 — Label Driven Time-Frequency Masking for Robust Continuous Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Label Driven Time-Frequency Masking for Robust Continuous Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Meet Soni|AUTHOR Meet Soni]], [[Ashish Panda|AUTHOR Ashish Panda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-3|PAPER Mon-P-1-B-3 — Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning]]</div>|<div class="cpsessionviewpapertitle">Speaker-Invariant Feature-Mapping for Distant Speech Recognition via Adversarial Teacher-Student Learning</div><div class="cpsessionviewpaperauthor">[[Long Wu|AUTHOR Long Wu]], [[Hangting Chen|AUTHOR Hangting Chen]], [[Li Wang|AUTHOR Li Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192127.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-4|PAPER Mon-P-1-B-4 — Full-Sentence Correlation: A Method to Handle Unpredictable Noise for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Full-Sentence Correlation: A Method to Handle Unpredictable Noise for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ji Ming|AUTHOR Ji Ming]], [[Danny Crookes|AUTHOR Danny Crookes]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192090.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-5|PAPER Mon-P-1-B-5 — Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions]]</div>|<div class="cpsessionviewpapertitle">Generative Noise Modeling and Channel Simulation for Robust Speech Recognition in Unseen Conditions</div><div class="cpsessionviewpaperauthor">[[Meet Soni|AUTHOR Meet Soni]], [[Sonal Joshi|AUTHOR Sonal Joshi]], [[Ashish Panda|AUTHOR Ashish Panda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-6|PAPER Mon-P-1-B-6 — Far-Field Speech Enhancement Using Heteroscedastic Autoencoder for Improved Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Far-Field Speech Enhancement Using Heteroscedastic Autoencoder for Improved Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shashi Kumar|AUTHOR Shashi Kumar]], [[Shakti P. Rath|AUTHOR Shakti P. Rath]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-7|PAPER Mon-P-1-B-7 — End-to-End SpeakerBeam for Single Channel Target Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">End-to-End SpeakerBeam for Single Channel Target Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Marc Delcroix|AUTHOR Marc Delcroix]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Shigeki Karita|AUTHOR Shigeki Karita]], [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-8|PAPER Mon-P-1-B-8 — NIESR: Nuisance Invariant End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">NIESR: Nuisance Invariant End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[I-Hung Hsu|AUTHOR I-Hung Hsu]], [[Ayush Jaiswal|AUTHOR Ayush Jaiswal]], [[Premkumar Natarajan|AUTHOR Premkumar Natarajan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191597.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-9|PAPER Mon-P-1-B-9 — Knowledge Distillation for Throat Microphone Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Knowledge Distillation for Throat Microphone Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Takahito Suzuki|AUTHOR Takahito Suzuki]], [[Jun Ogata|AUTHOR Jun Ogata]], [[Takashi Tsunakawa|AUTHOR Takashi Tsunakawa]], [[Masafumi Nishida|AUTHOR Masafumi Nishida]], [[Masafumi Nishimura|AUTHOR Masafumi Nishimura]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191569.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-10|PAPER Mon-P-1-B-10 — Improved Speaker-Dependent Separation for CHiME-5 Challenge]]</div>|<div class="cpsessionviewpapertitle">Improved Speaker-Dependent Separation for CHiME-5 Challenge</div><div class="cpsessionviewpaperauthor">[[Jian Wu|AUTHOR Jian Wu]], [[Yong Xu|AUTHOR Yong Xu]], [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]], [[Lianwu Chen|AUTHOR Lianwu Chen]], [[Meng Yu|AUTHOR Meng Yu]], [[Lei Xie|AUTHOR Lei Xie]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191495.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-11|PAPER Mon-P-1-B-11 — Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling]]</div>|<div class="cpsessionviewpapertitle">Bridging the Gap Between Monaural Speech Enhancement and Recognition with Distortion-Independent Acoustic Modeling</div><div class="cpsessionviewpaperauthor">[[Peidong Wang|AUTHOR Peidong Wang]], [[Ke Tan|AUTHOR Ke Tan]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191493.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-12|PAPER Mon-P-1-B-12 — Enhanced Spectral Features for Distortion-Independent Acoustic Modeling]]</div>|<div class="cpsessionviewpapertitle">Enhanced Spectral Features for Distortion-Independent Acoustic Modeling</div><div class="cpsessionviewpaperauthor">[[Peidong Wang|AUTHOR Peidong Wang]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-13|PAPER Mon-P-1-B-13 — Universal Adversarial Perturbations for Speech Recognition Systems]]</div>|<div class="cpsessionviewpapertitle">Universal Adversarial Perturbations for Speech Recognition Systems</div><div class="cpsessionviewpaperauthor">[[Paarth Neekhara|AUTHOR Paarth Neekhara]], [[Shehzeen Hussain|AUTHOR Shehzeen Hussain]], [[Prakhar Pandey|AUTHOR Prakhar Pandey]], [[Shlomo Dubnov|AUTHOR Shlomo Dubnov]], [[Julian McAuley|AUTHOR Julian McAuley]], [[Farinaz Koushanfar|AUTHOR Farinaz Koushanfar]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-14|PAPER Mon-P-1-B-14 — One-Pass Single-Channel Noisy Speech Recognition Using a Combination of Noisy and Enhanced Features]]</div>|<div class="cpsessionviewpapertitle">One-Pass Single-Channel Noisy Speech Recognition Using a Combination of Noisy and Enhanced Features</div><div class="cpsessionviewpaperauthor">[[Masakiyo Fujimoto|AUTHOR Masakiyo Fujimoto]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-B-15|PAPER Mon-P-1-B-15 — Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Jointly Adversarial Enhancement Training for Robust End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Bin Liu|AUTHOR Bin Liu]], [[Shuai Nie|AUTHOR Shuai Nie]], [[Shan Liang|AUTHOR Shan Liang]], [[Wenju Liu|AUTHOR Wenju Liu]], [[Meng Yu|AUTHOR Meng Yu]], [[Lianwu Chen|AUTHOR Lianwu Chen]], [[Shouye Peng|AUTHOR Shouye Peng]], [[Changliang Li|AUTHOR Changliang Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Elizabeth Shriberg|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193113.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-1|PAPER Mon-P-1-C-1 — Predicting Humor by Learning from Time-Aligned Comments]]</div>|<div class="cpsessionviewpapertitle">Predicting Humor by Learning from Time-Aligned Comments</div><div class="cpsessionviewpaperauthor">[[Zixiaofan Yang|AUTHOR Zixiaofan Yang]], [[Bingyan Hu|AUTHOR Bingyan Hu]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-2|PAPER Mon-P-1-C-2 — Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information]]</div>|<div class="cpsessionviewpapertitle">Predicting the Leading Political Ideology of YouTube Channels Using Acoustic, Textual, and Metadata Information</div><div class="cpsessionviewpaperauthor">[[Yoan Dinkov|AUTHOR Yoan Dinkov]], [[Ahmed Ali|AUTHOR Ahmed Ali]], [[Ivan Koychev|AUTHOR Ivan Koychev]], [[Preslav Nakov|AUTHOR Preslav Nakov]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-3|PAPER Mon-P-1-C-3 — Mitigating Gender and L1 Differences to Improve State and Trait Recognition]]</div>|<div class="cpsessionviewpapertitle">Mitigating Gender and L1 Differences to Improve State and Trait Recognition</div><div class="cpsessionviewpaperauthor">[[Guozhen An|AUTHOR Guozhen An]], [[Rivka Levitan|AUTHOR Rivka Levitan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-4|PAPER Mon-P-1-C-4 — Deep Learning Based Mandarin Accent Identification for Accent Robust ASR]]</div>|<div class="cpsessionviewpapertitle">Deep Learning Based Mandarin Accent Identification for Accent Robust ASR</div><div class="cpsessionviewpaperauthor">[[Felix Weninger|AUTHOR Felix Weninger]], [[Yang Sun|AUTHOR Yang Sun]], [[Junho Park|AUTHOR Junho Park]], [[Daniel Willett|AUTHOR Daniel Willett]], [[Puming Zhan|AUTHOR Puming Zhan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192552.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-5|PAPER Mon-P-1-C-5 — Calibrating DNN Posterior Probability Estimates of HMM/DNN Models to Improve Social Signal Detection from Audio Data]]</div>|<div class="cpsessionviewpapertitle">Calibrating DNN Posterior Probability Estimates of HMM/DNN Models to Improve Social Signal Detection from Audio Data</div><div class="cpsessionviewpaperauthor">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]], [[László Tóth|AUTHOR László Tóth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-6|PAPER Mon-P-1-C-6 — Conversational and Social Laughter Synthesis with WaveNet]]</div>|<div class="cpsessionviewpapertitle">Conversational and Social Laughter Synthesis with WaveNet</div><div class="cpsessionviewpaperauthor">[[Hiroki Mori|AUTHOR Hiroki Mori]], [[Tomohiro Nagata|AUTHOR Tomohiro Nagata]], [[Yoshiko Arimoto|AUTHOR Yoshiko Arimoto]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191733.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-7|PAPER Mon-P-1-C-7 — Laughter Dynamics in Dyadic Conversations]]</div>|<div class="cpsessionviewpapertitle">Laughter Dynamics in Dyadic Conversations</div><div class="cpsessionviewpaperauthor">[[Bogdan Ludusan|AUTHOR Bogdan Ludusan]], [[Petra Wagner|AUTHOR Petra Wagner]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191557.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-8|PAPER Mon-P-1-C-8 — Towards an Annotation Scheme for Complex Laughter in Speech Corpora]]</div>|<div class="cpsessionviewpapertitle">Towards an Annotation Scheme for Complex Laughter in Speech Corpora</div><div class="cpsessionviewpaperauthor">[[Khiet P. Truong|AUTHOR Khiet P. Truong]], [[Jürgen Trouvain|AUTHOR Jürgen Trouvain]], [[Michel-Pierre Jansen|AUTHOR Michel-Pierre Jansen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-9|PAPER Mon-P-1-C-9 — Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test]]</div>|<div class="cpsessionviewpapertitle">Using Speech to Predict Sequentially Measured Cortisol Levels During a Trier Social Stress Test</div><div class="cpsessionviewpaperauthor">[[Alice Baird|AUTHOR Alice Baird]], [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Sarah Sturmbauer|AUTHOR Sarah Sturmbauer]], [[Johanna Janson|AUTHOR Johanna Janson]], [[Eva-Maria Messner|AUTHOR Eva-Maria Messner]], [[Harald Baumeister|AUTHOR Harald Baumeister]], [[Nicolas Rohleder|AUTHOR Nicolas Rohleder]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191349.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-10|PAPER Mon-P-1-C-10 — Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results]]</div>|<div class="cpsessionviewpapertitle">Sincerity in Acted Speech: Presenting the Sincere Apology Corpus and Results</div><div class="cpsessionviewpaperauthor">[[Alice Baird|AUTHOR Alice Baird]], [[Eduardo Coutinho|AUTHOR Eduardo Coutinho]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-11|PAPER Mon-P-1-C-11 — Do not Hesitate! — Unless You Do it Shortly or Nasally: How the Phonetics of Filled Pauses Determine Their Subjective Frequency and Perceived Speaker Performance]]</div>|<div class="cpsessionviewpapertitle">Do not Hesitate! — Unless You Do it Shortly or Nasally: How the Phonetics of Filled Pauses Determine Their Subjective Frequency and Perceived Speaker Performance</div><div class="cpsessionviewpaperauthor">[[Oliver Niebuhr|AUTHOR Oliver Niebuhr]], [[Kerstin Fischer|AUTHOR Kerstin Fischer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191405.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-C-12|PAPER Mon-P-1-C-12 — Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech]]</div>|<div class="cpsessionviewpapertitle">Phonet: A Tool Based on Gated Recurrent Neural Networks to Extract Phonological Posteriors from Speech</div><div class="cpsessionviewpaperauthor">[[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]], [[Philipp Klumpp|AUTHOR Philipp Klumpp]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Ngoc Thang Vu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193214.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-1|PAPER Mon-P-1-D-1 — Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">Code-Switching Sentence Generation by Generative Adversarial Networks and its Application to Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Ching-Ting Chang|AUTHOR Ching-Ting Chang]], [[Shun-Po Chuang|AUTHOR Shun-Po Chuang]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193072.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-2|PAPER Mon-P-1-D-2 — Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics]]</div>|<div class="cpsessionviewpapertitle">Comparative Analysis of Think-Aloud Methods for Everyday Activities in the Context of Cognitive Robotics</div><div class="cpsessionviewpaperauthor">[[Moritz Meier|AUTHOR Moritz Meier]], [[Celeste Mason|AUTHOR Celeste Mason]], [[Felix Putze|AUTHOR Felix Putze]], [[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192714.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-3|PAPER Mon-P-1-D-3 — RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts]]</div>|<div class="cpsessionviewpapertitle">RadioTalk: A Large-Scale Corpus of Talk Radio Transcripts</div><div class="cpsessionviewpaperauthor">[[Doug Beeferman|AUTHOR Doug Beeferman]], [[William Brannon|AUTHOR William Brannon]], [[Deb Roy|AUTHOR Deb Roy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192661.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-4|PAPER Mon-P-1-D-4 — Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus]]</div>|<div class="cpsessionviewpapertitle">Qualitative Evaluation of ASR Adaptation in a Lecture Context: Application to the PASTEL Corpus</div><div class="cpsessionviewpaperauthor">[[Salima Mdhaffar|AUTHOR Salima Mdhaffar]], [[Yannick Estève|AUTHOR Yannick Estève]], [[Nicolas Hernandez|AUTHOR Nicolas Hernandez]], [[Antoine Laurent|AUTHOR Antoine Laurent]], [[Richard Dufour|AUTHOR Richard Dufour]], [[Solen Quiniou|AUTHOR Solen Quiniou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-5|PAPER Mon-P-1-D-5 — Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning]]</div>|<div class="cpsessionviewpapertitle">Active Annotation: Bootstrapping Annotation Lexicon and Guidelines for Supervised NLU Learning</div><div class="cpsessionviewpaperauthor">[[Federico Marinelli|AUTHOR Federico Marinelli]], [[Alessandra Cervone|AUTHOR Alessandra Cervone]], [[Giuliano Tortoreto|AUTHOR Giuliano Tortoreto]], [[Evgeny A. Stepanov|AUTHOR Evgeny A. Stepanov]], [[Giuseppe Di Fabbrizio|AUTHOR Giuseppe Di Fabbrizio]], [[Giuseppe Riccardi|AUTHOR Giuseppe Riccardi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192378.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-6|PAPER Mon-P-1-D-6 — Automatic Lyric Transcription from Karaoke Vocal Tracks: Resources and a Baseline System]]</div>|<div class="cpsessionviewpapertitle">Automatic Lyric Transcription from Karaoke Vocal Tracks: Resources and a Baseline System</div><div class="cpsessionviewpaperauthor">[[Gerardo Roa Dabike|AUTHOR Gerardo Roa Dabike]], [[Jon Barker|AUTHOR Jon Barker]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-7|PAPER Mon-P-1-D-7 — Detecting Mismatch Between Speech and Transcription Using Cross-Modal Attention]]</div>|<div class="cpsessionviewpapertitle">Detecting Mismatch Between Speech and Transcription Using Cross-Modal Attention</div><div class="cpsessionviewpaperauthor">[[Qiang Huang|AUTHOR Qiang Huang]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191839.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-8|PAPER Mon-P-1-D-8 — EpaDB: A Database for Development of Pronunciation Assessment Systems]]</div>|<div class="cpsessionviewpapertitle">EpaDB: A Database for Development of Pronunciation Assessment Systems</div><div class="cpsessionviewpaperauthor">[[Jazmín Vidal|AUTHOR Jazmín Vidal]], [[Luciana Ferrer|AUTHOR Luciana Ferrer]], [[Leonardo Brambilla|AUTHOR Leonardo Brambilla]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191750.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-9|PAPER Mon-P-1-D-9 — Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience]]</div>|<div class="cpsessionviewpapertitle">Automatic Compression of Subtitles with Neural Networks and its Effect on User Experience</div><div class="cpsessionviewpaperauthor">[[Katrin Angerbauer|AUTHOR Katrin Angerbauer]], [[Heike Adel|AUTHOR Heike Adel]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191736.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-D-10|PAPER Mon-P-1-D-10 — Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering]]</div>|<div class="cpsessionviewpapertitle">Integrating Video Retrieval and Moment Detection in a Unified Corpus for Video Question Answering</div><div class="cpsessionviewpaperauthor">[[Hongyin Luo|AUTHOR Hongyin Luo]], [[Mitra Mohtarami|AUTHOR Mitra Mohtarami]], [[James Glass|AUTHOR James Glass]], [[Karthik Krishnamurthy|AUTHOR Karthik Krishnamurthy]], [[Brigitte Richardson|AUTHOR Brigitte Richardson]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Korbinian Riedhammer|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192967.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-1|PAPER Mon-P-1-E-1 — Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification]]</div>|<div class="cpsessionviewpapertitle">Early Identification of Speech Changes Due to Amyotrophic Lateral Sclerosis Using Machine Classification</div><div class="cpsessionviewpaperauthor">[[Sarah E. Gutz|AUTHOR Sarah E. Gutz]], [[Jun Wang|AUTHOR Jun Wang]], [[Yana Yunusova|AUTHOR Yana Yunusova]], [[Jordan R. Green|AUTHOR Jordan R. Green]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-2|PAPER Mon-P-1-E-2 — Automatic Detection of Breath Using Voice Activity Detection and SVM Classifier with Application on News Reports]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of Breath Using Voice Activity Detection and SVM Classifier with Application on News Reports</div><div class="cpsessionviewpaperauthor">[[Mohamed Ismail Yasar Arafath K.|AUTHOR Mohamed Ismail Yasar Arafath K.]], [[Aurobinda Routray|AUTHOR Aurobinda Routray]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-3|PAPER Mon-P-1-E-3 — Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels]]</div>|<div class="cpsessionviewpapertitle">Acoustic Scene Classification Using Teacher-Student Learning with Soft-Labels</div><div class="cpsessionviewpaperauthor">[[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-4|PAPER Mon-P-1-E-4 — Rare Sound Event Detection Using Deep Learning and Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">Rare Sound Event Detection Using Deep Learning and Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Yanping Chen|AUTHOR Yanping Chen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191942.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-5|PAPER Mon-P-1-E-5 — A Combination of Model-Based and Feature-Based Strategy for Speech-to-Singing Alignment]]</div>|<div class="cpsessionviewpapertitle">A Combination of Model-Based and Feature-Based Strategy for Speech-to-Singing Alignment</div><div class="cpsessionviewpaperauthor">[[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191735.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-6|PAPER Mon-P-1-E-6 — Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild]]</div>|<div class="cpsessionviewpapertitle">Dr.VOT: Measuring Positive and Negative Voice Onset Time in the Wild</div><div class="cpsessionviewpaperauthor">[[Yosi Shrem|AUTHOR Yosi Shrem]], [[Matthew Goldrick|AUTHOR Matthew Goldrick]], [[Joseph Keshet|AUTHOR Joseph Keshet]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191715.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-7|PAPER Mon-P-1-E-7 — Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models]]</div>|<div class="cpsessionviewpapertitle">Effects of Base-Frequency and Spectral Envelope on Deep-Learning Speech Separation and Recognition Models</div><div class="cpsessionviewpaperauthor">[[J. Hui|AUTHOR J. Hui]], [[Y. Wei|AUTHOR Y. Wei]], [[S.T. Chen|AUTHOR S.T. Chen]], [[R.H.Y. So|AUTHOR R.H.Y. So]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191504.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-8|PAPER Mon-P-1-E-8 — Phone Aware Nearest Neighbor Technique Using Spectral Transition Measure for Non-Parallel Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Phone Aware Nearest Neighbor Technique Using Spectral Transition Measure for Non-Parallel Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Nirmesh J. Shah|AUTHOR Nirmesh J. Shah]], [[Hemant A. Patil|AUTHOR Hemant A. Patil]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191450.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-9|PAPER Mon-P-1-E-9 — Weakly Supervised Syllable Segmentation by Vowel-Consonant Peak Classification]]</div>|<div class="cpsessionviewpapertitle">Weakly Supervised Syllable Segmentation by Vowel-Consonant Peak Classification</div><div class="cpsessionviewpaperauthor">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191407.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-10|PAPER Mon-P-1-E-10 — An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs]]</div>|<div class="cpsessionviewpapertitle">An Approach to Online Speaker Change Point Detection Using DNNs and WFSTs</div><div class="cpsessionviewpaperauthor">[[Lukas Mateju|AUTHOR Lukas Mateju]], [[Petr Cerva|AUTHOR Petr Cerva]], [[Jindrich Zdansky|AUTHOR Jindrich Zdansky]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-1-E-11|PAPER Mon-P-1-E-11 — Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Regression and Classification for Direction-of-Arrival Estimation with Convolutional Recurrent Neural Networks</div><div class="cpsessionviewpaperauthor">[[Zhenyu Tang|AUTHOR Zhenyu Tang]], [[John D. Kanu|AUTHOR John D. Kanu]], [[Kevin Hogan|AUTHOR Kevin Hogan]], [[Dinesh Manocha|AUTHOR Dinesh Manocha]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Gustav Eje Henter|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192869.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-1|PAPER Mon-P-2-A-1 — Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Non-Parallel Voice Conversion Using Weighted Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Dipjyoti Paul|AUTHOR Dipjyoti Paul]], [[Yannis Pantazis|AUTHOR Yannis Pantazis]], [[Yannis Stylianou|AUTHOR Yannis Stylianou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192663.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-2|PAPER Mon-P-2-A-2 — One-Shot Voice Conversion by Separating Speaker and Content Representations with Instance Normalization]]</div>|<div class="cpsessionviewpapertitle">One-Shot Voice Conversion by Separating Speaker and Content Representations with Instance Normalization</div><div class="cpsessionviewpaperauthor">[[Ju-chieh Chou|AUTHOR Ju-chieh Chou]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-3|PAPER Mon-P-2-A-3 — One-Shot Voice Conversion with Global Speaker Embeddings]]</div>|<div class="cpsessionviewpapertitle">One-Shot Voice Conversion with Global Speaker Embeddings</div><div class="cpsessionviewpaperauthor">[[Hui Lu|AUTHOR Hui Lu]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Dongyang Dai|AUTHOR Dongyang Dai]], [[Runnan Li|AUTHOR Runnan Li]], [[Shiyin Kang|AUTHOR Shiyin Kang]], [[Jia Jia|AUTHOR Jia Jia]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192307.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-4|PAPER Mon-P-2-A-4 — Non-Parallel Voice Conversion with Cyclic Variational Autoencoder]]</div>|<div class="cpsessionviewpapertitle">Non-Parallel Voice Conversion with Cyclic Variational Autoencoder</div><div class="cpsessionviewpaperauthor">[[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192236.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-5|PAPER Mon-P-2-A-5 — StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">StarGAN-VC2: Rethinking Conditional Methods for StarGAN-Based Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Takuhiro Kaneko|AUTHOR Takuhiro Kaneko]], [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]], [[Kou Tanaka|AUTHOR Kou Tanaka]], [[Nobukatsu Hojo|AUTHOR Nobukatsu Hojo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-6|PAPER Mon-P-2-A-6 — Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds]]</div>|<div class="cpsessionviewpapertitle">Robustness of Statistical Voice Conversion Based on Direct Waveform Modification Against Background Sounds</div><div class="cpsessionviewpaperauthor">[[Yusuke Kurita|AUTHOR Yusuke Kurita]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Kazuya Takeda|AUTHOR Kazuya Takeda]], [[Tomoki Toda|AUTHOR Tomoki Toda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-7|PAPER Mon-P-2-A-7 — Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Fast Learning for Non-Parallel Many-to-Many Voice Conversion with Residual Star Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Shengkui Zhao|AUTHOR Shengkui Zhao]], [[Trung Hieu Nguyen|AUTHOR Trung Hieu Nguyen]], [[Hao Wang|AUTHOR Hao Wang]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-8|PAPER Mon-P-2-A-8 — GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram]]</div>|<div class="cpsessionviewpapertitle">GELP: GAN-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram</div><div class="cpsessionviewpaperauthor">[[Lauri Juvela|AUTHOR Lauri Juvela]], [[Bajibabu Bollepalli|AUTHOR Bajibabu Bollepalli]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Paavo Alku|AUTHOR Paavo Alku]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191965.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-9|PAPER Mon-P-2-A-9 — Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation]]</div>|<div class="cpsessionviewpapertitle">Probability Density Distillation with Generative Adversarial Networks for High-Quality Parallel Waveform Generation</div><div class="cpsessionviewpaperauthor">[[Ryuichi Yamamoto|AUTHOR Ryuichi Yamamoto]], [[Eunwoo Song|AUTHOR Eunwoo Song]], [[Jae-Min Kim|AUTHOR Jae-Min Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-10|PAPER Mon-P-2-A-10 — One-Shot Voice Conversion with Disentangled Representations by Leveraging Phonetic Posteriorgrams]]</div>|<div class="cpsessionviewpapertitle">One-Shot Voice Conversion with Disentangled Representations by Leveraging Phonetic Posteriorgrams</div><div class="cpsessionviewpaperauthor">[[Seyed Hamidreza Mohammadi|AUTHOR Seyed Hamidreza Mohammadi]], [[Taehwan Kim|AUTHOR Taehwan Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191774.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-11|PAPER Mon-P-2-A-11 — Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Investigation of F0 Conditioning and Fully Convolutional Networks in Variational Autoencoder Based Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Wen-Chin Huang|AUTHOR Wen-Chin Huang]], [[Yi-Chiao Wu|AUTHOR Yi-Chiao Wu]], [[Chen-Chou Lo|AUTHOR Chen-Chou Lo]], [[Patrick Lumban Tobing|AUTHOR Patrick Lumban Tobing]], [[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Kazuhiro Kobayashi|AUTHOR Kazuhiro Kobayashi]], [[Tomoki Toda|AUTHOR Tomoki Toda]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-12|PAPER Mon-P-2-A-12 — Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams]]</div>|<div class="cpsessionviewpapertitle">Jointly Trained Conversion Model and WaveNet Vocoder for Non-Parallel Voice Conversion Using Mel-Spectrograms and Phonetic Posteriorgrams</div><div class="cpsessionviewpaperauthor">[[Songxiang Liu|AUTHOR Songxiang Liu]], [[Yuewen Cao|AUTHOR Yuewen Cao]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Lifa Sun|AUTHOR Lifa Sun]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191265.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-13|PAPER Mon-P-2-A-13 — Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech]]</div>|<div class="cpsessionviewpapertitle">Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech</div><div class="cpsessionviewpaperauthor">[[Li-Wei Chen|AUTHOR Li-Wei Chen]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]], [[Yu Tsao|AUTHOR Yu Tsao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191198.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-14|PAPER Mon-P-2-A-14 — Group Latent Embedding for Vector Quantized Variational Autoencoder in Non-Parallel Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Group Latent Embedding for Vector Quantized Variational Autoencoder in Non-Parallel Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Shaojin Ding|AUTHOR Shaojin Ding]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-A-15|PAPER Mon-P-2-A-15 — Semi-Supervised Voice Conversion with Amortized Variational Inference]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Voice Conversion with Amortized Variational Inference</div><div class="cpsessionviewpaperauthor">[[Cory Stephenson|AUTHOR Cory Stephenson]], [[Gokce Keskin|AUTHOR Gokce Keskin]], [[Anil Thomas|AUTHOR Anil Thomas]], [[Oguz H. Elibol|AUTHOR Oguz H. Elibol]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Penny Karanasou|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193246.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-1|PAPER Mon-P-2-B-1 — Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploiting Semi-Supervised Training Through a Dropout Regularization in End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Subhadeep Dey|AUTHOR Subhadeep Dey]], [[Petr Motlicek|AUTHOR Petr Motlicek]], [[Trung Bui|AUTHOR Trung Bui]], [[Franck Dernoncourt|AUTHOR Franck Dernoncourt]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-2|PAPER Mon-P-2-B-2 — Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System]]</div>|<div class="cpsessionviewpapertitle">Improved Vocal Tract Length Perturbation for a State-of-the-Art End-to-End Speech Recognition System</div><div class="cpsessionviewpaperauthor">[[Chanwoo Kim|AUTHOR Chanwoo Kim]], [[Minkyu Shin|AUTHOR Minkyu Shin]], [[Abhinav Garg|AUTHOR Abhinav Garg]], [[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193155.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-3|PAPER Mon-P-2-B-3 — Multi-Accent Adaptation Based on Gate Mechanism]]</div>|<div class="cpsessionviewpapertitle">Multi-Accent Adaptation Based on Gate Mechanism</div><div class="cpsessionviewpaperauthor">[[Han Zhu|AUTHOR Han Zhu]], [[Li Wang|AUTHOR Li Wang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192544.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-4|PAPER Mon-P-2-B-4 — Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Adaptation with Adversarial Dropout Regularization for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Pengcheng Guo|AUTHOR Pengcheng Guo]], [[Sining Sun|AUTHOR Sining Sun]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192162.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-5|PAPER Mon-P-2-B-5 — Cumulative Adaptation for BLSTM Acoustic Models]]</div>|<div class="cpsessionviewpapertitle">Cumulative Adaptation for BLSTM Acoustic Models</div><div class="cpsessionviewpaperauthor">[[Markus Kitza|AUTHOR Markus Kitza]], [[Pavel Golik|AUTHOR Pavel Golik]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192050.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-6|PAPER Mon-P-2-B-6 — Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features]]</div>|<div class="cpsessionviewpapertitle">Fast DNN Acoustic Model Speaker Adaptation by Learning Hidden Unit Contribution Features</div><div class="cpsessionviewpaperauthor">[[Xurong Xie|AUTHOR Xurong Xie]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Tan Lee|AUTHOR Tan Lee]], [[Lan Wang|AUTHOR Lan Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-7|PAPER Mon-P-2-B-7 — End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System]]</div>|<div class="cpsessionviewpapertitle">End-to-End Adaptation with Backpropagation Through WFST for On-Device Speech Recognition System</div><div class="cpsessionviewpaperauthor">[[Emiru Tsunoo|AUTHOR Emiru Tsunoo]], [[Yosuke Kashiwagi|AUTHOR Yosuke Kashiwagi]], [[Satoshi Asakawa|AUTHOR Satoshi Asakawa]], [[Toshiyuki Kumakura|AUTHOR Toshiyuki Kumakura]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191788.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-8|PAPER Mon-P-2-B-8 — Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Learning Speaker Aware Offsets for Speaker Adaptation of Neural Networks</div><div class="cpsessionviewpaperauthor">[[Leda Sarı|AUTHOR Leda Sarı]], [[Samuel Thomas|AUTHOR Samuel Thomas]], [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191752.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-9|PAPER Mon-P-2-B-9 — An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models]]</div>|<div class="cpsessionviewpapertitle">An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models</div><div class="cpsessionviewpaperauthor">[[Khe Chai Sim|AUTHOR Khe Chai Sim]], [[Petr Zadrazil|AUTHOR Petr Zadrazil]], [[Françoise Beaufays|AUTHOR Françoise Beaufays]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-10|PAPER Mon-P-2-B-10 — A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">A Multi-Accent Acoustic Model Using Mixture of Experts for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Abhinav Jain|AUTHOR Abhinav Jain]], [[Vishwanath P. Singh|AUTHOR Vishwanath P. Singh]], [[Shakti P. Rath|AUTHOR Shakti P. Rath]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191427.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-B-11|PAPER Mon-P-2-B-11 — Personalizing ASR for Dysarthric and Accented Speech with Limited Data]]</div>|<div class="cpsessionviewpapertitle">Personalizing ASR for Dysarthric and Accented Speech with Limited Data</div><div class="cpsessionviewpaperauthor">[[Joel Shor|AUTHOR Joel Shor]], [[Dotan Emanuel|AUTHOR Dotan Emanuel]], [[Oran Lang|AUTHOR Oran Lang]], [[Omry Tuval|AUTHOR Omry Tuval]], [[Michael Brenner|AUTHOR Michael Brenner]], [[Julie Cattiau|AUTHOR Julie Cattiau]], [[Fernando Vieira|AUTHOR Fernando Vieira]], [[Maeve McNally|AUTHOR Maeve McNally]], [[Taylor Charbonneau|AUTHOR Taylor Charbonneau]], [[Melissa Nollstadt|AUTHOR Melissa Nollstadt]], [[Avinatan Hassidim|AUTHOR Avinatan Hassidim]], [[Yossi Matias|AUTHOR Yossi Matias]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Ian Lane|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-1|PAPER Mon-P-2-C-1 — Mitigating Noisy Inputs for Question Answering]]</div>|<div class="cpsessionviewpapertitle">Mitigating Noisy Inputs for Question Answering</div><div class="cpsessionviewpaperauthor">[[Denis Peskov|AUTHOR Denis Peskov]], [[Joe Barrow|AUTHOR Joe Barrow]], [[Pedro Rodriguez|AUTHOR Pedro Rodriguez]], [[Graham Neubig|AUTHOR Graham Neubig]], [[Jordan Boyd-Graber|AUTHOR Jordan Boyd-Graber]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-2|PAPER Mon-P-2-C-2 — One-vs-All Models for Asynchronous Training: An Empirical Analysis]]</div>|<div class="cpsessionviewpapertitle">One-vs-All Models for Asynchronous Training: An Empirical Analysis</div><div class="cpsessionviewpaperauthor">[[Rahul Gupta|AUTHOR Rahul Gupta]], [[Aman Alok|AUTHOR Aman Alok]], [[Shankar Ananthakrishnan|AUTHOR Shankar Ananthakrishnan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192732.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-3|PAPER Mon-P-2-C-3 — Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning]]</div>|<div class="cpsessionviewpapertitle">Adapting a FrameNet Semantic Parser for Spoken Language Understanding Using Adversarial Learning</div><div class="cpsessionviewpaperauthor">[[Gabriel Marzinotto|AUTHOR Gabriel Marzinotto]], [[Géraldine Damnati|AUTHOR Géraldine Damnati]], [[Frédéric Béchet|AUTHOR Frédéric Béchet]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-4|PAPER Mon-P-2-C-4 — M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding]]</div>|<div class="cpsessionviewpapertitle">M2H-GAN: A GAN-Based Mapping from Machine to Human Transcripts for Speech Understanding</div><div class="cpsessionviewpaperauthor">[[Titouan Parcollet|AUTHOR Titouan Parcollet]], [[Mohamed Morchid|AUTHOR Mohamed Morchid]], [[Xavier Bost|AUTHOR Xavier Bost]], [[Georges Linarès|AUTHOR Georges Linarès]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-5|PAPER Mon-P-2-C-5 — Ultra-Compact NLU: Neuronal Network Binarization as Regularization]]</div>|<div class="cpsessionviewpapertitle">Ultra-Compact NLU: Neuronal Network Binarization as Regularization</div><div class="cpsessionviewpaperauthor">[[Munir Georges|AUTHOR Munir Georges]], [[Krzysztof Czarnowski|AUTHOR Krzysztof Czarnowski]], [[Tobias Bocklet|AUTHOR Tobias Bocklet]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192396.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-6|PAPER Mon-P-2-C-6 — Speech Model Pre-Training for End-to-End Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Speech Model Pre-Training for End-to-End Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Loren Lugosch|AUTHOR Loren Lugosch]], [[Mirco Ravanelli|AUTHOR Mirco Ravanelli]], [[Patrick Ignoto|AUTHOR Patrick Ignoto]], [[Vikrant Singh Tomar|AUTHOR Vikrant Singh Tomar]], [[Yoshua Bengio|AUTHOR Yoshua Bengio]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-7|PAPER Mon-P-2-C-7 — Spoken Language Intent Detection Using Confusion2Vec]]</div>|<div class="cpsessionviewpapertitle">Spoken Language Intent Detection Using Confusion2Vec</div><div class="cpsessionviewpaperauthor">[[Prashanth Gurunath Shivakumar|AUTHOR Prashanth Gurunath Shivakumar]], [[Mu Yang|AUTHOR Mu Yang]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-8|PAPER Mon-P-2-C-8 — Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech]]</div>|<div class="cpsessionviewpapertitle">Investigating Adaptation and Transfer Learning for End-to-End Spoken Language Understanding from Speech</div><div class="cpsessionviewpaperauthor">[[Natalia Tomashenko|AUTHOR Natalia Tomashenko]], [[Antoine Caubrière|AUTHOR Antoine Caubrière]], [[Yannick Estève|AUTHOR Yannick Estève]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191694.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-9|PAPER Mon-P-2-C-9 — Topic-Aware Dialogue Speech Recognition with Transfer Learning]]</div>|<div class="cpsessionviewpapertitle">Topic-Aware Dialogue Speech Recognition with Transfer Learning</div><div class="cpsessionviewpaperauthor">[[Yuanfeng Song|AUTHOR Yuanfeng Song]], [[Di Jiang|AUTHOR Di Jiang]], [[Xueyang Wu|AUTHOR Xueyang Wu]], [[Qian Xu|AUTHOR Qian Xu]], [[Raymond Chi-Wing Wong|AUTHOR Raymond Chi-Wing Wong]], [[Qiang Yang|AUTHOR Qiang Yang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191534.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-10|PAPER Mon-P-2-C-10 — Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models]]</div>|<div class="cpsessionviewpapertitle">Improving Conversation-Context Language Models with Multiple Spoken Language Understanding Models</div><div class="cpsessionviewpaperauthor">[[Ryo Masumura|AUTHOR Ryo Masumura]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Atsushi Ando|AUTHOR Atsushi Ando]], [[Hosana Kamiyama|AUTHOR Hosana Kamiyama]], [[Takanobu Oba|AUTHOR Takanobu Oba]], [[Satoshi Kobashikawa|AUTHOR Satoshi Kobashikawa]], [[Yushi Aono|AUTHOR Yushi Aono]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191383.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-11|PAPER Mon-P-2-C-11 — Meta Learning for Hyperparameter Optimization in Dialogue System]]</div>|<div class="cpsessionviewpapertitle">Meta Learning for Hyperparameter Optimization in Dialogue System</div><div class="cpsessionviewpaperauthor">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Wei Xiang Lieow|AUTHOR Wei Xiang Lieow]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191274.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-12|PAPER Mon-P-2-C-12 — Zero Shot Intent Classification Using Long-Short Term Memory Networks]]</div>|<div class="cpsessionviewpapertitle">Zero Shot Intent Classification Using Long-Short Term Memory Networks</div><div class="cpsessionviewpaperauthor">[[Kyle Williams|AUTHOR Kyle Williams]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191262.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-13|PAPER Mon-P-2-C-13 — A Comparison of Deep Learning Methods for Language Understanding]]</div>|<div class="cpsessionviewpapertitle">A Comparison of Deep Learning Methods for Language Understanding</div><div class="cpsessionviewpaperauthor">[[Mandy Korpusik|AUTHOR Mandy Korpusik]], [[Zoe Liu|AUTHOR Zoe Liu]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191226.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-C-14|PAPER Mon-P-2-C-14 — Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values]]</div>|<div class="cpsessionviewpapertitle">Slot Filling with Weighted Multi-Encoders for Out-of-Domain Values</div><div class="cpsessionviewpaperauthor">[[Yuka Kobayashi|AUTHOR Yuka Kobayashi]], [[Takami Yoshida|AUTHOR Takami Yoshida]], [[Kenji Iwata|AUTHOR Kenji Iwata]], [[Hiroshi Fujimura|AUTHOR Hiroshi Fujimura]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Jana Brunner|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-1|PAPER Mon-P-2-D-1 — Multi-Corpus Acoustic-to-Articulatory Speech Inversion]]</div>|<div class="cpsessionviewpapertitle">Multi-Corpus Acoustic-to-Articulatory Speech Inversion</div><div class="cpsessionviewpaperauthor">[[Nadee Seneviratne|AUTHOR Nadee Seneviratne]], [[Ganesh Sivaraman|AUTHOR Ganesh Sivaraman]], [[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193109.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-2|PAPER Mon-P-2-D-2 — Towards a Speaker Independent Speech-BCI Using Speaker Adaptation]]</div>|<div class="cpsessionviewpapertitle">Towards a Speaker Independent Speech-BCI Using Speaker Adaptation</div><div class="cpsessionviewpaperauthor">[[Debadatta Dash|AUTHOR Debadatta Dash]], [[Alan Wisler|AUTHOR Alan Wisler]], [[Paul Ferrari|AUTHOR Paul Ferrari]], [[Jun Wang|AUTHOR Jun Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-3|PAPER Mon-P-2-D-3 — Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text]]</div>|<div class="cpsessionviewpapertitle">Identifying Input Features for Development of Real-Time Translation of Neural Signals to Text</div><div class="cpsessionviewpaperauthor">[[Janaki Sheth|AUTHOR Janaki Sheth]], [[Ariel Tankus|AUTHOR Ariel Tankus]], [[Michelle Tran|AUTHOR Michelle Tran]], [[Lindy Comstock|AUTHOR Lindy Comstock]], [[Itzhak Fried|AUTHOR Itzhak Fried]], [[William Speier|AUTHOR William Speier]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-4|PAPER Mon-P-2-D-4 — Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract]]</div>|<div class="cpsessionviewpapertitle">Exploring Critical Articulator Identification from 50Hz RT-MRI Data of the Vocal Tract</div><div class="cpsessionviewpaperauthor">[[Samuel Silva|AUTHOR Samuel Silva]], [[António Teixeira|AUTHOR António Teixeira]], [[Conceição Cunha|AUTHOR Conceição Cunha]], [[Nuno Almeida|AUTHOR Nuno Almeida]], [[Arun A. Joseph|AUTHOR Arun A. Joseph]], [[Jens Frahm|AUTHOR Jens Frahm]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192880.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-5|PAPER Mon-P-2-D-5 — Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data]]</div>|<div class="cpsessionviewpapertitle">Towards a Method of Dynamic Vocal Tract Shapes Generation by Combining Static 3D and Dynamic 2D MRI Speech Data</div><div class="cpsessionviewpaperauthor">[[Ioannis K. Douros|AUTHOR Ioannis K. Douros]], [[Anastasiia Tsukanova|AUTHOR Anastasiia Tsukanova]], [[Karyna Isaieva|AUTHOR Karyna Isaieva]], [[Pierre-André Vuissoz|AUTHOR Pierre-André Vuissoz]], [[Yves Laprie|AUTHOR Yves Laprie]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192876.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-6|PAPER Mon-P-2-D-6 — Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation]]</div>|<div class="cpsessionviewpapertitle">Temporal Coordination of Articulatory and Respiratory Events Prior to Speech Initiation</div><div class="cpsessionviewpaperauthor">[[Oksana Rasskazova|AUTHOR Oksana Rasskazova]], [[Christine Mooshammer|AUTHOR Christine Mooshammer]], [[Susanne Fuchs|AUTHOR Susanne Fuchs]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-7|PAPER Mon-P-2-D-7 — Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA]]</div>|<div class="cpsessionviewpapertitle">Zooming in on Spatiotemporal V-to-C Coarticulation with Functional PCA</div><div class="cpsessionviewpaperauthor">[[Michele Gubian|AUTHOR Michele Gubian]], [[Manfred Pastätter|AUTHOR Manfred Pastätter]], [[Marianne Pouplier|AUTHOR Marianne Pouplier]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192046.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-8|PAPER Mon-P-2-D-8 — Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder]]</div>|<div class="cpsessionviewpapertitle">Ultrasound-Based Silent Speech Interface Built on a Continuous Vocoder</div><div class="cpsessionviewpaperauthor">[[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]], [[Mohammed Salah Al-Radhi|AUTHOR Mohammed Salah Al-Radhi]], [[Géza Németh|AUTHOR Géza Németh]], [[Gábor Gosztolya|AUTHOR Gábor Gosztolya]], [[Tamás Grósz|AUTHOR Tamás Grósz]], [[László Tóth|AUTHOR László Tóth]], [[Alexandra Markó|AUTHOR Alexandra Markó]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191812.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-9|PAPER Mon-P-2-D-9 — Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests]]</div>|<div class="cpsessionviewpapertitle">Assessing Acoustic and Articulatory Dimensions of Speech Motor Adaptation with Random Forests</div><div class="cpsessionviewpaperauthor">[[Eugen Klein|AUTHOR Eugen Klein]], [[Jana Brunner|AUTHOR Jana Brunner]], [[Phil Hoole|AUTHOR Phil Hoole]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191593.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-11|PAPER Mon-P-2-D-11 — Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method]]</div>|<div class="cpsessionviewpapertitle">Speech Organ Contour Extraction Using Real-Time MRI and Machine Learning Method</div><div class="cpsessionviewpaperauthor">[[Hironori Takemoto|AUTHOR Hironori Takemoto]], [[Tsubasa Goto|AUTHOR Tsubasa Goto]], [[Yuya Hagihara|AUTHOR Yuya Hagihara]], [[Sayaka Hamanaka|AUTHOR Sayaka Hamanaka]], [[Tatsuya Kitamura|AUTHOR Tatsuya Kitamura]], [[Yukiko Nota|AUTHOR Yukiko Nota]], [[Kikuo Maekawa|AUTHOR Kikuo Maekawa]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-12|PAPER Mon-P-2-D-12 — CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology]]</div>|<div class="cpsessionviewpapertitle">CNN-Based Phoneme Classifier from Vocal Tract MRI Learns Embedding Consistent with Articulatory Topology</div><div class="cpsessionviewpaperauthor">[[K.G. van Leeuwen|AUTHOR K.G. van Leeuwen]], [[P. Bos|AUTHOR P. Bos]], [[S. Trebeschi|AUTHOR S. Trebeschi]], [[M.J.A. van Alphen|AUTHOR M.J.A. van Alphen]], [[L. Voskuilen|AUTHOR L. Voskuilen]], [[L.E. Smeele|AUTHOR L.E. Smeele]], [[F. van der Heijden|AUTHOR F. van der Heijden]], [[R.J.J.H. van Son|AUTHOR R.J.J.H. van Son]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192650.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-D-13|PAPER Mon-P-2-D-13 — Strength and Structure: Coupling Tones with Oral Constriction Gestures]]</div>|<div class="cpsessionviewpapertitle">Strength and Structure: Coupling Tones with Oral Constriction Gestures</div><div class="cpsessionviewpaperauthor">[[Doris Mücke|AUTHOR Doris Mücke]], [[Anne Hermes|AUTHOR Anne Hermes]], [[Sam Tilsen|AUTHOR Sam Tilsen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Jean-François Bonastre|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191861.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-1|PAPER Mon-P-2-E-1 — Salient Speech Representations Based on Cloned Networks]]</div>|<div class="cpsessionviewpapertitle">Salient Speech Representations Based on Cloned Networks</div><div class="cpsessionviewpaperauthor">[[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]], [[Felicia S.C. Lim|AUTHOR Felicia S.C. Lim]], [[Michael Chinen|AUTHOR Michael Chinen]], [[Jan Skoglund|AUTHOR Jan Skoglund]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-2|PAPER Mon-P-2-E-2 — ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features]]</div>|<div class="cpsessionviewpapertitle">ASR Inspired Syllable Stress Detection for Pronunciation Evaluation Without Using a Supervised Classifier and Syllable Level Features</div><div class="cpsessionviewpaperauthor">[[Manoj Kumar Ramanathi|AUTHOR Manoj Kumar Ramanathi]], [[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192295.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-3|PAPER Mon-P-2-E-3 — Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network]]</div>|<div class="cpsessionviewpapertitle">Acoustic and Articulatory Feature Based Speech Rate Estimation Using a Convolutional Dense Neural Network</div><div class="cpsessionviewpaperauthor">[[Renuka Mannem|AUTHOR Renuka Mannem]], [[Jhansi Mallela|AUTHOR Jhansi Mallela]], [[Aravind Illa|AUTHOR Aravind Illa]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192845.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-5|PAPER Mon-P-2-E-5 — Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics]]</div>|<div class="cpsessionviewpapertitle">Predictive Auxiliary Variational Autoencoder for Representation Learning of Global Speech Characteristics</div><div class="cpsessionviewpaperauthor">[[Sebastian Springenberg|AUTHOR Sebastian Springenberg]], [[Egor Lakomkin|AUTHOR Egor Lakomkin]], [[Cornelius Weber|AUTHOR Cornelius Weber]], [[Stefan Wermter|AUTHOR Stefan Wermter]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-6|PAPER Mon-P-2-E-6 — Unsupervised Low-Rank Representations for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Low-Rank Representations for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Georgios Paraskevopoulos|AUTHOR Georgios Paraskevopoulos]], [[Efthymios Tzinis|AUTHOR Efthymios Tzinis]], [[Nikolaos Ellinas|AUTHOR Nikolaos Ellinas]], [[Theodoros Giannakopoulos|AUTHOR Theodoros Giannakopoulos]], [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-7|PAPER Mon-P-2-E-7 — On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">On the Suitability of the Riesz Spectro-Temporal Envelope for WaveNet Based Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Jitendra Kumar Dhiman|AUTHOR Jitendra Kumar Dhiman]], [[Nagaraj Adiga|AUTHOR Nagaraj Adiga]], [[Chandra Sekhar Seelamantula|AUTHOR Chandra Sekhar Seelamantula]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192406.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-8|PAPER Mon-P-2-E-8 — Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Autonomous Emotion Learning in Speech: A View of Zero-Shot Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Xinzhou Xu|AUTHOR Xinzhou Xu]], [[Jun Deng|AUTHOR Jun Deng]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Zixing Zhang|AUTHOR Zixing Zhang]], [[Li Zhao|AUTHOR Li Zhao]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-9|PAPER Mon-P-2-E-9 — An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities]]</div>|<div class="cpsessionviewpapertitle">An Improved Goodness of Pronunciation (GoP) Measure for Pronunciation Evaluation with DNN-HMM System Considering HMM Transition Probabilities</div><div class="cpsessionviewpaperauthor">[[Sweekar Sudhakara|AUTHOR Sweekar Sudhakara]], [[Manoj Kumar Ramanathi|AUTHOR Manoj Kumar Ramanathi]], [[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-P-2-E-10|PAPER Mon-P-2-E-10 — Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns]]</div>|<div class="cpsessionviewpapertitle">Low Resource Automatic Intonation Classification Using Gated Recurrent Unit (GRU) Networks Pre-Trained with Synthesized Pitch Patterns</div><div class="cpsessionviewpaperauthor">[[Atreyee Saha|AUTHOR Atreyee Saha]], [[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 4|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-1|PAPER Mon-S&T-1-1 — Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease]]</div>|<div class="cpsessionviewpapertitle">Apkinson: A Mobile Solution for Multimodal Assessment of Patients with Parkinson’s Disease</div><div class="cpsessionviewpaperauthor">[[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]], [[T. Arias-Vergara|AUTHOR T. Arias-Vergara]], [[Philipp Klumpp|AUTHOR Philipp Klumpp]], [[M. Strauss|AUTHOR M. Strauss]], [[A. Küderle|AUTHOR A. Küderle]], [[N. Roth|AUTHOR N. Roth]], [[S. Bayerl|AUTHOR S. Bayerl]], [[N. García-Ospina|AUTHOR N. García-Ospina]], [[P.A. Perez-Toro|AUTHOR P.A. Perez-Toro]], [[L.F. Parra-Gallego|AUTHOR L.F. Parra-Gallego]], [[Cristian David Rios-Urrego|AUTHOR Cristian David Rios-Urrego]], [[D. Escobar-Grisales|AUTHOR D. Escobar-Grisales]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]], [[B. Eskofier|AUTHOR B. Eskofier]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198004.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-2|PAPER Mon-S&T-1-2 — Depression State Assessment: Application for Detection of Depression by Speech]]</div>|<div class="cpsessionviewpapertitle">Depression State Assessment: Application for Detection of Depression by Speech</div><div class="cpsessionviewpaperauthor">[[Gábor Kiss|AUTHOR Gábor Kiss]], [[Dávid Sztahó|AUTHOR Dávid Sztahó]], [[Klára Vicsi|AUTHOR Klára Vicsi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198008.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-3|PAPER Mon-S&T-1-3 — SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners]]</div>|<div class="cpsessionviewpapertitle">SPIRE-fluent: A Self-Learning App for Tutoring Oral Fluency to Second Language English Learners</div><div class="cpsessionviewpaperauthor">[[Chiranjeevi Yarra|AUTHOR Chiranjeevi Yarra]], [[Aparna Srinivasan|AUTHOR Aparna Srinivasan]], [[Sravani Gottimukkala|AUTHOR Sravani Gottimukkala]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198016.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-4|PAPER Mon-S&T-1-4 — Using Real-Time Visual Biofeedback for Second Language Instruction]]</div>|<div class="cpsessionviewpapertitle">Using Real-Time Visual Biofeedback for Second Language Instruction</div><div class="cpsessionviewpaperauthor">[[Shawn Nissen|AUTHOR Shawn Nissen]], [[Rebecca Nissen|AUTHOR Rebecca Nissen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198027.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-5|PAPER Mon-S&T-1-5 — Splash: Speech and Language Assessment in Schools and Homes]]</div>|<div class="cpsessionviewpapertitle">Splash: Speech and Language Assessment in Schools and Homes</div><div class="cpsessionviewpaperauthor">[[A. Miwardelli|AUTHOR A. Miwardelli]], [[I. Gallagher|AUTHOR I. Gallagher]], [[J. Gibson|AUTHOR J. Gibson]], [[N. Katsos|AUTHOR N. Katsos]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[H. Wood|AUTHOR H. Wood]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-6|PAPER Mon-S&T-1-6 — Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice]]</div>|<div class="cpsessionviewpapertitle">Using Ultrasound Imaging to Create Augmented Visual Biofeedback for Articulatory Practice</div><div class="cpsessionviewpaperauthor">[[Colin T. Annand|AUTHOR Colin T. Annand]], [[Maurice Lamb|AUTHOR Maurice Lamb]], [[Sarah Dugan|AUTHOR Sarah Dugan]], [[Sarah R. Li|AUTHOR Sarah R. Li]], [[Hannah M. Woeste|AUTHOR Hannah M. Woeste]], [[T. Douglas Mast|AUTHOR T. Douglas Mast]], [[Michael A. Riley|AUTHOR Michael A. Riley]], [[Jack A. Masterson|AUTHOR Jack A. Masterson]], [[Neeraja Mahalingam|AUTHOR Neeraja Mahalingam]], [[Kathryn J. Eary|AUTHOR Kathryn J. Eary]], [[Caroline Spencer|AUTHOR Caroline Spencer]], [[Suzanne Boyce|AUTHOR Suzanne Boyce]], [[Stephanie Jackson|AUTHOR Stephanie Jackson]], [[Anoosha Baxi|AUTHOR Anoosha Baxi]], [[Reneé Seward|AUTHOR Reneé Seward]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-S&T-1-7|PAPER Mon-S&T-1-7 — Speech-Based Web Navigation for Limited Mobility Users]]</div>|<div class="cpsessionviewpapertitle">Speech-Based Web Navigation for Limited Mobility Users</div><div class="cpsessionviewpaperauthor">[[Vasiliy Radostev|AUTHOR Vasiliy Radostev]], [[Serge Berger|AUTHOR Serge Berger]], [[Justin Tabrizi|AUTHOR Justin Tabrizi]], [[Pasha Kamyshev|AUTHOR Pasha Kamyshev]], [[Hisami Suzuki|AUTHOR Hisami Suzuki]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|11:00–13:00, Monday 16 Sept 2019, Hall 3|<|
|^Chair:&nbsp;|^Keelan Evanini|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-1|PAPER Mon-SS-1-6-1 — Introduction: SIG-CHILD Special Interest Group]]</div>|<div class="cpsessionviewpapertitle">Introduction: SIG-CHILD Special Interest Group</div><div class="cpsessionviewpaperauthor"></div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192980.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-2|PAPER Mon-SS-1-6-2 — Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network]]</div>|<div class="cpsessionviewpapertitle">Advances in Automatic Speech Recognition for Child Speech Using Factored Time Delay Neural Network</div><div class="cpsessionviewpaperauthor">[[Fei Wu|AUTHOR Fei Wu]], [[Leibny Paola García-Perera|AUTHOR Leibny Paola García-Perera]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191847.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-3|PAPER Mon-SS-1-6-3 — A Frequency Normalization Technique for Kindergarten Speech Recognition Inspired by the Role of f,,o,, in Vowel Perception]]</div>|<div class="cpsessionviewpapertitle">A Frequency Normalization Technique for Kindergarten Speech Recognition Inspired by the Role of f,,o,, in Vowel Perception</div><div class="cpsessionviewpaperauthor">[[Gary Yeung|AUTHOR Gary Yeung]], [[Abeer Alwan|AUTHOR Abeer Alwan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-4|PAPER Mon-SS-1-6-4 — Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques]]</div>|<div class="cpsessionviewpapertitle">Improving ASR Systems for Children with Autism and Language Impairment Using Domain-Focused DNN Transfer Techniques</div><div class="cpsessionviewpaperauthor">[[Robert Gale|AUTHOR Robert Gale]], [[Liu Chen|AUTHOR Liu Chen]], [[Jill Dolata|AUTHOR Jill Dolata]], [[Jan van Santen|AUTHOR Jan van Santen]], [[Meysam Asgari|AUTHOR Meysam Asgari]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192612.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-5|PAPER Mon-SS-1-6-5 — Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions]]</div>|<div class="cpsessionviewpapertitle">Ultrasound Tongue Imaging for Diarization and Alignment of Child Speech Therapy Sessions</div><div class="cpsessionviewpaperauthor">[[Manuel Sam Ribeiro|AUTHOR Manuel Sam Ribeiro]], [[Aciel Eshky|AUTHOR Aciel Eshky]], [[Korin Richmond|AUTHOR Korin Richmond]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192889.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-6|PAPER Mon-SS-1-6-6 — Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead]]</div>|<div class="cpsessionviewpapertitle">Automated Estimation of Oral Reading Fluency During Summer Camp e-Book Reading with MyTurnToRead</div><div class="cpsessionviewpaperauthor">[[Anastassia Loukina|AUTHOR Anastassia Loukina]], [[Beata Beigman Klebanov|AUTHOR Beata Beigman Klebanov]], [[Patrick Lange|AUTHOR Patrick Lange]], [[Yao Qian|AUTHOR Yao Qian]], [[Binod Gyawali|AUTHOR Binod Gyawali]], [[Nitin Madnani|AUTHOR Nitin Madnani]], [[Abhinav Misra|AUTHOR Abhinav Misra]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Zuowei Wang|AUTHOR Zuowei Wang]], [[John Sabatini|AUTHOR John Sabatini]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-1-6-7|PAPER Mon-SS-1-6-7 — Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia]]</div>|<div class="cpsessionviewpapertitle">Sustained Vowel Game: A Computer Therapy Game for Children with Dysphonia</div><div class="cpsessionviewpaperauthor">[[Vanessa Lopes|AUTHOR Vanessa Lopes]], [[João Magalhães|AUTHOR João Magalhães]], [[Sofia Cavaco|AUTHOR Sofia Cavaco]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–16:30, Monday 16 Sept 2019, Hall 3|<|
|^Chair:&nbsp;|^Anna Esposito, Gennaro Cordasco|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191734.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-1|PAPER Mon-SS-2-6-1 — The Dependability of Voice on Elders’ Acceptance of Humanoid Agents]]</div>|<div class="cpsessionviewpapertitle">The Dependability of Voice on Elders’ Acceptance of Humanoid Agents</div><div class="cpsessionviewpaperauthor">[[Anna Esposito|AUTHOR Anna Esposito]], [[Terry Amorese|AUTHOR Terry Amorese]], [[Marialucia Cuciniello|AUTHOR Marialucia Cuciniello]], [[Maria Teresa Riviello|AUTHOR Maria Teresa Riviello]], [[Antonietta M. Esposito|AUTHOR Antonietta M. Esposito]], [[Alda Troncone|AUTHOR Alda Troncone]], [[Gennaro Cordasco|AUTHOR Gennaro Cordasco]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-2|PAPER Mon-SS-2-6-2 — God as Interlocutor — Real or Imaginary? Prosodic Markers of Dialogue Speech and Expected Efficacy in Spoken Prayer]]</div>|<div class="cpsessionviewpapertitle">God as Interlocutor — Real or Imaginary? Prosodic Markers of Dialogue Speech and Expected Efficacy in Spoken Prayer</div><div class="cpsessionviewpaperauthor">[[Oliver Niebuhr|AUTHOR Oliver Niebuhr]], [[Uffe Schjoedt|AUTHOR Uffe Schjoedt]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191368.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-3|PAPER Mon-SS-2-6-3 — Expressiveness Influences Human Vocal Alignment Toward voice-AI]]</div>|<div class="cpsessionviewpapertitle">Expressiveness Influences Human Vocal Alignment Toward voice-AI</div><div class="cpsessionviewpaperauthor">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-4|PAPER Mon-SS-2-6-4 — Detecting Topic-Oriented Speaker Stance in Conversational Speech]]</div>|<div class="cpsessionviewpapertitle">Detecting Topic-Oriented Speaker Stance in Conversational Speech</div><div class="cpsessionviewpaperauthor">[[Catherine Lai|AUTHOR Catherine Lai]], [[Beatrice Alex|AUTHOR Beatrice Alex]], [[Johanna D. Moore|AUTHOR Johanna D. Moore]], [[Leimin Tian|AUTHOR Leimin Tian]], [[Tatsuro Hori|AUTHOR Tatsuro Hori]], [[Gianpiero Francesca|AUTHOR Gianpiero Francesca]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193201.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-5|PAPER Mon-SS-2-6-5 — Fusion Techniques for Utterance-Level Emotion Recognition Combining Speech and Transcripts]]</div>|<div class="cpsessionviewpapertitle">Fusion Techniques for Utterance-Level Emotion Recognition Combining Speech and Transcripts</div><div class="cpsessionviewpaperauthor">[[Jilt Sebastian|AUTHOR Jilt Sebastian]], [[Piero Pierucci|AUTHOR Piero Pierucci]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192743.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-6|PAPER Mon-SS-2-6-6 — Explaining Sentiment Classification]]</div>|<div class="cpsessionviewpapertitle">Explaining Sentiment Classification</div><div class="cpsessionviewpaperauthor">[[Marvin Rajwadi|AUTHOR Marvin Rajwadi]], [[Cornelius Glackin|AUTHOR Cornelius Glackin]], [[Julie Wall|AUTHOR Julie Wall]], [[Gérard Chollet|AUTHOR Gérard Chollet]], [[Nigel Cannings|AUTHOR Nigel Cannings]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-7|PAPER Mon-SS-2-6-7 — Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models]]</div>|<div class="cpsessionviewpapertitle">Predicting Group-Level Skin Attention to Short Movies from Audio-Based LSTM-Mixture of Experts Models</div><div class="cpsessionviewpaperauthor">[[Ricardo Kleinlein|AUTHOR Ricardo Kleinlein]], [[Cristina Luna Jiménez|AUTHOR Cristina Luna Jiménez]], [[Juan Manuel Montero|AUTHOR Juan Manuel Montero]], [[Zoraida Callejas|AUTHOR Zoraida Callejas]], [[Fernando Fernández-Martínez|AUTHOR Fernando Fernández-Martínez]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Mon-SS-2-6-8|PAPER Mon-SS-2-6-8 — Discussion]]</div>|<div class="cpsessionviewpapertitle">Discussion</div><div class="cpsessionviewpaperauthor"></div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|08:30–09:30, Thursday 19 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Thu-K-4|PAPER Thu-K-4 — Learning Natural Language Interfaces with Neural Models]]</div>|<div class="cpsessionviewpapertitle">Learning Natural Language Interfaces with Neural Models</div><div class="cpsessionviewpaperauthor">[[Mirella Lapata|AUTHOR Mirella Lapata]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Ariya Rastrow, Ralf Schlüter|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Thu-O-10-1-1|PAPER Thu-O-10-1-1 — Survey Talk: Reaching Over the Gap: Cross- and Interdisciplinary Research on Human and Automatic Speech Processing]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Reaching Over the Gap: Cross- and Interdisciplinary Research on Human and Automatic Speech Processing</div><div class="cpsessionviewpaperauthor">[[Odette Scharenborg|AUTHOR Odette Scharenborg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191949.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-1-2|PAPER Thu-O-10-1-2 — Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders]]</div>|<div class="cpsessionviewpapertitle">Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders</div><div class="cpsessionviewpaperauthor">[[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Shigeki Karita|AUTHOR Shigeki Karita]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192225.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-1-3|PAPER Thu-O-10-1-3 — Language Modeling with Deep Transformers]]</div>|<div class="cpsessionviewpapertitle">Language Modeling with Deep Transformers</div><div class="cpsessionviewpaperauthor">[[Kazuki Irie|AUTHOR Kazuki Irie]], [[Albert Zeyer|AUTHOR Albert Zeyer]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193060.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-1-4|PAPER Thu-O-10-1-4 — Scalable Multi Corpora Neural Language Models for ASR]]</div>|<div class="cpsessionviewpapertitle">Scalable Multi Corpora Neural Language Models for ASR</div><div class="cpsessionviewpaperauthor">[[Anirudh Raju|AUTHOR Anirudh Raju]], [[Denis Filimonov|AUTHOR Denis Filimonov]], [[Gautam Tiwari|AUTHOR Gautam Tiwari]], [[Guitang Lan|AUTHOR Guitang Lan]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193107.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-1-5|PAPER Thu-O-10-1-5 — Who Needs Words? Lexicon-Free Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Who Needs Words? Lexicon-Free Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Tatiana Likhomanenko|AUTHOR Tatiana Likhomanenko]], [[Gabriel Synnaeve|AUTHOR Gabriel Synnaeve]], [[Ronan Collobert|AUTHOR Ronan Collobert]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Tatsuya Kawahara, Chung-Hsien Wu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193252.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-2-1|PAPER Thu-O-10-2-1 — Direct Modelling of Speech Emotion from Raw Speech]]</div>|<div class="cpsessionviewpapertitle">Direct Modelling of Speech Emotion from Raw Speech</div><div class="cpsessionviewpaperauthor">[[Siddique Latif|AUTHOR Siddique Latif]], [[Rajib Rana|AUTHOR Rajib Rana]], [[Sara Khalifa|AUTHOR Sara Khalifa]], [[Raja Jurdak|AUTHOR Raja Jurdak]], [[Julien Epps|AUTHOR Julien Epps]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192093.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-2-2|PAPER Thu-O-10-2-2 — Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN]]</div>|<div class="cpsessionviewpapertitle">Improving Emotion Identification Using Phone Posteriors in Raw Speech Waveform Based DNN</div><div class="cpsessionviewpaperauthor">[[Mousmita Sarma|AUTHOR Mousmita Sarma]], [[Pegah Ghahremani|AUTHOR Pegah Ghahremani]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Nagendra Kumar Goel|AUTHOR Nagendra Kumar Goel]], [[Kandarpa Kumar Sarma|AUTHOR Kandarpa Kumar Sarma]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193140.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-2-3|PAPER Thu-O-10-2-3 — Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Pyramid Memory Block and Timestep Attention for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Miao Cao|AUTHOR Miao Cao]], [[Chun Yang|AUTHOR Chun Yang]], [[Fang Zhou|AUTHOR Fang Zhou]], [[Xu-cheng Yin|AUTHOR Xu-cheng Yin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191658.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-2-4|PAPER Thu-O-10-2-4 — Robust Speech Emotion Recognition Under Different Encoding Conditions]]</div>|<div class="cpsessionviewpapertitle">Robust Speech Emotion Recognition Under Different Encoding Conditions</div><div class="cpsessionviewpaperauthor">[[Christopher Oates|AUTHOR Christopher Oates]], [[Andreas Triantafyllopoulos|AUTHOR Andreas Triantafyllopoulos]], [[Ingmar Steiner|AUTHOR Ingmar Steiner]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-2-5|PAPER Thu-O-10-2-5 — Using the Bag-of-Audio-Word Feature Representation of ASR DNN Posteriors for Paralinguistic Classification]]</div>|<div class="cpsessionviewpapertitle">Using the Bag-of-Audio-Word Feature Representation of ASR DNN Posteriors for Paralinguistic Classification</div><div class="cpsessionviewpaperauthor">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191769.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-2-6|PAPER Thu-O-10-2-6 — Disentangling Style Factors from Speaker Representations]]</div>|<div class="cpsessionviewpapertitle">Disentangling Style Factors from Speaker Representations</div><div class="cpsessionviewpaperauthor">[[Jennifer Williams|AUTHOR Jennifer Williams]], [[Simon King|AUTHOR Simon King]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Maria da Conceição Cunha, Pavel Šturm|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192545.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-3-1|PAPER Thu-O-10-3-1 — Sentence Prosody and  Wh-Indeterminates in Taiwan Mandarin]]</div>|<div class="cpsessionviewpapertitle">Sentence Prosody and  Wh-Indeterminates in Taiwan Mandarin</div><div class="cpsessionviewpaperauthor">[[Yu-Yin Hsu|AUTHOR Yu-Yin Hsu]], [[Anqi Xu|AUTHOR Anqi Xu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-3-2|PAPER Thu-O-10-3-2 — Frication as a Vowel Feature? — Evidence from the Rui’an Wu Chinese Dialect]]</div>|<div class="cpsessionviewpapertitle">Frication as a Vowel Feature? — Evidence from the Rui’an Wu Chinese Dialect</div><div class="cpsessionviewpaperauthor">[[Fang Hu|AUTHOR Fang Hu]], [[Youjue He|AUTHOR Youjue He]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-3-3|PAPER Thu-O-10-3-3 — Vowels and Diphthongs in the Xupu Xiang Chinese Dialect]]</div>|<div class="cpsessionviewpapertitle">Vowels and Diphthongs in the Xupu Xiang Chinese Dialect</div><div class="cpsessionviewpaperauthor">[[Zhenrui Zhang|AUTHOR Zhenrui Zhang]], [[Fang Hu|AUTHOR Fang Hu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-3-4|PAPER Thu-O-10-3-4 — Age-Related Changes in European Portuguese Vowel Acoustics]]</div>|<div class="cpsessionviewpapertitle">Age-Related Changes in European Portuguese Vowel Acoustics</div><div class="cpsessionviewpaperauthor">[[Luciana Albuquerque|AUTHOR Luciana Albuquerque]], [[Catarina Oliveira|AUTHOR Catarina Oliveira]], [[António Teixeira|AUTHOR António Teixeira]], [[Pedro Sa-Couto|AUTHOR Pedro Sa-Couto]], [[Daniela Figueiredo|AUTHOR Daniela Figueiredo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-3-5|PAPER Thu-O-10-3-5 — Vowel-Tone Interaction in Two Tibeto-Burman Languages]]</div>|<div class="cpsessionviewpapertitle">Vowel-Tone Interaction in Two Tibeto-Burman Languages</div><div class="cpsessionviewpaperauthor">[[Wendy Lalhminghlui|AUTHOR Wendy Lalhminghlui]], [[Viyazonuo Terhiija|AUTHOR Viyazonuo Terhiija]], [[Priyankoo Sarmah|AUTHOR Priyankoo Sarmah]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193210.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-3-6|PAPER Thu-O-10-3-6 — The Vowel System of Korebaju]]</div>|<div class="cpsessionviewpapertitle">The Vowel System of Korebaju</div><div class="cpsessionviewpaperauthor">[[Jenifer Vega Rodríguez|AUTHOR Jenifer Vega Rodríguez]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-4-1|PAPER Thu-O-10-4-1 — Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing]]</div>|<div class="cpsessionviewpapertitle">Fundamental Frequency Accommodation in Multi-Party Human-Robot Game Interactions: The Effect of Winning or Losing</div><div class="cpsessionviewpaperauthor">[[Omnia Ibrahim|AUTHOR Omnia Ibrahim]], [[Gabriel Skantze|AUTHOR Gabriel Skantze]], [[Sabine Stoll|AUTHOR Sabine Stoll]], [[Volker Dellwo|AUTHOR Volker Dellwo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-4-2|PAPER Thu-O-10-4-2 — Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction]]</div>|<div class="cpsessionviewpapertitle">Pitch Accent Trajectories Across Different Conditions of Visibility and Information Structure — Evidence from Spontaneous Dyadic Interaction</div><div class="cpsessionviewpaperauthor">[[Petra Wagner|AUTHOR Petra Wagner]], [[Nataliya Bryhadyr|AUTHOR Nataliya Bryhadyr]], [[Marin Schröer|AUTHOR Marin Schröer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192572.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-4-3|PAPER Thu-O-10-4-3 — The Greennn Tree — Lengthening Position Influences Uncertainty Perception]]</div>|<div class="cpsessionviewpapertitle">The Greennn Tree — Lengthening Position Influences Uncertainty Perception</div><div class="cpsessionviewpaperauthor">[[Simon Betz|AUTHOR Simon Betz]], [[Sina Zarrieß|AUTHOR Sina Zarrieß]], [[Éva Székely|AUTHOR Éva Székely]], [[Petra Wagner|AUTHOR Petra Wagner]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191701.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-4-4|PAPER Thu-O-10-4-4 — CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information]]</div>|<div class="cpsessionviewpapertitle">CNN-BLSTM Based Question Detection from Dialogs Considering Phase and Context Information</div><div class="cpsessionviewpaperauthor">[[Yuke Si|AUTHOR Yuke Si]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Mengfei Wu|AUTHOR Mengfei Wu]], [[Aijun Li|AUTHOR Aijun Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-4-5|PAPER Thu-O-10-4-5 — Mirroring to Build Trust in Digital Assistants]]</div>|<div class="cpsessionviewpapertitle">Mirroring to Build Trust in Digital Assistants</div><div class="cpsessionviewpaperauthor">[[Katherine Metcalf|AUTHOR Katherine Metcalf]], [[Barry-John Theobald|AUTHOR Barry-John Theobald]], [[Garrett Weinberg|AUTHOR Garrett Weinberg]], [[Robert Lee|AUTHOR Robert Lee]], [[Ing-Marie Jonsson|AUTHOR Ing-Marie Jonsson]], [[Russ Webb|AUTHOR Russ Webb]], [[Nicholas Apostoloff|AUTHOR Nicholas Apostoloff]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-4-6|PAPER Thu-O-10-4-6 — Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant]]</div>|<div class="cpsessionviewpapertitle">Three’s a Crowd? Effects of a Second Human on Vocal Accommodation with a Voice Assistant</div><div class="cpsessionviewpaperauthor">[[Eran Raveh|AUTHOR Eran Raveh]], [[Ingo Siegert|AUTHOR Ingo Siegert]], [[Ingmar Steiner|AUTHOR Ingmar Steiner]], [[Iona Gessinger|AUTHOR Iona Gessinger]], [[Bernd Möbius|AUTHOR Bernd Möbius]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Paavo Alku, Frank Soong|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Thu-O-9-1-1|PAPER Thu-O-9-1-1 — Survey Talk: Realistic Physics-Based Computational Voice Production]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Realistic Physics-Based Computational Voice Production</div><div class="cpsessionviewpaperauthor">[[Oriol Guasch|AUTHOR Oriol Guasch]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191764.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-1-2|PAPER Thu-O-9-1-2 — An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes]]</div>|<div class="cpsessionviewpapertitle">An Extended Two-Dimensional Vocal Tract Model for Fast Acoustic Simulation of Single-Axis Symmetric Three-Dimensional Tubes</div><div class="cpsessionviewpaperauthor">[[Debasish Ray Mohapatra|AUTHOR Debasish Ray Mohapatra]], [[Victor Zappi|AUTHOR Victor Zappi]], [[Sidney Fels|AUTHOR Sidney Fels]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-1-3|PAPER Thu-O-9-1-3 — Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Perceptual Optimization of an Enhanced Geometric Vocal Fold Model for Articulatory Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Peter Birkholz|AUTHOR Peter Birkholz]], [[Susanne Drechsel|AUTHOR Susanne Drechsel]], [[Simon Stone|AUTHOR Simon Stone]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191334.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-1-4|PAPER Thu-O-9-1-4 — Articulatory Copy Synthesis Based on a Genetic Algorithm]]</div>|<div class="cpsessionviewpapertitle">Articulatory Copy Synthesis Based on a Genetic Algorithm</div><div class="cpsessionviewpaperauthor">[[Yingming Gao|AUTHOR Yingming Gao]], [[Simon Stone|AUTHOR Simon Stone]], [[Peter Birkholz|AUTHOR Peter Birkholz]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192526.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-1-5|PAPER Thu-O-9-1-5 — A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion]]</div>|<div class="cpsessionviewpapertitle">A Phonetic-Level Analysis of Different Input Features for Articulatory Inversion</div><div class="cpsessionviewpaperauthor">[[Abdolreza Sabzi Shahrebabaki|AUTHOR Abdolreza Sabzi Shahrebabaki]], [[Negar Olfati|AUTHOR Negar Olfati]], [[Ali Shariq Imran|AUTHOR Ali Shariq Imran]], [[Sabato Marco Siniscalchi|AUTHOR Sabato Marco Siniscalchi]], [[Torbjørn Svendsen|AUTHOR Torbjørn Svendsen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Jan Chorowski, Shinji Watanabe|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-2-1|PAPER Thu-O-9-2-1 — Advancing Sequence-to-Sequence Based Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Advancing Sequence-to-Sequence Based Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[George Saon|AUTHOR George Saon]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192460.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-2-2|PAPER Thu-O-9-2-2 — Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions]]</div>|<div class="cpsessionviewpapertitle">Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions</div><div class="cpsessionviewpaperauthor">[[Awni Hannun|AUTHOR Awni Hannun]], [[Ann Lee|AUTHOR Ann Lee]], [[Qiantong Xu|AUTHOR Qiantong Xu]], [[Ronan Collobert|AUTHOR Ronan Collobert]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-2-3|PAPER Thu-O-9-2-3 — Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Sequence-to-Sequence ASR Using Unpaired Speech and Text</div><div class="cpsessionviewpaperauthor">[[Murali Karthick Baskar|AUTHOR Murali Karthick Baskar]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Ramon Astudillo|AUTHOR Ramon Astudillo]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Jan Černocký|AUTHOR Jan Černocký]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191554.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-2-4|PAPER Thu-O-9-2-4 — Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Learn Spelling from Teachers: Transferring Knowledge from Language Models to Sequence-to-Sequence Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ye Bai|AUTHOR Ye Bai]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192277.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-2-5|PAPER Thu-O-9-2-5 — On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">On the Choice of Modeling Unit for Sequence-to-Sequence Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Kazuki Irie|AUTHOR Kazuki Irie]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]], [[Anjuli Kannan|AUTHOR Anjuli Kannan]], [[Antoine Bruguier|AUTHOR Antoine Bruguier]], [[David Rybach|AUTHOR David Rybach]], [[Patrick Nguyen|AUTHOR Patrick Nguyen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192719.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-2-6|PAPER Thu-O-9-2-6 — Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR]]</div>|<div class="cpsessionviewpapertitle">Listen, Attend, Spell and Adapt: Speaker Adapted Sequence-to-Sequence ASR</div><div class="cpsessionviewpaperauthor">[[Felix Weninger|AUTHOR Felix Weninger]], [[Jesús Andrés-Ferrer|AUTHOR Jesús Andrés-Ferrer]], [[Xinwei Li|AUTHOR Xinwei Li]], [[Puming Zhan|AUTHOR Puming Zhan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Takaaki Hori, Murat Saraçlar|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191790.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-3-1|PAPER Thu-O-9-3-1 — Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts]]</div>|<div class="cpsessionviewpapertitle">Lattice Re-Scoring During Manual Editing for Automatic Error Correction of ASR Transcripts</div><div class="cpsessionviewpaperauthor">[[Anna V. Rúnarsdóttir|AUTHOR Anna V. Rúnarsdóttir]], [[Inga R. Helgadóttir|AUTHOR Inga R. Helgadóttir]], [[Jón Guðnason|AUTHOR Jón Guðnason]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-3-2|PAPER Thu-O-9-3-2 — GPU-Based WFST Decoding with Extra Large Language Model]]</div>|<div class="cpsessionviewpapertitle">GPU-Based WFST Decoding with Extra Large Language Model</div><div class="cpsessionviewpaperauthor">[[Daisuke Fukunaga|AUTHOR Daisuke Fukunaga]], [[Yoshiki Tanaka|AUTHOR Yoshiki Tanaka]], [[Yuichi Kageyama|AUTHOR Yuichi Kageyama]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192798.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-3-3|PAPER Thu-O-9-3-3 — Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models]]</div>|<div class="cpsessionviewpapertitle">Real-Time One-Pass Decoder for Speech Recognition Using LSTM Language Models</div><div class="cpsessionviewpaperauthor">[[Javier Jorge|AUTHOR Javier Jorge]], [[Adrià Giménez|AUTHOR Adrià Giménez]], [[Javier Iranzo-Sánchez|AUTHOR Javier Iranzo-Sánchez]], [[Jorge Civera|AUTHOR Jorge Civera]], [[Albert Sanchis|AUTHOR Albert Sanchis]], [[Alfons Juan|AUTHOR Alfons Juan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-3-4|PAPER Thu-O-9-3-4 — Vectorized Beam Search for CTC-Attention-Based Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Vectorized Beam Search for CTC-Attention-Based Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Hiroshi Seki|AUTHOR Hiroshi Seki]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Niko Moritz|AUTHOR Niko Moritz]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-3-5|PAPER Thu-O-9-3-5 — Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Contextual Recovery of Out-of-Lattice Named Entities in Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jack Serrino|AUTHOR Jack Serrino]], [[Leonid Velikovich|AUTHOR Leonid Velikovich]], [[Petar Aleksic|AUTHOR Petar Aleksic]], [[Cyril Allauzen|AUTHOR Cyril Allauzen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192985.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-3-6|PAPER Thu-O-9-3-6 — Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Sequence-to-Sequence Learning via Attention Transfer for Incremental Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Sashi Novitasari|AUTHOR Sashi Novitasari]], [[Andros Tjandra|AUTHOR Andros Tjandra]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Franz Pernkopf, Reinhold Häb-Umbach|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-4-1|PAPER Thu-O-9-4-1 — Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Representation Learning with Future Observation Prediction for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Zheng Lian|AUTHOR Zheng Lian]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jian Huang|AUTHOR Jian Huang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-4-2|PAPER Thu-O-9-4-2 — Spatio-Temporal Attention Pooling for Audio Scene Classification]]</div>|<div class="cpsessionviewpapertitle">Spatio-Temporal Attention Pooling for Audio Scene Classification</div><div class="cpsessionviewpaperauthor">[[Huy Phan|AUTHOR Huy Phan]], [[Oliver Y. Chén|AUTHOR Oliver Y. Chén]], [[Lam Pham|AUTHOR Lam Pham]], [[Philipp Koch|AUTHOR Philipp Koch]], [[Maarten De Vos|AUTHOR Maarten De Vos]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Alfred Mertins|AUTHOR Alfred Mertins]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-4-3|PAPER Thu-O-9-4-3 — Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition]]</div>|<div class="cpsessionviewpapertitle">Subspace Pooling Based Temporal Features Extraction for Audio Event Recognition</div><div class="cpsessionviewpaperauthor">[[Qiuying Shi|AUTHOR Qiuying Shi]], [[Hui Luo|AUTHOR Hui Luo]], [[Jiqing Han|AUTHOR Jiqing Han]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-4-4|PAPER Thu-O-9-4-4 — Multi-Scale Time-Frequency Attention for Acoustic Event Detection]]</div>|<div class="cpsessionviewpapertitle">Multi-Scale Time-Frequency Attention for Acoustic Event Detection</div><div class="cpsessionviewpaperauthor">[[Jingyang Zhang|AUTHOR Jingyang Zhang]], [[Wenhao Ding|AUTHOR Wenhao Ding]], [[Jintao Kang|AUTHOR Jintao Kang]], [[Liang He|AUTHOR Liang He]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-4-5|PAPER Thu-O-9-4-5 — Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events]]</div>|<div class="cpsessionviewpapertitle">Acoustic Scene Classification by Implicitly Identifying Distinct Sound Events</div><div class="cpsessionviewpaperauthor">[[Hongwei Song|AUTHOR Hongwei Song]], [[Jiqing Han|AUTHOR Jiqing Han]], [[Shiwen Deng|AUTHOR Shiwen Deng]], [[Zhihao Du|AUTHOR Zhihao Du]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-4-6|PAPER Thu-O-9-4-6 — Parameter-Transfer Learning for Low-Resource Individualization of Head-Related Transfer Functions]]</div>|<div class="cpsessionviewpapertitle">Parameter-Transfer Learning for Low-Resource Individualization of Head-Related Transfer Functions</div><div class="cpsessionviewpaperauthor">[[Xiaoke Qi|AUTHOR Xiaoke Qi]], [[Lu Wang|AUTHOR Lu Wang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Heidi Christensen, Phil Green|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193276.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-5-1|PAPER Thu-O-9-5-1 — Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease]]</div>|<div class="cpsessionviewpapertitle">Prosodic Characteristics of Mandarin Declarative and Interrogative Utterances in Parkinson’s Disease</div><div class="cpsessionviewpaperauthor">[[Lei Liu|AUTHOR Lei Liu]], [[Meng Jian|AUTHOR Meng Jian]], [[Wentao Gu|AUTHOR Wentao Gu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192993.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-5-2|PAPER Thu-O-9-5-2 — Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease]]</div>|<div class="cpsessionviewpapertitle">Study of the Performance of Automatic Speech Recognition Systems in Speakers with Parkinson’s Disease</div><div class="cpsessionviewpaperauthor">[[Laureano Moro-Velazquez|AUTHOR Laureano Moro-Velazquez]], [[JaeJin Cho|AUTHOR JaeJin Cho]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]], [[Odette Scharenborg|AUTHOR Odette Scharenborg]], [[Heejin Kim|AUTHOR Heejin Kim]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-5-3|PAPER Thu-O-9-5-3 — Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese]]</div>|<div class="cpsessionviewpapertitle">Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese</div><div class="cpsessionviewpaperauthor">[[Tianqi Wang|AUTHOR Tianqi Wang]], [[Chongyuan Lian|AUTHOR Chongyuan Lian]], [[Jingshen Pan|AUTHOR Jingshen Pan]], [[Quanlei Yan|AUTHOR Quanlei Yan]], [[Feiqi Zhu|AUTHOR Feiqi Zhu]], [[Manwa L. Ng|AUTHOR Manwa L. Ng]], [[Lan Wang|AUTHOR Lan Wang]], [[Nan Yan|AUTHOR Nan Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192320.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-5-4|PAPER Thu-O-9-5-4 — Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features]]</div>|<div class="cpsessionviewpapertitle">Child Speech Disorder Detection with Siamese Recurrent Network Using Speech Attribute Features</div><div class="cpsessionviewpaperauthor">[[Jiarui Wang|AUTHOR Jiarui Wang]], [[Ying Qin|AUTHOR Ying Qin]], [[Zhiyuan Peng|AUTHOR Zhiyuan Peng]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191206.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-5-5|PAPER Thu-O-9-5-5 — Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech]]</div>|<div class="cpsessionviewpapertitle">Interpretable Deep Learning Model for the Detection and Reconstruction of Dysarthric Speech</div><div class="cpsessionviewpaperauthor">[[Daniel Korzekwa|AUTHOR Daniel Korzekwa]], [[Roberto Barra-Chicote|AUTHOR Roberto Barra-Chicote]], [[Bozena Kostek|AUTHOR Bozena Kostek]], [[Thomas Drugman|AUTHOR Thomas Drugman]], [[Mateusz Lajszczak|AUTHOR Mateusz Lajszczak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191200.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-9-5-6|PAPER Thu-O-9-5-6 — Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study]]</div>|<div class="cpsessionviewpapertitle">Vocal Biomarker Assessment Following Pediatric Traumatic Brain Injury: A Retrospective Cohort Study</div><div class="cpsessionviewpaperauthor">[[Camille Noufi|AUTHOR Camille Noufi]], [[Adam C. Lammert|AUTHOR Adam C. Lammert]], [[Daryush D. Mehta|AUTHOR Daryush D. Mehta]], [[James R. Williamson|AUTHOR James R. Williamson]], [[Gregory Ciccarelli|AUTHOR Gregory Ciccarelli]], [[Douglas Sturim|AUTHOR Douglas Sturim]], [[Jordan R. Green|AUTHOR Jordan R. Green]], [[Thomas F. Campbell|AUTHOR Thomas F. Campbell]], [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Hynek Boril|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192899.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-1|PAPER Thu-P-10-A-1 — End-to-End Neural Speaker Diarization with Permutation-Free Objectives]]</div>|<div class="cpsessionviewpapertitle">End-to-End Neural Speaker Diarization with Permutation-Free Objectives</div><div class="cpsessionviewpaperauthor">[[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192616.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-2|PAPER Thu-P-10-A-2 — Self Multi-Head Attention for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Self Multi-Head Attention for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Miquel India|AUTHOR Miquel India]], [[Pooyan Safari|AUTHOR Pooyan Safari]], [[Javier Hernando|AUTHOR Javier Hernando]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-3|PAPER Thu-P-10-A-3 — Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation]]</div>|<div class="cpsessionviewpapertitle">Phonetically-Aware Embeddings, Wide Residual Networks with Time-Delay Neural Networks and Self Attention Models for the 2018 NIST Speaker Recognition Evaluation</div><div class="cpsessionviewpaperauthor">[[Ignacio Viñals|AUTHOR Ignacio Viñals]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Victoria Mingote|AUTHOR Victoria Mingote]], [[Jorge Llombart|AUTHOR Jorge Llombart]], [[Pablo Gimeno|AUTHOR Pablo Gimeno]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192168.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-4|PAPER Thu-P-10-A-4 — Variational Domain Adversarial Learning for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Variational Domain Adversarial Learning for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Youzhi Tu|AUTHOR Youzhi Tu]], [[Man-Wai Mak|AUTHOR Man-Wai Mak]], [[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191994.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-5|PAPER Thu-P-10-A-5 — A Unified Framework for Speaker and Utterance Verification]]</div>|<div class="cpsessionviewpapertitle">A Unified Framework for Speaker and Utterance Verification</div><div class="cpsessionviewpaperauthor">[[Tianchi Liu|AUTHOR Tianchi Liu]], [[Maulik Madhavi|AUTHOR Maulik Madhavi]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191808.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-6|PAPER Thu-P-10-A-6 — Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems]]</div>|<div class="cpsessionviewpapertitle">Analysis of Critical Metadata Factors for the Calibration of Speaker Recognition Systems</div><div class="cpsessionviewpaperauthor">[[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]], [[Luciana Ferrer|AUTHOR Luciana Ferrer]], [[Mitchell McLaren|AUTHOR Mitchell McLaren]], [[Diego Castan|AUTHOR Diego Castan]], [[Aaron Lawson|AUTHOR Aaron Lawson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-7|PAPER Thu-P-10-A-7 — Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">Factorization of Discriminatively Trained i-Vector Extractor for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Ondřej Novotný|AUTHOR Ondřej Novotný]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]], [[Lukáš Burget|AUTHOR Lukáš Burget]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-8|PAPER Thu-P-10-A-8 — End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks]]</div>|<div class="cpsessionviewpapertitle">End-to-End Speaker Identification in Noisy and Reverberant Environments Using Raw Waveform Convolutional Neural Networks</div><div class="cpsessionviewpaperauthor">[[Daniele Salvati|AUTHOR Daniele Salvati]], [[Carlo Drioli|AUTHOR Carlo Drioli]], [[Gian Luca Foresti|AUTHOR Gian Luca Foresti]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192280.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-9|PAPER Thu-P-10-A-9 — Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Whisper to Neutral Mapping Using Cosine Similarity Maximization in i-Vector Space for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Abinay Reddy Naini|AUTHOR Abinay Reddy Naini]], [[Achuth Rao M.V.|AUTHOR Achuth Rao M.V.]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192250.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-10|PAPER Thu-P-10-A-10 — Mixup Learning Strategies for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Mixup Learning Strategies for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Yingke Zhu|AUTHOR Yingke Zhu]], [[Tom Ko|AUTHOR Tom Ko]], [[Brian Mak|AUTHOR Brian Mak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-11|PAPER Thu-P-10-A-11 — Optimizing a Speaker Embedding Extractor Through Backend-Driven Regularization]]</div>|<div class="cpsessionviewpapertitle">Optimizing a Speaker Embedding Extractor Through Backend-Driven Regularization</div><div class="cpsessionviewpaperauthor">[[Luciana Ferrer|AUTHOR Luciana Ferrer]], [[Mitchell McLaren|AUTHOR Mitchell McLaren]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191517.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-12|PAPER Thu-P-10-A-12 — The NEC-TT 2018 Speaker Verification System]]</div>|<div class="cpsessionviewpapertitle">The NEC-TT 2018 Speaker Verification System</div><div class="cpsessionviewpaperauthor">[[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]], [[Koji Okabe|AUTHOR Koji Okabe]], [[Qiongqiong Wang|AUTHOR Qiongqiong Wang]], [[Ling Guo|AUTHOR Ling Guo]], [[Takafumi Koshinaka|AUTHOR Takafumi Koshinaka]], [[Jiacen Zhang|AUTHOR Jiacen Zhang]], [[Koichi Shinoda|AUTHOR Koichi Shinoda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191440.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-13|PAPER Thu-P-10-A-13 — Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Autoencoder-Based Semi-Supervised Curriculum Learning for Out-of-Domain Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Siqi Zheng|AUTHOR Siqi Zheng]], [[Gang Liu|AUTHOR Gang Liu]], [[Hongbin Suo|AUTHOR Hongbin Suo]], [[Yun Lei|AUTHOR Yun Lei]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-14|PAPER Thu-P-10-A-14 — Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment]]</div>|<div class="cpsessionviewpapertitle">Multi-Channel Training for End-to-End Speaker Recognition Under Reverberant and Noisy Environment</div><div class="cpsessionviewpaperauthor">[[Danwei Cai|AUTHOR Danwei Cai]], [[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191436.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-A-15|PAPER Thu-P-10-A-15 — The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation]]</div>|<div class="cpsessionviewpapertitle">The DKU-SMIIP System for NIST 2018 Speaker Recognition Evaluation</div><div class="cpsessionviewpaperauthor">[[Danwei Cai|AUTHOR Danwei Cai]], [[Weicheng Cai|AUTHOR Weicheng Cai]], [[Ming Li|AUTHOR Ming Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Martin Karafiát|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-1|PAPER Thu-P-10-B-1 — Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings]]</div>|<div class="cpsessionviewpapertitle">Pretraining by Backtranslation for End-to-End ASR in Low-Resource Settings</div><div class="cpsessionviewpaperauthor">[[Matthew Wiesner|AUTHOR Matthew Wiesner]], [[Adithya Renduchintala|AUTHOR Adithya Renduchintala]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Chunxi Liu|AUTHOR Chunxi Liu]], [[Najim Dehak|AUTHOR Najim Dehak]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193173.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-2|PAPER Thu-P-10-B-2 — Cross-Attention End-to-End ASR for Two-Party Conversations]]</div>|<div class="cpsessionviewpapertitle">Cross-Attention End-to-End ASR for Two-Party Conversations</div><div class="cpsessionviewpaperauthor">[[Suyoun Kim|AUTHOR Suyoun Kim]], [[Siddharth Dalmia|AUTHOR Siddharth Dalmia]], [[Florian Metze|AUTHOR Florian Metze]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192720.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-3|PAPER Thu-P-10-B-3 — Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees]]</div>|<div class="cpsessionviewpapertitle">Towards Using Context-Dependent Symbols in CTC Without State-Tying Decision Trees</div><div class="cpsessionviewpaperauthor">[[Jan Chorowski|AUTHOR Jan Chorowski]], [[Adrian Łańcucki|AUTHOR Adrian Łańcucki]], [[Bartosz Kostka|AUTHOR Bartosz Kostka]], [[Michał Zapotoczny|AUTHOR Michał Zapotoczny]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-4|PAPER Thu-P-10-B-4 — An Online Attention-Based Model for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">An Online Attention-Based Model for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ruchao Fan|AUTHOR Ruchao Fan]], [[Pan Zhou|AUTHOR Pan Zhou]], [[Wei Chen|AUTHOR Wei Chen]], [[Jia Jia|AUTHOR Jia Jia]], [[Gang Liu|AUTHOR Gang Liu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192203.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-5|PAPER Thu-P-10-B-5 — Self-Attention Transducers for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Self-Attention Transducers for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Ye Bai|AUTHOR Ye Bai]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192112.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-6|PAPER Thu-P-10-B-6 — Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation]]</div>|<div class="cpsessionviewpapertitle">Improving Transformer-Based Speech Recognition Systems with Compressed Structure and Speech Attributes Augmentation</div><div class="cpsessionviewpaperauthor">[[Sheng Li|AUTHOR Sheng Li]], [[Dabre Raj|AUTHOR Dabre Raj]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Peng Shen|AUTHOR Peng Shen]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-7|PAPER Thu-P-10-B-7 — Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Extending an Acoustic Data-Driven Phone Set for Spontaneous Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jeong-Uk Bang|AUTHOR Jeong-Uk Bang]], [[Mu-Yeol Choi|AUTHOR Mu-Yeol Choi]], [[Sang-Hun Kim|AUTHOR Sang-Hun Kim]], [[Oh-Wook Kwon|AUTHOR Oh-Wook Kwon]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191558.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-8|PAPER Thu-P-10-B-8 — Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Joint Maximization Decoder with Neural Converters for Fully Neural Network-Based Japanese Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Jian Wang|AUTHOR Jian Wang]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Yusuke Shinohara|AUTHOR Yusuke Shinohara]], [[Yoshikazu Yamaguchi|AUTHOR Yoshikazu Yamaguchi]], [[Yushi Aono|AUTHOR Yushi Aono]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191539.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-9|PAPER Thu-P-10-B-9 — Real to H-Space Encoder for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Real to H-Space Encoder for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Titouan Parcollet|AUTHOR Titouan Parcollet]], [[Mohamed Morchid|AUTHOR Mohamed Morchid]], [[Georges Linarès|AUTHOR Georges Linarès]], [[Renato De Mori|AUTHOR Renato De Mori]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-10|PAPER Thu-P-10-B-10 —  Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle"> Ectc-Docd: An End-to-End Structure with CTC Encoder and OCD Decoder for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Cheng Yi|AUTHOR Cheng Yi]], [[Feng Wang|AUTHOR Feng Wang]], [[Bo Xu|AUTHOR Bo Xu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-B-11|PAPER Thu-P-10-B-11 — End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning]]</div>|<div class="cpsessionviewpapertitle">End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning</div><div class="cpsessionviewpaperauthor">[[Pavel Denisov|AUTHOR Pavel Denisov]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Éva Székely|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-1|PAPER Thu-P-10-C-1 — Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Pre-Trained Text Embeddings for Enhanced Text-to-Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Tomoki Hayashi|AUTHOR Tomoki Hayashi]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Tomoki Toda|AUTHOR Tomoki Toda]], [[Kazuya Takeda|AUTHOR Kazuya Takeda]], [[Shubham Toshniwal|AUTHOR Shubham Toshniwal]], [[Karen Livescu|AUTHOR Karen Livescu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192836.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-2|PAPER Thu-P-10-C-2 — Spontaneous Conversational Speech Synthesis from Found Data]]</div>|<div class="cpsessionviewpapertitle">Spontaneous Conversational Speech Synthesis from Found Data</div><div class="cpsessionviewpaperauthor">[[Éva Székely|AUTHOR Éva Székely]], [[Gustav Eje Henter|AUTHOR Gustav Eje Henter]], [[Jonas Beskow|AUTHOR Jonas Beskow]], [[Joakim Gustafson|AUTHOR Joakim Gustafson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192571.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-3|PAPER Thu-P-10-C-3 — Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech]]</div>|<div class="cpsessionviewpapertitle">Fine-Grained Robust Prosody Transfer for Single-Speaker Neural Text-To-Speech</div><div class="cpsessionviewpaperauthor">[[Viacheslav Klimkov|AUTHOR Viacheslav Klimkov]], [[Srikanth Ronanki|AUTHOR Srikanth Ronanki]], [[Jonas Rohnke|AUTHOR Jonas Rohnke]], [[Thomas Drugman|AUTHOR Thomas Drugman]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192521.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-4|PAPER Thu-P-10-C-4 — Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction]]</div>|<div class="cpsessionviewpapertitle">Speech Driven Backchannel Generation Using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction</div><div class="cpsessionviewpaperauthor">[[Nusrah Hussain|AUTHOR Nusrah Hussain]], [[Engin Erzin|AUTHOR Engin Erzin]], [[T. Metin Sezgin|AUTHOR T. Metin Sezgin]], [[Yücel Yemez|AUTHOR Yücel Yemez]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192497.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-5|PAPER Thu-P-10-C-5 — Semi-Supervised Prosody Modeling Using Deep Gaussian Process Latent Variable Model]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Prosody Modeling Using Deep Gaussian Process Latent Variable Model</div><div class="cpsessionviewpaperauthor">[[Tomoki Koriyama|AUTHOR Tomoki Koriyama]], [[Takao Kobayashi|AUTHOR Takao Kobayashi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192367.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-6|PAPER Thu-P-10-C-6 — Bootstrapping a Text Normalization System for an Inflected Language. Numbers as a Test Case]]</div>|<div class="cpsessionviewpapertitle">Bootstrapping a Text Normalization System for an Inflected Language. Numbers as a Test Case</div><div class="cpsessionviewpaperauthor">[[Anna Björk Nikulásdóttir|AUTHOR Anna Björk Nikulásdóttir]], [[Jón Guðnason|AUTHOR Jón Guðnason]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-7|PAPER Thu-P-10-C-7 — Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS]]</div>|<div class="cpsessionviewpapertitle">Exploiting Syntactic Features in a Parsed Tree to Improve End-to-End TTS</div><div class="cpsessionviewpaperauthor">[[Haohan Guo|AUTHOR Haohan Guo]], [[Frank K. Soong|AUTHOR Frank K. Soong]], [[Lei He|AUTHOR Lei He]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-8|PAPER Thu-P-10-C-8 — Duration Modeling with Global Phoneme-Duration Vectors]]</div>|<div class="cpsessionviewpapertitle">Duration Modeling with Global Phoneme-Duration Vectors</div><div class="cpsessionviewpaperauthor">[[Jinfu Ni|AUTHOR Jinfu Ni]], [[Yoshinori Shiga|AUTHOR Yoshinori Shiga]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191945.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-9|PAPER Thu-P-10-C-9 — Improving Speech Synthesis with Discourse Relations]]</div>|<div class="cpsessionviewpapertitle">Improving Speech Synthesis with Discourse Relations</div><div class="cpsessionviewpaperauthor">[[Adèle Aubin|AUTHOR Adèle Aubin]], [[Alessandra Cervone|AUTHOR Alessandra Cervone]], [[Oliver Watts|AUTHOR Oliver Watts]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-10|PAPER Thu-P-10-C-10 — Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis]]</div>|<div class="cpsessionviewpapertitle">Visualization and Interpretation of Latent Spaces for Controlling Expressive Speech Synthesis Through Audio Analysis</div><div class="cpsessionviewpaperauthor">[[Noé Tits|AUTHOR Noé Tits]], [[Fengna Wang|AUTHOR Fengna Wang]], [[Kevin El Haddad|AUTHOR Kevin El Haddad]], [[Vincent Pagel|AUTHOR Vincent Pagel]], [[Thierry Dutoit|AUTHOR Thierry Dutoit]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191418.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-11|PAPER Thu-P-10-C-11 — Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Pre-Trained Text Representations for Improving Front-End Text Processing in Mandarin Text-to-Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Bing Yang|AUTHOR Bing Yang]], [[Jiaqi Zhong|AUTHOR Jiaqi Zhong]], [[Shan Liu|AUTHOR Shan Liu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191400.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-12|PAPER Thu-P-10-C-12 — A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning]]</div>|<div class="cpsessionviewpapertitle">A Mandarin Prosodic Boundary Prediction Model Based on Multi-Task Learning</div><div class="cpsessionviewpaperauthor">[[Huashan Pan|AUTHOR Huashan Pan]], [[Xiulin Li|AUTHOR Xiulin Li]], [[Zhiqiang Huang|AUTHOR Zhiqiang Huang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191135.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-13|PAPER Thu-P-10-C-13 — Dual Encoder Classifier Models as Constraints in Neural Text Normalization]]</div>|<div class="cpsessionviewpapertitle">Dual Encoder Classifier Models as Constraints in Neural Text Normalization</div><div class="cpsessionviewpaperauthor">[[Ajda Gokcen|AUTHOR Ajda Gokcen]], [[Hao Zhang|AUTHOR Hao Zhang]], [[Richard Sproat|AUTHOR Richard Sproat]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-14|PAPER Thu-P-10-C-14 — Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Knowledge-Based Linguistic Encoding for End-to-End Mandarin Text-to-Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Jingbei Li|AUTHOR Jingbei Li]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Runnan Li|AUTHOR Runnan Li]], [[Pengpeng Zhi|AUTHOR Pengpeng Zhi]], [[Song Yang|AUTHOR Song Yang]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-C-15|PAPER Thu-P-10-C-15 — Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks]]</div>|<div class="cpsessionviewpapertitle">Automated Emotion Morphing in Speech Based on Diffeomorphic Curve Registration and Highway Networks</div><div class="cpsessionviewpaperauthor">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Hsi-Wei Hsieh|AUTHOR Hsi-Wei Hsieh]], [[Nicolas Charon|AUTHOR Nicolas Charon]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Laureano Moro-Velazquez|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193126.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-1|PAPER Thu-P-10-D-1 — Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)]]</div>|<div class="cpsessionviewpapertitle">Use of Beiwe Smartphone App to Identify and Track Speech Decline in Amyotrophic Lateral Sclerosis (ALS)</div><div class="cpsessionviewpaperauthor">[[Kathryn P. Connaghan|AUTHOR Kathryn P. Connaghan]], [[Jordan R. Green|AUTHOR Jordan R. Green]], [[Sabrina Paganoni|AUTHOR Sabrina Paganoni]], [[James Chan|AUTHOR James Chan]], [[Harli Weber|AUTHOR Harli Weber]], [[Ella Collins|AUTHOR Ella Collins]], [[Brian Richburg|AUTHOR Brian Richburg]], [[Marziye Eshghi|AUTHOR Marziye Eshghi]], [[J.P. Onnela|AUTHOR J.P. Onnela]], [[James D. Berry|AUTHOR James D. Berry]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192911.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-2|PAPER Thu-P-10-D-2 — Profiling Speech Motor Impairments in Persons with Amyotrophic Lateral Sclerosis: An Acoustic-Based Approach]]</div>|<div class="cpsessionviewpapertitle">Profiling Speech Motor Impairments in Persons with Amyotrophic Lateral Sclerosis: An Acoustic-Based Approach</div><div class="cpsessionviewpaperauthor">[[Hannah P. Rowe|AUTHOR Hannah P. Rowe]], [[Jordan R. Green|AUTHOR Jordan R. Green]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-3|PAPER Thu-P-10-D-3 — Diagnosing Dysarthria with Long Short-Term Memory Networks]]</div>|<div class="cpsessionviewpapertitle">Diagnosing Dysarthria with Long Short-Term Memory Networks</div><div class="cpsessionviewpaperauthor">[[Alex Mayle|AUTHOR Alex Mayle]], [[Zhiwei Mou|AUTHOR Zhiwei Mou]], [[Razvan Bunescu|AUTHOR Razvan Bunescu]], [[Sadegh Mirshekarian|AUTHOR Sadegh Mirshekarian]], [[Li Xu|AUTHOR Li Xu]], [[Chang Liu|AUTHOR Chang Liu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192604.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-4|PAPER Thu-P-10-D-4 — Modification of Devoicing Error in Cleft Lip and Palate Speech]]</div>|<div class="cpsessionviewpapertitle">Modification of Devoicing Error in Cleft Lip and Palate Speech</div><div class="cpsessionviewpaperauthor">[[Protima Nomo Sudro|AUTHOR Protima Nomo Sudro]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192546.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-5|PAPER Thu-P-10-D-5 — Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis]]</div>|<div class="cpsessionviewpapertitle">Reduced Task Adaptation in Alternating Motion Rate Tasks as an Early Marker of Bulbar Involvement in Amyotrophic Lateral Sclerosis</div><div class="cpsessionviewpaperauthor">[[Marziye Eshghi|AUTHOR Marziye Eshghi]], [[Panying Rong|AUTHOR Panying Rong]], [[Antje S. Mefferd|AUTHOR Antje S. Mefferd]], [[Kaila L. Stipancic|AUTHOR Kaila L. Stipancic]], [[Yana Yunusova|AUTHOR Yana Yunusova]], [[Jordan R. Green|AUTHOR Jordan R. Green]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192453.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-6|PAPER Thu-P-10-D-6 — Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database]]</div>|<div class="cpsessionviewpapertitle">Towards the Speech Features of Early-Stage Dementia: Design and Application of the Mandarin Elderly Cognitive Speech Database</div><div class="cpsessionviewpaperauthor">[[Tianqi Wang|AUTHOR Tianqi Wang]], [[Quanlei Yan|AUTHOR Quanlei Yan]], [[Jingshen Pan|AUTHOR Jingshen Pan]], [[Feiqi Zhu|AUTHOR Feiqi Zhu]], [[Rongfeng Su|AUTHOR Rongfeng Su]], [[Yi Guo|AUTHOR Yi Guo]], [[Lan Wang|AUTHOR Lan Wang]], [[Nan Yan|AUTHOR Nan Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192432.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-7|PAPER Thu-P-10-D-7 — Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage]]</div>|<div class="cpsessionviewpapertitle">Acoustic Characteristics of Lexical Tone Disruption in Mandarin Speakers After Brain Damage</div><div class="cpsessionviewpaperauthor">[[Wenjun Chen|AUTHOR Wenjun Chen]], [[Jeroen van de Weijer|AUTHOR Jeroen van de Weijer]], [[Shuangshuang Zhu|AUTHOR Shuangshuang Zhu]], [[Qian Qian|AUTHOR Qian Qian]], [[Manna Wang|AUTHOR Manna Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192389.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-8|PAPER Thu-P-10-D-8 — Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS]]</div>|<div class="cpsessionviewpapertitle">Intragestural Variation in Natural Sentence Production: Essential Tremor Patients Treated with DBS</div><div class="cpsessionviewpaperauthor">[[Anne Hermes|AUTHOR Anne Hermes]], [[Doris Mücke|AUTHOR Doris Mücke]], [[Tabea Thies|AUTHOR Tabea Thies]], [[Michael T. Barbe|AUTHOR Michael T. Barbe]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-9|PAPER Thu-P-10-D-9 — Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech]]</div>|<div class="cpsessionviewpapertitle">Nasal Air Emission in Sibilant Fricatives of Cleft Lip and Palate Speech</div><div class="cpsessionviewpaperauthor">[[Sishir Kalita|AUTHOR Sishir Kalita]], [[Protima Nomo Sudro|AUTHOR Protima Nomo Sudro]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]], [[S. Dandapat|AUTHOR S. Dandapat]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192194.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-10|PAPER Thu-P-10-D-10 — Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech]]</div>|<div class="cpsessionviewpapertitle">Parallel vs. Non-Parallel Voice Conversion for Esophageal Speech</div><div class="cpsessionviewpaperauthor">[[Luis Serrano|AUTHOR Luis Serrano]], [[Sneha Raman|AUTHOR Sneha Raman]], [[David Tavarez|AUTHOR David Tavarez]], [[Eva Navas|AUTHOR Eva Navas]], [[Inma Hernaez|AUTHOR Inma Hernaez]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192151.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-11|PAPER Thu-P-10-D-11 — Hypernasality Severity Detection Using Constant Q Cepstral Coefficients]]</div>|<div class="cpsessionviewpapertitle">Hypernasality Severity Detection Using Constant Q Cepstral Coefficients</div><div class="cpsessionviewpaperauthor">[[Akhilesh Kumar Dubey|AUTHOR Akhilesh Kumar Dubey]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]], [[S. Dandapat|AUTHOR S. Dandapat]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191617.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-12|PAPER Thu-P-10-D-12 — Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling]]</div>|<div class="cpsessionviewpapertitle">Automatic Depression Level Detection via ℓ,,p,,-Norm Pooling</div><div class="cpsessionviewpaperauthor">[[Mingyue Niu|AUTHOR Mingyue Niu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Cunhang Fan|AUTHOR Cunhang Fan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191285.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-D-13|PAPER Thu-P-10-D-13 — Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis]]</div>|<div class="cpsessionviewpapertitle">Comparison of Speech Tasks and Recording Devices for Voice Based Automatic Classification of Healthy Subjects and Patients with Amyotrophic Lateral Sclerosis</div><div class="cpsessionviewpaperauthor">[[Suhas B.N.|AUTHOR Suhas B.N.]], [[Deep Patel|AUTHOR Deep Patel]], [[Nithin Rao|AUTHOR Nithin Rao]], [[Yamini Belur|AUTHOR Yamini Belur]], [[Pradeep Reddy|AUTHOR Pradeep Reddy]], [[Nalini Atchayaram|AUTHOR Nalini Atchayaram]], [[Ravi Yadav|AUTHOR Ravi Yadav]], [[Dipanjan Gope|AUTHOR Dipanjan Gope]], [[Prasanta Kumar Ghosh|AUTHOR Prasanta Kumar Ghosh]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193242.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-1|PAPER Thu-P-10-E-1 — A Modified Algorithm for Multiple Input Spectrogram Inversion]]</div>|<div class="cpsessionviewpapertitle">A Modified Algorithm for Multiple Input Spectrogram Inversion</div><div class="cpsessionviewpaperauthor">[[Dongxiao Wang|AUTHOR Dongxiao Wang]], [[Hirokazu Kameoka|AUTHOR Hirokazu Kameoka]], [[Koichi Shinoda|AUTHOR Koichi Shinoda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-2|PAPER Thu-P-10-E-2 — A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation]]</div>|<div class="cpsessionviewpapertitle">A Comprehensive Study of Speech Separation: Spectrogram vs Waveform Separation</div><div class="cpsessionviewpaperauthor">[[Fahimeh Bahmaninezhad|AUTHOR Fahimeh Bahmaninezhad]], [[Jian Wu|AUTHOR Jian Wu]], [[Rongzhi Gu|AUTHOR Rongzhi Gu]], [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]], [[Yong Xu|AUTHOR Yong Xu]], [[Meng Yu|AUTHOR Meng Yu]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192671.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-3|PAPER Thu-P-10-E-3 — Evaluating Audiovisual Source Separation in the Context of Video Conferencing]]</div>|<div class="cpsessionviewpapertitle">Evaluating Audiovisual Source Separation in the Context of Video Conferencing</div><div class="cpsessionviewpaperauthor">[[Berkay İnan|AUTHOR Berkay İnan]], [[Milos Cernak|AUTHOR Milos Cernak]], [[Helmut Grabner|AUTHOR Helmut Grabner]], [[Helena Peic Tukuljac|AUTHOR Helena Peic Tukuljac]], [[Rodrigo C.G. Pena|AUTHOR Rodrigo C.G. Pena]], [[Benjamin Ricaud|AUTHOR Benjamin Ricaud]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192459.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-4|PAPER Thu-P-10-E-4 — Influence of Speaker-Specific Parameters on Speech Separation Systems]]</div>|<div class="cpsessionviewpapertitle">Influence of Speaker-Specific Parameters on Speech Separation Systems</div><div class="cpsessionviewpaperauthor">[[David Ditter|AUTHOR David Ditter]], [[Timo Gerkmann|AUTHOR Timo Gerkmann]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192423.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-5|PAPER Thu-P-10-E-5 — CNN-LSTM Models for Multi-Speaker Source Separation Using Bayesian Hyper Parameter Optimization]]</div>|<div class="cpsessionviewpapertitle">CNN-LSTM Models for Multi-Speaker Source Separation Using Bayesian Hyper Parameter Optimization</div><div class="cpsessionviewpaperauthor">[[Jeroen Zegers|AUTHOR Jeroen Zegers]], [[Hugo Van hamme|AUTHOR Hugo Van hamme]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192169.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-6|PAPER Thu-P-10-E-6 — Towards Joint Sound Scene and Polyphonic Sound Event Recognition]]</div>|<div class="cpsessionviewpapertitle">Towards Joint Sound Scene and Polyphonic Sound Event Recognition</div><div class="cpsessionviewpaperauthor">[[Helen L. Bear|AUTHOR Helen L. Bear]], [[In^es Nolasco|AUTHOR In^es Nolasco]], [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191940.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-7|PAPER Thu-P-10-E-7 — Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features]]</div>|<div class="cpsessionviewpapertitle">Discriminative Learning for Monaural Speech Separation Using Deep Embedding Features</div><div class="cpsessionviewpaperauthor">[[Cunhang Fan|AUTHOR Cunhang Fan]], [[Bin Liu|AUTHOR Bin Liu]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191827.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-8|PAPER Thu-P-10-E-8 — Probabilistic Permutation Invariant Training for Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Probabilistic Permutation Invariant Training for Speech Separation</div><div class="cpsessionviewpaperauthor">[[Midia Yousefi|AUTHOR Midia Yousefi]], [[Soheil Khorram|AUTHOR Soheil Khorram]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191591.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-9|PAPER Thu-P-10-E-9 — Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Which Ones Are Speaking? Speaker-Inferred Model for Multi-Talker Speech Separation</div><div class="cpsessionviewpaperauthor">[[Jing Shi|AUTHOR Jing Shi]], [[Jiaming Xu|AUTHOR Jiaming Xu]], [[Bo Xu|AUTHOR Bo Xu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-10|PAPER Thu-P-10-E-10 — End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network]]</div>|<div class="cpsessionviewpapertitle">End-to-End Monaural Speech Separation with Multi-Scale Dynamic Weighted Gated Dilated Convolutional Pyramid Network</div><div class="cpsessionviewpaperauthor">[[Ziqiang Shi|AUTHOR Ziqiang Shi]], [[Huibin Lin|AUTHOR Huibin Lin]], [[Liu Liu|AUTHOR Liu Liu]], [[Rujie Liu|AUTHOR Rujie Liu]], [[Shoji Hayakawa|AUTHOR Shoji Hayakawa]], [[Shouji Harada|AUTHOR Shouji Harada]], [[Jiqing Han|AUTHOR Jiqing Han]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-10-E-11|PAPER Thu-P-10-E-11 — End-to-End Music Source Separation: Is it Possible in the Waveform Domain?]]</div>|<div class="cpsessionviewpapertitle">End-to-End Music Source Separation: Is it Possible in the Waveform Domain?</div><div class="cpsessionviewpaperauthor">[[Francesc Lluís|AUTHOR Francesc Lluís]], [[Jordi Pons|AUTHOR Jordi Pons]], [[Xavier Serra|AUTHOR Xavier Serra]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Sandro Cumani|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192983.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-1|PAPER Thu-P-9-A-1 — Adversarial Regularization for End-to-End Robust Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Adversarial Regularization for End-to-End Robust Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Qing Wang|AUTHOR Qing Wang]], [[Pengcheng Guo|AUTHOR Pengcheng Guo]], [[Sining Sun|AUTHOR Sining Sun]], [[Lei Xie|AUTHOR Lei Xie]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192974.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-2|PAPER Thu-P-9-A-2 — Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning]]</div>|<div class="cpsessionviewpapertitle">Combining Speaker Recognition and Metric Learning for Speaker-Dependent Representation Learning</div><div class="cpsessionviewpaperauthor">[[João Monteiro|AUTHOR João Monteiro]], [[Jahangir Alam|AUTHOR Jahangir Alam]], [[Tiago H. Falk|AUTHOR Tiago H. Falk]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192486.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-3|PAPER Thu-P-9-A-3 — VAE-Based Regularization for Deep Speaker Embedding]]</div>|<div class="cpsessionviewpapertitle">VAE-Based Regularization for Deep Speaker Embedding</div><div class="cpsessionviewpaperauthor">[[Yang Zhang|AUTHOR Yang Zhang]], [[Lantian Li|AUTHOR Lantian Li]], [[Dong Wang|AUTHOR Dong Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192437.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-4|PAPER Thu-P-9-A-4 — Language Recognition Using Triplet Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Language Recognition Using Triplet Neural Networks</div><div class="cpsessionviewpaperauthor">[[Victoria Mingote|AUTHOR Victoria Mingote]], [[Diego Castan|AUTHOR Diego Castan]], [[Mitchell McLaren|AUTHOR Mitchell McLaren]], [[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]], [[Antonio Miguel|AUTHOR Antonio Miguel]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192177.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-5|PAPER Thu-P-9-A-5 — Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Spatial Pyramid Encoding with Convex Length Normalization for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Youngmoon Jung|AUTHOR Youngmoon Jung]], [[Younggwan Kim|AUTHOR Younggwan Kim]], [[Hyungjun Lim|AUTHOR Hyungjun Lim]], [[Yeunju Choi|AUTHOR Yeunju Choi]], [[Hoirin Kim|AUTHOR Hoirin Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191986.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-6|PAPER Thu-P-9-A-6 — End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">End-to-End Losses Based on Speaker Basis Vectors and All-Speaker Hard Negative Mining for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[IL-Ho Yang|AUTHOR IL-Ho Yang]], [[Sung-Hyun Yoon|AUTHOR Sung-Hyun Yoon]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191606.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-7|PAPER Thu-P-9-A-7 — An Effective Deep Embedding Learning Architecture for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">An Effective Deep Embedding Learning Architecture for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Yiheng Jiang|AUTHOR Yiheng Jiang]], [[Yan Song|AUTHOR Yan Song]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Zhifu Gao|AUTHOR Zhifu Gao]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191542.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-8|PAPER Thu-P-9-A-8 — Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">Far-Field End-to-End Text-Dependent Speaker Verification Based on Mixed Training Data with Transfer Learning and Enrollment Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Danwei Cai|AUTHOR Danwei Cai]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191522.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-9|PAPER Thu-P-9-A-9 — Two-Stage Training for Chinese Dialect Recognition]]</div>|<div class="cpsessionviewpapertitle">Two-Stage Training for Chinese Dialect Recognition</div><div class="cpsessionviewpaperauthor">[[Zongze Ren|AUTHOR Zongze Ren]], [[Guofu Yang|AUTHOR Guofu Yang]], [[Shugong Xu|AUTHOR Shugong Xu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191510.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-10|PAPER Thu-P-9-A-10 — Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Investigation on Blind Bandwidth Extension with a Non-Linear Function and its Evaluation of x-Vector-Based Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Ryota Kaminishi|AUTHOR Ryota Kaminishi]], [[Haruna Miyamoto|AUTHOR Haruna Miyamoto]], [[Sayaka Shiota|AUTHOR Sayaka Shiota]], [[Hitoshi Kiya|AUTHOR Hitoshi Kiya]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191444.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-11|PAPER Thu-P-9-A-11 — Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Auto-Encoding Nearest Neighbor i-Vectors for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Umair Khan|AUTHOR Umair Khan]], [[Miquel India|AUTHOR Miquel India]], [[Javier Hernando|AUTHOR Javier Hernando]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191442.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-12|PAPER Thu-P-9-A-12 — Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number]]</div>|<div class="cpsessionviewpapertitle">Towards a Fault-Tolerant Speaker Verification System: A Regularization Approach to Reduce the Condition Number</div><div class="cpsessionviewpaperauthor">[[Siqi Zheng|AUTHOR Siqi Zheng]], [[Gang Liu|AUTHOR Gang Liu]], [[Hongbin Suo|AUTHOR Hongbin Suo]], [[Yun Lei|AUTHOR Yun Lei]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191428.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-13|PAPER Thu-P-9-A-13 — Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments]]</div>|<div class="cpsessionviewpapertitle">Deep Learning Based Multi-Channel Speaker Recognition in Noisy and Reverberant Environments</div><div class="cpsessionviewpaperauthor">[[Hassan Taherian|AUTHOR Hassan Taherian]], [[Zhong-Qiu Wang|AUTHOR Zhong-Qiu Wang]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191356.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-14|PAPER Thu-P-9-A-14 — Joint Optimization of Neural Acoustic Beamforming and Dereverberation with x-Vectors for Robust Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Joint Optimization of Neural Acoustic Beamforming and Dereverberation with x-Vectors for Robust Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Joon-Young Yang|AUTHOR Joon-Young Yang]], [[Joon-Hyuk Chang|AUTHOR Joon-Hyuk Chang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191256.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-A-15|PAPER Thu-P-9-A-15 — A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification]]</div>|<div class="cpsessionviewpapertitle">A New Time-Frequency Attention Mechanism for TDNN and CNN-LSTM-TDNN, with Application to Language Identification</div><div class="cpsessionviewpaperauthor">[[Xiaoxiao Miao|AUTHOR Xiaoxiao Miao]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Yonghong Yan|AUTHOR Yonghong Yan]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192872.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-1|PAPER Thu-P-9-B-1 — An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech]]</div>|<div class="cpsessionviewpapertitle">An Attention-Based Hybrid Network for Automatic Detection of Alzheimer’s Disease from Narrative Speech</div><div class="cpsessionviewpaperauthor">[[Jun Chen|AUTHOR Jun Chen]], [[Ji Zhu|AUTHOR Ji Zhu]], [[Jieping Ye|AUTHOR Jieping Ye]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192726.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-2|PAPER Thu-P-9-B-2 — Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Investigating the Lombard Effect Influence on End-to-End Audio-Visual Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Pingchuan Ma|AUTHOR Pingchuan Ma]], [[Stavros Petridis|AUTHOR Stavros Petridis]], [[Maja Pantic|AUTHOR Maja Pantic]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192118.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-3|PAPER Thu-P-9-B-3 — “Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers]]</div>|<div class="cpsessionviewpapertitle">“Computer, Test My Hearing”: Accurate Speech Audiometry with Smart Speakers</div><div class="cpsessionviewpaperauthor">[[Jasper Ooster|AUTHOR Jasper Ooster]], [[Pia Nancy Porysek Moreta|AUTHOR Pia Nancy Porysek Moreta]], [[Jörg-Hendrik Bach|AUTHOR Jörg-Hendrik Bach]], [[Inga Holube|AUTHOR Inga Holube]], [[Bernd T. Meyer|AUTHOR Bernd T. Meyer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191804.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-4|PAPER Thu-P-9-B-4 — Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings]]</div>|<div class="cpsessionviewpapertitle">Synchronising Audio and Ultrasound by Learning Cross-Modal Embeddings</div><div class="cpsessionviewpaperauthor">[[Aciel Eshky|AUTHOR Aciel Eshky]], [[Manuel Sam Ribeiro|AUTHOR Manuel Sam Ribeiro]], [[Korin Richmond|AUTHOR Korin Richmond]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191799.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-5|PAPER Thu-P-9-B-5 — Automatic Hierarchical Attention Neural Network for Detecting AD]]</div>|<div class="cpsessionviewpapertitle">Automatic Hierarchical Attention Neural Network for Detecting AD</div><div class="cpsessionviewpaperauthor">[[Yilin Pan|AUTHOR Yilin Pan]], [[Bahman Mirheidari|AUTHOR Bahman Mirheidari]], [[Markus Reuber|AUTHOR Markus Reuber]], [[Annalena Venneri|AUTHOR Annalena Venneri]], [[Daniel Blackburn|AUTHOR Daniel Blackburn]], [[Heidi Christensen|AUTHOR Heidi Christensen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191796.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-6|PAPER Thu-P-9-B-6 — Deep Sensing of Breathing Signal During Conversational Speech]]</div>|<div class="cpsessionviewpapertitle">Deep Sensing of Breathing Signal During Conversational Speech</div><div class="cpsessionviewpaperauthor">[[Venkata Srikanth Nallanthighal|AUTHOR Venkata Srikanth Nallanthighal]], [[Aki Härmä|AUTHOR Aki Härmä]], [[Helmer Strik|AUTHOR Helmer Strik]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191789.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-7|PAPER Thu-P-9-B-7 — Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Parrotron: An End-to-End Speech-to-Speech Conversion Model and its Applications to Hearing-Impaired Speech and Speech Separation</div><div class="cpsessionviewpaperauthor">[[Fadi Biadsy|AUTHOR Fadi Biadsy]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Pedro J. Moreno|AUTHOR Pedro J. Moreno]], [[Dimitri Kanvesky|AUTHOR Dimitri Kanvesky]], [[Ye Jia|AUTHOR Ye Jia]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191536.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-8|PAPER Thu-P-9-B-8 — Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploiting Visual Features Using Bayesian Gated Neural Networks for Disordered Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shansong Liu|AUTHOR Shansong Liu]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Yi Wang|AUTHOR Yi Wang]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Rongfeng Su|AUTHOR Rongfeng Su]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191445.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-9|PAPER Thu-P-9-B-9 — Video-Driven Speech Reconstruction Using Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Video-Driven Speech Reconstruction Using Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Konstantinos Vougioukas|AUTHOR Konstantinos Vougioukas]], [[Pingchuan Ma|AUTHOR Pingchuan Ma]], [[Stavros Petridis|AUTHOR Stavros Petridis]], [[Maja Pantic|AUTHOR Maja Pantic]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192609.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-10|PAPER Thu-P-9-B-10 — On the Use of Pitch Features for Disordered Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">On the Use of Pitch Features for Disordered Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shansong Liu|AUTHOR Shansong Liu]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191669.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-B-11|PAPER Thu-P-9-B-11 — Large-Scale Visual Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Large-Scale Visual Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Brendan Shillingford|AUTHOR Brendan Shillingford]], [[Yannis Assael|AUTHOR Yannis Assael]], [[Matthew W. Hoffman|AUTHOR Matthew W. Hoffman]], [[Thomas Paine|AUTHOR Thomas Paine]], [[Cían Hughes|AUTHOR Cían Hughes]], [[Utsav Prabhu|AUTHOR Utsav Prabhu]], [[Hank Liao|AUTHOR Hank Liao]], [[Hasim Sak|AUTHOR Hasim Sak]], [[Kanishka Rao|AUTHOR Kanishka Rao]], [[Lorrayne Bennett|AUTHOR Lorrayne Bennett]], [[Marie Mulville|AUTHOR Marie Mulville]], [[Misha Denil|AUTHOR Misha Denil]], [[Ben Coppin|AUTHOR Ben Coppin]], [[Ben Laurie|AUTHOR Ben Laurie]], [[Andrew Senior|AUTHOR Andrew Senior]], [[Nando de Freitas|AUTHOR Nando de Freitas]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Helena Moniz|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193152.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-1|PAPER Thu-P-9-C-1 — Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation]]</div>|<div class="cpsessionviewpapertitle">Investigating Linguistic and Semantic Features for Turn-Taking Prediction in Open-Domain Human-Computer Conversation</div><div class="cpsessionviewpaperauthor">[[S. Zahra Razavi|AUTHOR S. Zahra Razavi]], [[Benjamin Kane|AUTHOR Benjamin Kane]], [[Lenhart K. Schubert|AUTHOR Lenhart K. Schubert]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193033.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-2|PAPER Thu-P-9-C-2 — Benchmarking Benchmarks: Introducing New Automatic Indicators for Benchmarking Spoken Language Understanding Corpora]]</div>|<div class="cpsessionviewpapertitle">Benchmarking Benchmarks: Introducing New Automatic Indicators for Benchmarking Spoken Language Understanding Corpora</div><div class="cpsessionviewpaperauthor">[[Frédéric Béchet|AUTHOR Frédéric Béchet]], [[Christian Raymond|AUTHOR Christian Raymond]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192270.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-3|PAPER Thu-P-9-C-3 — A Neural Turn-Taking Model without RNN]]</div>|<div class="cpsessionviewpapertitle">A Neural Turn-Taking Model without RNN</div><div class="cpsessionviewpaperauthor">[[Chaoran Liu|AUTHOR Chaoran Liu]], [[Carlos Ishi|AUTHOR Carlos Ishi]], [[Hiroshi Ishiguro|AUTHOR Hiroshi Ishiguro]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191826.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-4|PAPER Thu-P-9-C-4 — An Incremental Turn-Taking Model for Task-Oriented Dialog Systems]]</div>|<div class="cpsessionviewpapertitle">An Incremental Turn-Taking Model for Task-Oriented Dialog Systems</div><div class="cpsessionviewpaperauthor">[[Andrei C. Coman|AUTHOR Andrei C. Coman]], [[Koichiro Yoshino|AUTHOR Koichiro Yoshino]], [[Yukitoshi Murase|AUTHOR Yukitoshi Murase]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]], [[Giuseppe Riccardi|AUTHOR Giuseppe Riccardi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191696.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-5|PAPER Thu-P-9-C-5 — Personalized Dialogue Response Generation Learned from Monologues]]</div>|<div class="cpsessionviewpapertitle">Personalized Dialogue Response Generation Learned from Monologues</div><div class="cpsessionviewpaperauthor">[[Feng-Guang Su|AUTHOR Feng-Guang Su]], [[Aliyah R. Hsu|AUTHOR Aliyah R. Hsu]], [[Yi-Lin Tuan|AUTHOR Yi-Lin Tuan]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191592.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-6|PAPER Thu-P-9-C-6 — Voice Quality as a Turn-Taking Cue]]</div>|<div class="cpsessionviewpapertitle">Voice Quality as a Turn-Taking Cue</div><div class="cpsessionviewpaperauthor">[[Mattias Heldner|AUTHOR Mattias Heldner]], [[Marcin Włodarczak|AUTHOR Marcin Włodarczak]], [[Štefan Beňuš|AUTHOR Štefan Beňuš]], [[Agustín Gravano|AUTHOR Agustín Gravano]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191537.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-7|PAPER Thu-P-9-C-7 — Turn-Taking Prediction Based on Detection of Transition Relevance Place]]</div>|<div class="cpsessionviewpapertitle">Turn-Taking Prediction Based on Detection of Transition Relevance Place</div><div class="cpsessionviewpaperauthor">[[Kohei Hara|AUTHOR Kohei Hara]], [[Koji Inoue|AUTHOR Koji Inoue]], [[Katsuya Takanashi|AUTHOR Katsuya Takanashi]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191527.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-8|PAPER Thu-P-9-C-8 — Analysis of Effect and Timing of Fillers in Natural Turn-Taking]]</div>|<div class="cpsessionviewpapertitle">Analysis of Effect and Timing of Fillers in Natural Turn-Taking</div><div class="cpsessionviewpaperauthor">[[Divesh Lala|AUTHOR Divesh Lala]], [[Shizuka Nakamura|AUTHOR Shizuka Nakamura]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191313.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-9|PAPER Thu-P-9-C-9 — Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation]]</div>|<div class="cpsessionviewpapertitle">Multimodal Response Obligation Detection with Unsupervised Online Domain Adaptation</div><div class="cpsessionviewpaperauthor">[[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191300.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-C-10|PAPER Thu-P-9-C-10 — Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System]]</div>|<div class="cpsessionviewpapertitle">Follow-Up Question Generation Using Neural Tensor Network-Based Domain Ontology Population in an Interview Coaching System</div><div class="cpsessionviewpaperauthor">[[Ming-Hsiang Su|AUTHOR Ming-Hsiang Su]], [[Chung-Hsien Wu|AUTHOR Chung-Hsien Wu]], [[Yi Chang|AUTHOR Yi Chang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-1|PAPER Thu-P-9-D-1 — On the Role of Style in Parsing Speech with Neural Models]]</div>|<div class="cpsessionviewpapertitle">On the Role of Style in Parsing Speech with Neural Models</div><div class="cpsessionviewpaperauthor">[[Trang Tran|AUTHOR Trang Tran]], [[Jiahong Yuan|AUTHOR Jiahong Yuan]], [[Yang Liu|AUTHOR Yang Liu]], [[Mari Ostendorf|AUTHOR Mari Ostendorf]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193051.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-2|PAPER Thu-P-9-D-2 — On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval]]</div>|<div class="cpsessionviewpapertitle">On the Contributions of Visual and Textual Supervision in Low-Resource Semantic Speech Retrieval</div><div class="cpsessionviewpaperauthor">[[Ankita Pasad|AUTHOR Ankita Pasad]], [[Bowen Shi|AUTHOR Bowen Shi]], [[Herman Kamper|AUTHOR Herman Kamper]], [[Karen Livescu|AUTHOR Karen Livescu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-3|PAPER Thu-P-9-D-3 — Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of Off-Topic Spoken Responses Using Very Deep Convolutional Neural Networks</div><div class="cpsessionviewpaperauthor">[[Xinhao Wang|AUTHOR Xinhao Wang]], [[Su-Youn Yoon|AUTHOR Su-Youn Yoon]], [[Keelan Evanini|AUTHOR Keelan Evanini]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Yao Qian|AUTHOR Yao Qian]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191817.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-4|PAPER Thu-P-9-D-4 — Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings]]</div>|<div class="cpsessionviewpapertitle">Rescoring Keyword Search Confidence Estimates with Graph-Based Re-Ranking Using Acoustic Word Embeddings</div><div class="cpsessionviewpaperauthor">[[Anna Piunova|AUTHOR Anna Piunova]], [[Eugen Beck|AUTHOR Eugen Beck]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191749.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-5|PAPER Thu-P-9-D-5 — SpeechYOLO: Detection and Localization of Speech Objects]]</div>|<div class="cpsessionviewpapertitle">SpeechYOLO: Detection and Localization of Speech Objects</div><div class="cpsessionviewpaperauthor">[[Yael Segal|AUTHOR Yael Segal]], [[Tzeviya Sylvia Fuchs|AUTHOR Tzeviya Sylvia Fuchs]], [[Joseph Keshet|AUTHOR Joseph Keshet]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191621.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-6|PAPER Thu-P-9-D-6 — Prosodic Phrase Alignment for Machine Dubbing]]</div>|<div class="cpsessionviewpapertitle">Prosodic Phrase Alignment for Machine Dubbing</div><div class="cpsessionviewpaperauthor">[[Alp Öktem|AUTHOR Alp Öktem]], [[Mireia Farrús|AUTHOR Mireia Farrús]], [[Antonio Bonafonte|AUTHOR Antonio Bonafonte]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191553.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-7|PAPER Thu-P-9-D-7 — Spot the Pleasant People! Navigating the Cocktail Party Buzz]]</div>|<div class="cpsessionviewpapertitle">Spot the Pleasant People! Navigating the Cocktail Party Buzz</div><div class="cpsessionviewpaperauthor">[[Christina Tånnander|AUTHOR Christina Tånnander]], [[Per Fallgren|AUTHOR Per Fallgren]], [[Jens Edlund|AUTHOR Jens Edlund]], [[Joakim Gusafsson|AUTHOR Joakim Gusafsson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191417.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-8|PAPER Thu-P-9-D-8 — Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels]]</div>|<div class="cpsessionviewpapertitle">Neural Text Clustering with Document-Level Attention Based on Dynamic Soft Labels</div><div class="cpsessionviewpaperauthor">[[Zhi Chen|AUTHOR Zhi Chen]], [[Wu Guo|AUTHOR Wu Guo]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Jun Du|AUTHOR Jun Du]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191336.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-9|PAPER Thu-P-9-D-9 — Noisy BiLSTM-Based Models for Disfluency Detection]]</div>|<div class="cpsessionviewpapertitle">Noisy BiLSTM-Based Models for Disfluency Detection</div><div class="cpsessionviewpaperauthor">[[Nguyen Bach|AUTHOR Nguyen Bach]], [[Fei Huang|AUTHOR Fei Huang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-10|PAPER Thu-P-9-D-10 — Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search]]</div>|<div class="cpsessionviewpapertitle">Subword RNNLM Approximations for Out-Of-Vocabulary Keyword Search</div><div class="cpsessionviewpaperauthor">[[Mittul Singh|AUTHOR Mittul Singh]], [[Sami Virpioja|AUTHOR Sami Virpioja]], [[Peter Smit|AUTHOR Peter Smit]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191180.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-D-11|PAPER Thu-P-9-D-11 — Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint]]</div>|<div class="cpsessionviewpapertitle">Simultaneous Detection and Localization of a Wake-Up Word Using Multi-Task Learning of the Duration and Endpoint</div><div class="cpsessionviewpaperauthor">[[Takashi Maekaku|AUTHOR Takashi Maekaku]], [[Yusuke Kida|AUTHOR Yusuke Kida]], [[Akihiko Sugiyama|AUTHOR Akihiko Sugiyama]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Antonio Peinado|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-1|PAPER Thu-P-9-E-1 — On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks]]</div>|<div class="cpsessionviewpapertitle">On Mitigating Acoustic Feedback in Hearing Aids with Frequency Warping by All-Pass Networks</div><div class="cpsessionviewpaperauthor">[[Ching-Hua Lee|AUTHOR Ching-Hua Lee]], [[Kuan-Lin Chen|AUTHOR Kuan-Lin Chen]], [[fred harris|AUTHOR fred harris]], [[Bhaskar D. Rao|AUTHOR Bhaskar D. Rao]], [[Harinath Garudadri|AUTHOR Harinath Garudadri]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192908.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-2|PAPER Thu-P-9-E-2 — Deep Multitask Acoustic Echo Cancellation]]</div>|<div class="cpsessionviewpapertitle">Deep Multitask Acoustic Echo Cancellation</div><div class="cpsessionviewpaperauthor">[[Amin Fazel|AUTHOR Amin Fazel]], [[Mostafa El-Khamy|AUTHOR Mostafa El-Khamy]], [[Jungwon Lee|AUTHOR Jungwon Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192651.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-3|PAPER Thu-P-9-E-3 — Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions]]</div>|<div class="cpsessionviewpapertitle">Deep Learning for Joint Acoustic Echo and Noise Cancellation with Nonlinear Distortions</div><div class="cpsessionviewpaperauthor">[[Hao Zhang|AUTHOR Hao Zhang]], [[Ke Tan|AUTHOR Ke Tan]], [[DeLiang Wang|AUTHOR DeLiang Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192929.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-4|PAPER Thu-P-9-E-4 — Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction]]</div>|<div class="cpsessionviewpapertitle">Harmonic Beamformers for Non-Intrusive Speech Intelligibility Prediction</div><div class="cpsessionviewpaperauthor">[[Charlotte Sørensen|AUTHOR Charlotte Sørensen]], [[Jesper B. Boldt|AUTHOR Jesper B. Boldt]], [[Mads G. Christensen|AUTHOR Mads G. Christensen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191850.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-5|PAPER Thu-P-9-E-5 — Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients]]</div>|<div class="cpsessionviewpapertitle">Convolutional Neural Network-Based Speech Enhancement for Cochlear Implant Recipients</div><div class="cpsessionviewpaperauthor">[[Nursadul Mamun|AUTHOR Nursadul Mamun]], [[Soheil Khorram|AUTHOR Soheil Khorram]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191625.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-6|PAPER Thu-P-9-E-6 — Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech]]</div>|<div class="cpsessionviewpapertitle">Validation of the Non-Intrusive Codebook-Based Short Time Objective Intelligibility Metric for Processed Speech</div><div class="cpsessionviewpaperauthor">[[Charlotte Sørensen|AUTHOR Charlotte Sørensen]], [[Jesper B. Boldt|AUTHOR Jesper B. Boldt]], [[Mads G. Christensen|AUTHOR Mads G. Christensen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191381.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-7|PAPER Thu-P-9-E-7 — Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System]]</div>|<div class="cpsessionviewpapertitle">Predicting Speech Intelligibility of Enhanced Speech Using Phone Accuracy of DNN-Based ASR System</div><div class="cpsessionviewpaperauthor">[[Kenichi Arai|AUTHOR Kenichi Arai]], [[Shoko Araki|AUTHOR Shoko Araki]], [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]], [[Katsuhiko Yamamoto|AUTHOR Katsuhiko Yamamoto]], [[Toshio Irino|AUTHOR Toshio Irino]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-8|PAPER Thu-P-9-E-8 — A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR]]</div>|<div class="cpsessionviewpapertitle">A Novel Method to Correct Steering Vectors in MVDR Beamformer for Noise Robust ASR</div><div class="cpsessionviewpaperauthor">[[Suliang Bu|AUTHOR Suliang Bu]], [[Yunxin Zhao|AUTHOR Yunxin Zhao]], [[Mei-Yuh Hwang|AUTHOR Mei-Yuh Hwang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192397.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-9|PAPER Thu-P-9-E-9 — End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform]]</div>|<div class="cpsessionviewpapertitle">End-to-End Multi-Channel Speech Enhancement Using Inter-Channel Time-Restricted Attention on Raw Waveform</div><div class="cpsessionviewpaperauthor">[[Hyeonseung Lee|AUTHOR Hyeonseung Lee]], [[Hyung Yong Kim|AUTHOR Hyung Yong Kim]], [[Woo Hyun Kang|AUTHOR Woo Hyun Kang]], [[Jeunghun Kim|AUTHOR Jeunghun Kim]], [[Nam Soo Kim|AUTHOR Nam Soo Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192266.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-10|PAPER Thu-P-9-E-10 — Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information]]</div>|<div class="cpsessionviewpapertitle">Neural Spatial Filter: Target Speaker Speech Separation Assisted with Directional Information</div><div class="cpsessionviewpaperauthor">[[Rongzhi Gu|AUTHOR Rongzhi Gu]], [[Lianwu Chen|AUTHOR Lianwu Chen]], [[Shi-Xiong Zhang|AUTHOR Shi-Xiong Zhang]], [[Jimeng Zheng|AUTHOR Jimeng Zheng]], [[Yong Xu|AUTHOR Yong Xu]], [[Meng Yu|AUTHOR Meng Yu]], [[Dan Su|AUTHOR Dan Su]], [[Yuexian Zou|AUTHOR Yuexian Zou]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193114.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-P-9-E-11|PAPER Thu-P-9-E-11 — My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions]]</div>|<div class="cpsessionviewpapertitle">My Lips Are Concealed: Audio-Visual Speech Enhancement Through Obstructions</div><div class="cpsessionviewpaperauthor">[[Triantafyllos Afouras|AUTHOR Triantafyllos Afouras]], [[Joon Son Chung|AUTHOR Joon Son Chung]], [[Andrew Zisserman|AUTHOR Andrew Zisserman]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>

</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 4|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-S&T-6-1|PAPER Thu-S&T-6-1 — Elpis, an Accessible Speech-to-Text Tool]]</div>|<div class="cpsessionviewpapertitle">Elpis, an Accessible Speech-to-Text Tool</div><div class="cpsessionviewpaperauthor">[[Ben Foley|AUTHOR Ben Foley]], [[Alina Rakhi|AUTHOR Alina Rakhi]], [[Nicholas Lambourne|AUTHOR Nicholas Lambourne]], [[Nicholas Buckeridge|AUTHOR Nicholas Buckeridge]], [[Janet Wiles|AUTHOR Janet Wiles]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198009.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-S&T-6-2|PAPER Thu-S&T-6-2 — Framework for Conducting Tasks Requiring Human Assessment]]</div>|<div class="cpsessionviewpapertitle">Framework for Conducting Tasks Requiring Human Assessment</div><div class="cpsessionviewpaperauthor">[[Martin Grůber|AUTHOR Martin Grůber]], [[Adam Chýlek|AUTHOR Adam Chýlek]], [[Jindřich Matoušek|AUTHOR Jindřich Matoušek]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-S&T-6-3|PAPER Thu-S&T-6-3 — Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin]]</div>|<div class="cpsessionviewpapertitle">Multimedia Simultaneous Translation System for Minority Language Communication with Mandarin</div><div class="cpsessionviewpaperauthor">[[Shen Huang|AUTHOR Shen Huang]], [[Bojie Hu|AUTHOR Bojie Hu]], [[Shan Huang|AUTHOR Shan Huang]], [[Pengfei Hu|AUTHOR Pengfei Hu]], [[Jian Kang|AUTHOR Jian Kang]], [[Zhiqiang Lv|AUTHOR Zhiqiang Lv]], [[Jinghao Yan|AUTHOR Jinghao Yan]], [[Qi Ju|AUTHOR Qi Ju]], [[Shiyin Kang|AUTHOR Shiyin Kang]], [[Deyi Tuo|AUTHOR Deyi Tuo]], [[Guangzhi Li|AUTHOR Guangzhi Li]], [[Nurmemet Yolwas|AUTHOR Nurmemet Yolwas]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-S&T-6-4|PAPER Thu-S&T-6-4 — The SAIL LABS Media Mining Indexer and the CAVA Framework]]</div>|<div class="cpsessionviewpapertitle">The SAIL LABS Media Mining Indexer and the CAVA Framework</div><div class="cpsessionviewpaperauthor">[[Erinc Dikici|AUTHOR Erinc Dikici]], [[Gerhard Backfried|AUTHOR Gerhard Backfried]], [[Jürgen Riedler|AUTHOR Jürgen Riedler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-S&T-6-5|PAPER Thu-S&T-6-5 — CaptionAI: A Real-Time Multilingual Captioning Application]]</div>|<div class="cpsessionviewpapertitle">CaptionAI: A Real-Time Multilingual Captioning Application</div><div class="cpsessionviewpaperauthor">[[Nagendra Kumar Goel|AUTHOR Nagendra Kumar Goel]], [[Mousmita Sarma|AUTHOR Mousmita Sarma]], [[Saikiran Valluri|AUTHOR Saikiran Valluri]], [[Dharmeshkumar Agrawal|AUTHOR Dharmeshkumar Agrawal]], [[Steve Braich|AUTHOR Steve Braich]], [[Tejendra Singh Kuswah|AUTHOR Tejendra Singh Kuswah]], [[Zikra Iqbal|AUTHOR Zikra Iqbal]], [[Surbhi Chauhan|AUTHOR Surbhi Chauhan]], [[Raj Karbar|AUTHOR Raj Karbar]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Thursday 19 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Sunayana Sitaram, Alan W. Black|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-1|PAPER Thu-O-10-5-1 — Improving Code-Switched Language Modeling Performance Using Cognate Features]]</div>|<div class="cpsessionviewpapertitle">Improving Code-Switched Language Modeling Performance Using Cognate Features</div><div class="cpsessionviewpaperauthor">[[Victor Soto|AUTHOR Victor Soto]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191382.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-2|PAPER Thu-O-10-5-2 — Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling]]</div>|<div class="cpsessionviewpapertitle">Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling</div><div class="cpsessionviewpaperauthor">[[Grandee Lee|AUTHOR Grandee Lee]], [[Xianghu Yue|AUTHOR Xianghu Yue]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-3|PAPER Thu-O-10-5-3 — Variational Attention Using Articulatory Priors for Generating Code Mixed Speech Using Monolingual Corpora]]</div>|<div class="cpsessionviewpapertitle">Variational Attention Using Articulatory Priors for Generating Code Mixed Speech Using Monolingual Corpora</div><div class="cpsessionviewpaperauthor">[[SaiKrishna Rallabandi|AUTHOR SaiKrishna Rallabandi]], [[Alan W. Black|AUTHOR Alan W. Black]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-4|PAPER Thu-O-10-5-4 — Code-Switching Detection Using ASR-Generated Language Posteriors]]</div>|<div class="cpsessionviewpapertitle">Code-Switching Detection Using ASR-Generated Language Posteriors</div><div class="cpsessionviewpaperauthor">[[Qinyi Wang|AUTHOR Qinyi Wang]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Adem Derinel|AUTHOR Adem Derinel]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-5|PAPER Thu-O-10-5-5 — Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Acoustic Model Training for Five-Lingual Code-Switched ASR</div><div class="cpsessionviewpaperauthor">[[Astik Biswas|AUTHOR Astik Biswas]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Febe de Wet|AUTHOR Febe de Wet]], [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]], [[Thomas Niesler|AUTHOR Thomas Niesler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191125.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-6|PAPER Thu-O-10-5-6 — Multi-Graph Decoding for Code-Switching ASR]]</div>|<div class="cpsessionviewpapertitle">Multi-Graph Decoding for Code-Switching ASR</div><div class="cpsessionviewpaperauthor">[[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Samuel Cohen|AUTHOR Samuel Cohen]], [[Xianghu Yue|AUTHOR Xianghu Yue]], [[David A. van Leeuwen|AUTHOR David A. van Leeuwen]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193038.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-O-10-5-7|PAPER Thu-O-10-5-7 — End-to-End Multilingual Multi-Speaker Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">End-to-End Multilingual Multi-Speaker Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Hiroshi Seki|AUTHOR Hiroshi Seki]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]], [[John R. Hershey|AUTHOR John R. Hershey]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Thursday 19 Sept 2019, Hall 3|<|
|^Chair:&nbsp;|^Tom Bäckström, Stephan Sigg, Rainer Martin|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192647.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-9-6-1|PAPER Thu-SS-9-6-1 — The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding]]</div>|<div class="cpsessionviewpapertitle">The GDPR & Speech Data: Reflections of Legal and Technology Communities, First Steps Towards a Common Understanding</div><div class="cpsessionviewpaperauthor">[[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Catherine Jasserand|AUTHOR Catherine Jasserand]], [[Els Kindt|AUTHOR Els Kindt]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]], [[Nicholas Evans|AUTHOR Nicholas Evans]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192415.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-9-6-2|PAPER Thu-SS-9-6-2 — Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?]]</div>|<div class="cpsessionviewpapertitle">Privacy-Preserving Adversarial Representation Learning in ASR: Reality or Illusion?</div><div class="cpsessionviewpaperauthor">[[Brij Mohan Lal Srivastava|AUTHOR Brij Mohan Lal Srivastava]], [[Aurélien Bellet|AUTHOR Aurélien Bellet]], [[Marc Tommasi|AUTHOR Marc Tommasi]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191148.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-9-6-3|PAPER Thu-SS-9-6-3 — Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification]]</div>|<div class="cpsessionviewpapertitle">Privacy-Preserving Siamese Feature Extraction for Gender Recognition versus Speaker Identification</div><div class="cpsessionviewpaperauthor">[[Alexandru Nelus|AUTHOR Alexandru Nelus]], [[Silas Rech|AUTHOR Silas Rech]], [[Timm Koppelmann|AUTHOR Timm Koppelmann]], [[Henrik Biermann|AUTHOR Henrik Biermann]], [[Rainer Martin|AUTHOR Rainer Martin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191703.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-9-6-4|PAPER Thu-SS-9-6-4 — Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification]]</div>|<div class="cpsessionviewpapertitle">Privacy-Preserving Variational Information Feature Extraction for Domestic Activity Monitoring versus Speaker Identification</div><div class="cpsessionviewpaperauthor">[[Alexandru Nelus|AUTHOR Alexandru Nelus]], [[Janek Ebbers|AUTHOR Janek Ebbers]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]], [[Rainer Martin|AUTHOR Rainer Martin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191136.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-9-6-5|PAPER Thu-SS-9-6-5 — Extracting Mel-Frequency and Bark-Frequency Cepstral Coefficients from Encrypted Signals]]</div>|<div class="cpsessionviewpapertitle">Extracting Mel-Frequency and Bark-Frequency Cepstral Coefficients from Encrypted Signals</div><div class="cpsessionviewpaperauthor">[[Patricia Thaine|AUTHOR Patricia Thaine]], [[Gerald Penn|AUTHOR Gerald Penn]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191172.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Thu-SS-9-6-6|PAPER Thu-SS-9-6-6 — Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy]]</div>|<div class="cpsessionviewpapertitle">Sound Privacy: A Conversational Speech Corpus for Quantifying the Experience of Privacy</div><div class="cpsessionviewpaperauthor">[[Pablo Pérez Zarazaga|AUTHOR Pablo Pérez Zarazaga]], [[Sneha Das|AUTHOR Sneha Das]], [[Tom Bäckström|AUTHOR Tom Bäckström]], [[V. Vidyadhara Raju V.|AUTHOR V. Vidyadhara Raju V.]], [[Anil Kumar Vuppala|AUTHOR Anil Kumar Vuppala]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|08:30–09:30, Tuesday 17 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Tue-K-2|PAPER Tue-K-2 — Biosignal Processing for Human-Machine Interaction]]</div>|<div class="cpsessionviewpapertitle">Biosignal Processing for Human-Machine Interaction</div><div class="cpsessionviewpaperauthor">[[Tanja Schultz|AUTHOR Tanja Schultz]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Chiori Hori, Hermann Ney|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Tue-O-3-1-1|PAPER Tue-O-3-1-1 — Survey Talk: A Survey on Speech Translation]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: A Survey on Speech Translation</div><div class="cpsessionviewpaperauthor">[[Jan Niehues|AUTHOR Jan Niehues]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191951.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-1-2|PAPER Tue-O-3-1-2 — Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model]]</div>|<div class="cpsessionviewpapertitle">Direct Speech-to-Speech Translation with a Sequence-to-Sequence Model</div><div class="cpsessionviewpaperauthor">[[Ye Jia|AUTHOR Ye Jia]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Fadi Biadsy|AUTHOR Fadi Biadsy]], [[Wolfgang Macherey|AUTHOR Wolfgang Macherey]], [[Melvin Johnson|AUTHOR Melvin Johnson]], [[Zhifeng Chen|AUTHOR Zhifeng Chen]], [[Yonghui Wu|AUTHOR Yonghui Wu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192582.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-1-3|PAPER Tue-O-3-1-3 — End-to-End Speech Translation with Knowledge Distillation]]</div>|<div class="cpsessionviewpapertitle">End-to-End Speech Translation with Knowledge Distillation</div><div class="cpsessionviewpaperauthor">[[Yuchen Liu|AUTHOR Yuchen Liu]], [[Hao Xiong|AUTHOR Hao Xiong]], [[Jiajun Zhang|AUTHOR Jiajun Zhang]], [[Zhongjun He|AUTHOR Zhongjun He]], [[Hua Wu|AUTHOR Hua Wu]], [[Haifeng Wang|AUTHOR Haifeng Wang]], [[Chengqing Zong|AUTHOR Chengqing Zong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-1-4|PAPER Tue-O-3-1-4 — Adapting Transformer to End-to-End Spoken Language Translation]]</div>|<div class="cpsessionviewpapertitle">Adapting Transformer to End-to-End Spoken Language Translation</div><div class="cpsessionviewpaperauthor">[[Mattia A. Di Gangi|AUTHOR Mattia A. Di Gangi]], [[Matteo Negri|AUTHOR Matteo Negri]], [[Marco Turchi|AUTHOR Marco Turchi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-1-5|PAPER Tue-O-3-1-5 — Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Phonetic and Word Level Discovery for Speech to Speech Translation for Unwritten Languages</div><div class="cpsessionviewpaperauthor">[[Steven Hillis|AUTHOR Steven Hillis]], [[Anushree Prasanna Kumar|AUTHOR Anushree Prasanna Kumar]], [[Alan W. Black|AUTHOR Alan W. Black]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Kong Aik Lee, Oldřich Plchot|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193146.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-2-1|PAPER Tue-O-3-2-1 — Deep Speaker Recognition: Modular or Monolithic?]]</div>|<div class="cpsessionviewpapertitle">Deep Speaker Recognition: Modular or Monolithic?</div><div class="cpsessionviewpaperauthor">[[Gautam Bhattacharya|AUTHOR Gautam Bhattacharya]], [[Jahangir Alam|AUTHOR Jahangir Alam]], [[Patrick Kenny|AUTHOR Patrick Kenny]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193036.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-2-2|PAPER Tue-O-3-2-2 — On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction]]</div>|<div class="cpsessionviewpapertitle">On the Usage of Phonetic Information for Text-Independent Speaker Embedding Extraction</div><div class="cpsessionviewpaperauthor">[[Shuai Wang|AUTHOR Shuai Wang]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]], [[Jan Černocký|AUTHOR Jan Černocký]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192380.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-2-3|PAPER Tue-O-3-2-3 — Learning Speaker Representations with Mutual Information]]</div>|<div class="cpsessionviewpapertitle">Learning Speaker Representations with Mutual Information</div><div class="cpsessionviewpaperauthor">[[Mirco Ravanelli|AUTHOR Mirco Ravanelli]], [[Yoshua Bengio|AUTHOR Yoshua Bengio]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192264.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-2-4|PAPER Tue-O-3-2-4 — Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Learning with High-Order Statistics for x-Vector Based Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Lanhua You|AUTHOR Lanhua You]], [[Wu Guo|AUTHOR Wu Guo]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Jun Du|AUTHOR Jun Du]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-2-5|PAPER Tue-O-3-2-5 — Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Data Augmentation Using Variational Autoencoder for Embedding Based Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Zhanghao Wu|AUTHOR Zhanghao Wu]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191746.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-2-6|PAPER Tue-O-3-2-6 — Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Deep Neural Network Embeddings with Gating Mechanisms for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Lanhua You|AUTHOR Lanhua You]], [[Wu Guo|AUTHOR Wu Guo]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]], [[Jun Du|AUTHOR Jun Du]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Yannick Estève, David Griol|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193075.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-3-1|PAPER Tue-O-3-3-1 — Neural Transition Systems for Modeling Hierarchical Semantic Representations]]</div>|<div class="cpsessionviewpapertitle">Neural Transition Systems for Modeling Hierarchical Semantic Representations</div><div class="cpsessionviewpaperauthor">[[Riyaz Bhat|AUTHOR Riyaz Bhat]], [[John Chen|AUTHOR John Chen]], [[Rashmi Prasad|AUTHOR Rashmi Prasad]], [[Srinivas Bangalore|AUTHOR Srinivas Bangalore]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192977.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-3-2|PAPER Tue-O-3-3-2 — Mining Polysemous Triplets with Recurrent Neural Networks for Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Mining Polysemous Triplets with Recurrent Neural Networks for Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Vedran Vukotić|AUTHOR Vedran Vukotić]], [[Christian Raymond|AUTHOR Christian Raymond]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192955.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-3-3|PAPER Tue-O-3-3-3 — Iterative Delexicalization for Improved Spoken Language Understanding]]</div>|<div class="cpsessionviewpapertitle">Iterative Delexicalization for Improved Spoken Language Understanding</div><div class="cpsessionviewpaperauthor">[[Avik Ray|AUTHOR Avik Ray]], [[Yilin Shen|AUTHOR Yilin Shen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192366.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-3-4|PAPER Tue-O-3-3-4 — End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios]]</div>|<div class="cpsessionviewpapertitle">End-to-End Spoken Language Understanding: Bootstrapping in Low Resource Scenarios</div><div class="cpsessionviewpaperauthor">[[Swapnil Bhosale|AUTHOR Swapnil Bhosale]], [[Imran Sheikh|AUTHOR Imran Sheikh]], [[Sri Harsha Dumpala|AUTHOR Sri Harsha Dumpala]], [[Sunil Kumar Kopparapu|AUTHOR Sunil Kumar Kopparapu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192121.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-3-5|PAPER Tue-O-3-3-5 — Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System]]</div>|<div class="cpsessionviewpapertitle">Recognition of Intentions of Users’ Short Responses for Conversational News Delivery System</div><div class="cpsessionviewpaperauthor">[[Hiroaki Takatsu|AUTHOR Hiroaki Takatsu]], [[Katsuya Yokoyama|AUTHOR Katsuya Yokoyama]], [[Yoichi Matsuyama|AUTHOR Yoichi Matsuyama]], [[Hiroshi Honda|AUTHOR Hiroshi Honda]], [[Shinya Fujie|AUTHOR Shinya Fujie]], [[Tetsunori Kobayashi|AUTHOR Tetsunori Kobayashi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-3-6|PAPER Tue-O-3-3-6 — Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability]]</div>|<div class="cpsessionviewpapertitle">Curriculum-Based Transfer Learning for an Effective End-to-End Spoken Language Understanding and Domain Portability</div><div class="cpsessionviewpaperauthor">[[Antoine Caubrière|AUTHOR Antoine Caubrière]], [[Natalia Tomashenko|AUTHOR Natalia Tomashenko]], [[Antoine Laurent|AUTHOR Antoine Laurent]], [[Emmanuel Morin|AUTHOR Emmanuel Morin]], [[Nathalie Camelin|AUTHOR Nathalie Camelin]], [[Yannick Estève|AUTHOR Yannick Estève]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Ann Bradlow, Okko Räsänen|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193105.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-4-1|PAPER Tue-O-3-4-1 — Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals]]</div>|<div class="cpsessionviewpapertitle">Spatial and Spectral Fingerprint in the Brain: Speaker Identification from Single Trial MEG Signals</div><div class="cpsessionviewpaperauthor">[[Debadatta Dash|AUTHOR Debadatta Dash]], [[Paul Ferrari|AUTHOR Paul Ferrari]], [[Jun Wang|AUTHOR Jun Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192729.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-4-2|PAPER Tue-O-3-4-2 — ERP Signal Analysis with Temporal Resolution Using a Time Window Bank]]</div>|<div class="cpsessionviewpapertitle">ERP Signal Analysis with Temporal Resolution Using a Time Window Bank</div><div class="cpsessionviewpaperauthor">[[Annika Nijveld|AUTHOR Annika Nijveld]], [[L. ten Bosch|AUTHOR L. ten Bosch]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192443.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-4-3|PAPER Tue-O-3-4-3 — Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics]]</div>|<div class="cpsessionviewpapertitle">Phase Synchronization Between EEG Signals as a Function of Differences Between Stimuli Characteristics</div><div class="cpsessionviewpaperauthor">[[L. ten Bosch|AUTHOR L. ten Bosch]], [[K. Mulder|AUTHOR K. Mulder]], [[L. Boves|AUTHOR L. Boves]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192528.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-4-4|PAPER Tue-O-3-4-4 — The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence]]</div>|<div class="cpsessionviewpapertitle">The Processing of Prosodic Cues to Rhetorical Question Interpretation: Psycholinguistic and Neurolinguistics Evidence</div><div class="cpsessionviewpaperauthor">[[Mariya Kharaman|AUTHOR Mariya Kharaman]], [[Manluolan Xu|AUTHOR Manluolan Xu]], [[Carsten Eulitz|AUTHOR Carsten Eulitz]], [[Bettina Braun|AUTHOR Bettina Braun]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-4-5|PAPER Tue-O-3-4-5 — The Neural Correlates Underlying Lexically-Guided Perceptual Learning]]</div>|<div class="cpsessionviewpapertitle">The Neural Correlates Underlying Lexically-Guided Perceptual Learning</div><div class="cpsessionviewpaperauthor">[[Odette Scharenborg|AUTHOR Odette Scharenborg]], [[Jiska Koemans|AUTHOR Jiska Koemans]], [[Cybelle Smith|AUTHOR Cybelle Smith]], [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]], [[Kara D. Federmeier|AUTHOR Kara D. Federmeier]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-4-6|PAPER Tue-O-3-4-6 — Speech Quality Evaluation of Synthesized Japanese Speech Using EEG]]</div>|<div class="cpsessionviewpapertitle">Speech Quality Evaluation of Synthesized Japanese Speech Using EEG</div><div class="cpsessionviewpaperauthor">[[Ivan Halim Parmonangan|AUTHOR Ivan Halim Parmonangan]], [[Hiroki Tanaka|AUTHOR Hiroki Tanaka]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Shinnosuke Takamichi|AUTHOR Shinnosuke Takamichi]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Sanjeev Khudanpur, Michael Seltzer|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193006.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-5-1|PAPER Tue-O-3-5-1 — Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection]]</div>|<div class="cpsessionviewpapertitle">Multi-Microphone Adaptive Noise Cancellation for Robust Hotword Detection</div><div class="cpsessionviewpaperauthor">[[Yiteng Huang|AUTHOR Yiteng Huang]], [[Turaj Z. Shabestary|AUTHOR Turaj Z. Shabestary]], [[Alexander Gruenstein|AUTHOR Alexander Gruenstein]], [[Li Wan|AUTHOR Li Wan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192078.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-5-2|PAPER Tue-O-3-5-2 — Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Multi-Network Joint-Learning of Deep Residual Networks and Cycle-Consistency Generative Adversarial Networks for Robust Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shengkui Zhao|AUTHOR Shengkui Zhao]], [[Chongjia Ni|AUTHOR Chongjia Ni]], [[Rong Tong|AUTHOR Rong Tong]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192645.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-5-3|PAPER Tue-O-3-5-3 — R-Vectors: New Technique for Adaptation to Room Acoustics]]</div>|<div class="cpsessionviewpapertitle">R-Vectors: New Technique for Adaptation to Room Acoustics</div><div class="cpsessionviewpaperauthor">[[Yuri Khokhlov|AUTHOR Yuri Khokhlov]], [[Alexander Zatvornitskiy|AUTHOR Alexander Zatvornitskiy]], [[Ivan Medennikov|AUTHOR Ivan Medennikov]], [[Ivan Sorokin|AUTHOR Ivan Sorokin]], [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]], [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]], [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]], [[Vladimir Bataev|AUTHOR Vladimir Bataev]], [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]], [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]], [[Oleg Petrov|AUTHOR Oleg Petrov]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191167.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-5-4|PAPER Tue-O-3-5-4 — Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR]]</div>|<div class="cpsessionviewpapertitle">Guided Source Separation Meets a Strong ASR Backend: Hitachi/Paderborn University Joint Investigation for Dinner Party ASR</div><div class="cpsessionviewpaperauthor">[[Naoyuki Kanda|AUTHOR Naoyuki Kanda]], [[Christoph Boeddeker|AUTHOR Christoph Boeddeker]], [[Jens Heitkaemper|AUTHOR Jens Heitkaemper]], [[Yusuke Fujita|AUTHOR Yusuke Fujita]], [[Shota Horiguchi|AUTHOR Shota Horiguchi]], [[Kenji Nagamatsu|AUTHOR Kenji Nagamatsu]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192549.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-5-5|PAPER Tue-O-3-5-5 — Unsupervised Training of Neural Mask-Based Beamforming]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Training of Neural Mask-Based Beamforming</div><div class="cpsessionviewpaperauthor">[[Lukas Drude|AUTHOR Lukas Drude]], [[Jahn Heymann|AUTHOR Jahn Heymann]], [[Reinhold Haeb-Umbach|AUTHOR Reinhold Haeb-Umbach]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192601.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-3-5-6|PAPER Tue-O-3-5-6 — Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge]]</div>|<div class="cpsessionviewpapertitle">Acoustic Model Ensembling Using Effective Data Augmentation for CHiME-5 Challenge</div><div class="cpsessionviewpaperauthor">[[Feng Ma|AUTHOR Feng Ma]], [[Li Chai|AUTHOR Li Chai]], [[Jun Du|AUTHOR Jun Du]], [[Diyuan Liu|AUTHOR Diyuan Liu]], [[Zhongfu Ye|AUTHOR Zhongfu Ye]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Haizhou Li, Md Jahangir Alam|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Tue-O-4-1-1|PAPER Tue-O-4-1-1 — Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: End-to-End Deep Neural Network Based Speaker and Language Recognition</div><div class="cpsessionviewpaperauthor">[[Ming Li|AUTHOR Ming Li]], [[Weicheng Cai|AUTHOR Weicheng Cai]], [[Danwei Cai|AUTHOR Danwei Cai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192371.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-1-2|PAPER Tue-O-4-1-2 — Attention Based Hybrid i-Vector BLSTM Model for Language Recognition]]</div>|<div class="cpsessionviewpapertitle">Attention Based Hybrid i-Vector BLSTM Model for Language Recognition</div><div class="cpsessionviewpaperauthor">[[Bharat Padi|AUTHOR Bharat Padi]], [[Anand Mohan|AUTHOR Anand Mohan]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191982.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-1-3|PAPER Tue-O-4-1-3 — RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">RawNet: Advanced End-to-End Deep Neural Network Using Raw Waveforms for Text-Independent Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Ju-ho Kim|AUTHOR Ju-ho Kim]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191410.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-1-4|PAPER Tue-O-4-1-4 — Target Speaker Extraction for Multi-Talker Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Target Speaker Extraction for Multi-Talker Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Wei Rao|AUTHOR Wei Rao]], [[Chenglin Xu|AUTHOR Chenglin Xu]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191916.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-1-5|PAPER Tue-O-4-1-5 — Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale]]</div>|<div class="cpsessionviewpapertitle">Improving Keyword Spotting and Language Identification via Neural Architecture Search at Scale</div><div class="cpsessionviewpaperauthor">[[Hanna Mazzawi|AUTHOR Hanna Mazzawi]], [[Xavi Gonzalvo|AUTHOR Xavi Gonzalvo]], [[Aleks Kracun|AUTHOR Aleks Kracun]], [[Prashant Sridhar|AUTHOR Prashant Sridhar]], [[Niranjan Subrahmanya|AUTHOR Niranjan Subrahmanya]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]], [[Hyun Jin Park|AUTHOR Hyun Jin Park]], [[Patrick Violette|AUTHOR Patrick Violette]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Esther Klabbers, Rob Clark|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192325.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-2-1|PAPER Tue-O-4-2-1 — Forward-Backward Decoding for Regularizing End-to-End TTS]]</div>|<div class="cpsessionviewpapertitle">Forward-Backward Decoding for Regularizing End-to-End TTS</div><div class="cpsessionviewpaperauthor">[[Yibin Zheng|AUTHOR Yibin Zheng]], [[Xi Wang|AUTHOR Xi Wang]], [[Lei He|AUTHOR Lei He]], [[Shifeng Pan|AUTHOR Shifeng Pan]], [[Frank K. Soong|AUTHOR Frank K. Soong]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Jianhua Tao|AUTHOR Jianhua Tao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-2-2|PAPER Tue-O-4-2-2 — A New GAN-Based End-to-End TTS Training Algorithm]]</div>|<div class="cpsessionviewpapertitle">A New GAN-Based End-to-End TTS Training Algorithm</div><div class="cpsessionviewpaperauthor">[[Haohan Guo|AUTHOR Haohan Guo]], [[Frank K. Soong|AUTHOR Frank K. Soong]], [[Lei He|AUTHOR Lei He]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191972.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-2-3|PAPER Tue-O-4-2-3 — Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS]]</div>|<div class="cpsessionviewpapertitle">Robust Sequence-to-Sequence Acoustic Modeling with Stepwise Monotonic Attention for Neural TTS</div><div class="cpsessionviewpaperauthor">[[Mutian He|AUTHOR Mutian He]], [[Yan Deng|AUTHOR Yan Deng]], [[Lei He|AUTHOR Lei He]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-2-4|PAPER Tue-O-4-2-4 — Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet]]</div>|<div class="cpsessionviewpapertitle">Joint Training Framework for Text-to-Speech and Voice Conversion Using Multi-Source Tacotron and WaveNet</div><div class="cpsessionviewpaperauthor">[[Mingyang Zhang|AUTHOR Mingyang Zhang]], [[Xin Wang|AUTHOR Xin Wang]], [[Fuming Fang|AUTHOR Fuming Fang]], [[Haizhou Li|AUTHOR Haizhou Li]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191311.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-2-5|PAPER Tue-O-4-2-5 — Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora]]</div>|<div class="cpsessionviewpapertitle">Training Multi-Speaker Neural Text-to-Speech Systems Using Speaker-Imbalanced Speech Corpora</div><div class="cpsessionviewpaperauthor">[[Hieu-Thi Luong|AUTHOR Hieu-Thi Luong]], [[Xin Wang|AUTHOR Xin Wang]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Nobuyuki Nishizawa|AUTHOR Nobuyuki Nishizawa]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191288.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-2-6|PAPER Tue-O-4-2-6 — Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders]]</div>|<div class="cpsessionviewpapertitle">Real-Time Neural Text-to-Speech with Sequence-to-Sequence Acoustic Model and WaveGlow or Single Gaussian WaveRNN Vocoders</div><div class="cpsessionviewpaperauthor">[[Takuma Okamoto|AUTHOR Takuma Okamoto]], [[Tomoki Toda|AUTHOR Tomoki Toda]], [[Yoshinori Shiga|AUTHOR Yoshinori Shiga]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Dilek Hakkani-Tür, Mari Ostendorf|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-3-1|PAPER Tue-O-4-3-1 — Fusion Strategy for Prosodic and Lexical Representations of Word Importance]]</div>|<div class="cpsessionviewpapertitle">Fusion Strategy for Prosodic and Lexical Representations of Word Importance</div><div class="cpsessionviewpaperauthor">[[Sushant Kafle|AUTHOR Sushant Kafle]], [[Cecilia Ovesdotter Alm|AUTHOR Cecilia Ovesdotter Alm]], [[Matt Huenerfauth|AUTHOR Matt Huenerfauth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191548.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-3-2|PAPER Tue-O-4-3-2 — Self Attention in Variational Sequential Learning for Summarization]]</div>|<div class="cpsessionviewpapertitle">Self Attention in Variational Sequential Learning for Summarization</div><div class="cpsessionviewpaperauthor">[[Jen-Tzung Chien|AUTHOR Jen-Tzung Chien]], [[Chun-Wei Wang|AUTHOR Chun-Wei Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192482.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-3-3|PAPER Tue-O-4-3-3 — Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis]]</div>|<div class="cpsessionviewpapertitle">Multi-Modal Sentiment Analysis Using Deep Canonical Correlation Analysis</div><div class="cpsessionviewpaperauthor">[[Zhongkai Sun|AUTHOR Zhongkai Sun]], [[Prathusha K. Sarma|AUTHOR Prathusha K. Sarma]], [[William Sethares|AUTHOR William Sethares]], [[Erik P. Bucy|AUTHOR Erik P. Bucy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-3-4|PAPER Tue-O-4-3-4 — Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance]]</div>|<div class="cpsessionviewpapertitle">Interpreting and Improving Deep Neural SLU Models via Vocabulary Importance</div><div class="cpsessionviewpaperauthor">[[Yilin Shen|AUTHOR Yilin Shen]], [[Wenhu Chen|AUTHOR Wenhu Chen]], [[Hongxia Jin|AUTHOR Hongxia Jin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-3-5|PAPER Tue-O-4-3-5 — Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization]]</div>|<div class="cpsessionviewpapertitle">Assessing the Semantic Space Bias Caused by ASR Error Propagation and its Effect on Spoken Document Summarization</div><div class="cpsessionviewpaperauthor">[[Máté Ákos Tündik|AUTHOR Máté Ákos Tündik]], [[Valér Kaszás|AUTHOR Valér Kaszás]], [[György Szaszák|AUTHOR György Szaszák]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192228.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-3-6|PAPER Tue-O-4-3-6 — Latent Topic Attention for Domain Classification]]</div>|<div class="cpsessionviewpapertitle">Latent Topic Attention for Domain Classification</div><div class="cpsessionviewpaperauthor">[[Peisong Huang|AUTHOR Peisong Huang]], [[Peijie Huang|AUTHOR Peijie Huang]], [[Wencheng Ai|AUTHOR Wencheng Ai]], [[Jiande Ding|AUTHOR Jiande Ding]], [[Jinchuan Zhang|AUTHOR Jinchuan Zhang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Elmar Nöth, Tomohiro Nakatani|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191272.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-5-1|PAPER Tue-O-4-5-1 — A Unified Bayesian Source Modelling for Determined Blind Source Separation]]</div>|<div class="cpsessionviewpapertitle">A Unified Bayesian Source Modelling for Determined Blind Source Separation</div><div class="cpsessionviewpaperauthor">[[Chaitanya Narisetty|AUTHOR Chaitanya Narisetty]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-5-2|PAPER Tue-O-4-5-2 — Recursive Speech Separation for Unknown Number of Speakers]]</div>|<div class="cpsessionviewpapertitle">Recursive Speech Separation for Unknown Number of Speakers</div><div class="cpsessionviewpaperauthor">[[Naoya Takahashi|AUTHOR Naoya Takahashi]], [[Sudarsanam Parthasaarathy|AUTHOR Sudarsanam Parthasaarathy]], [[Nabarun Goswami|AUTHOR Nabarun Goswami]], [[Yuki Mitsufuji|AUTHOR Yuki Mitsufuji]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-5-3|PAPER Tue-O-4-5-3 — Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation]]</div>|<div class="cpsessionviewpapertitle">Practical Applicability of Deep Neural Networks for Overlapping Speaker Separation</div><div class="cpsessionviewpaperauthor">[[Pieter Appeltans|AUTHOR Pieter Appeltans]], [[Jeroen Zegers|AUTHOR Jeroen Zegers]], [[Hugo Van hamme|AUTHOR Hugo Van hamme]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192076.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-5-4|PAPER Tue-O-4-5-4 — Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model]]</div>|<div class="cpsessionviewpapertitle">Speech Separation Using Independent Vector Analysis with an Amplitude Variable Gaussian Mixture Model</div><div class="cpsessionviewpaperauthor">[[Zhaoyi Gu|AUTHOR Zhaoyi Gu]], [[Jing Lu|AUTHOR Jing Lu]], [[Kai Chen|AUTHOR Kai Chen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192181.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-5-5|PAPER Tue-O-4-5-5 — Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering]]</div>|<div class="cpsessionviewpapertitle">Improved Speech Separation with Time-and-Frequency Cross-Domain Joint Embedding and Clustering</div><div class="cpsessionviewpaperauthor">[[Gene-Ping Yang|AUTHOR Gene-Ping Yang]], [[Chao-I Tuan|AUTHOR Chao-I Tuan]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]], [[Lin-shan Lee|AUTHOR Lin-shan Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192821.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-4-5-6|PAPER Tue-O-4-5-6 — WHAM!: Extending Speech Separation to Noisy Environments]]</div>|<div class="cpsessionviewpapertitle">WHAM!: Extending Speech Separation to Noisy Environments</div><div class="cpsessionviewpaperauthor">[[Gordon Wichern|AUTHOR Gordon Wichern]], [[Joe Antognini|AUTHOR Joe Antognini]], [[Michael Flynn|AUTHOR Michael Flynn]], [[Licheng Richard Zhu|AUTHOR Licheng Richard Zhu]], [[Emmett McQuinn|AUTHOR Emmett McQuinn]], [[Dwight Crow|AUTHOR Dwight Crow]], [[Ethan Manilow|AUTHOR Ethan Manilow]], [[Jonathan Le Roux|AUTHOR Jonathan Le Roux]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Martin Cooke, Sebastian Möller|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Tue-O-5-1-1|PAPER Tue-O-5-1-1 — Survey Talk: Preserving Privacy in Speaker and Speech Characterisation]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Preserving Privacy in Speaker and Speech Characterisation</div><div class="cpsessionviewpaperauthor">[[Andreas Nautsch|AUTHOR Andreas Nautsch]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191800.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-1-2|PAPER Tue-O-5-1-2 — Evaluating Near End Listening Enhancement Algorithms in Realistic Environments]]</div>|<div class="cpsessionviewpapertitle">Evaluating Near End Listening Enhancement Algorithms in Realistic Environments</div><div class="cpsessionviewpaperauthor">[[Carol Chermaz|AUTHOR Carol Chermaz]], [[Cassia Valentini-Botinhao|AUTHOR Cassia Valentini-Botinhao]], [[Henning Schepker|AUTHOR Henning Schepker]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192898.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-1-3|PAPER Tue-O-5-1-3 — Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation]]</div>|<div class="cpsessionviewpapertitle">Improvement and Assessment of Spectro-Temporal Modulation Analysis for Speech Intelligibility Estimation</div><div class="cpsessionviewpaperauthor">[[Amin Edraki|AUTHOR Amin Edraki]], [[Wai-Yip Chan|AUTHOR Wai-Yip Chan]], [[Jesper Jensen|AUTHOR Jesper Jensen]], [[Daniel Fogerty|AUTHOR Daniel Fogerty]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191369.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-1-4|PAPER Tue-O-5-1-4 — Listener Preference on the Local Criterion for Ideal Binary-Masked Speech]]</div>|<div class="cpsessionviewpapertitle">Listener Preference on the Local Criterion for Ideal Binary-Masked Speech</div><div class="cpsessionviewpaperauthor">[[Zhuohuang Zhang|AUTHOR Zhuohuang Zhang]], [[Yi Shen|AUTHOR Yi Shen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-1-5|PAPER Tue-O-5-1-5 — Using a Manifold Vocoder for Spectral Voice and Style Conversion]]</div>|<div class="cpsessionviewpapertitle">Using a Manifold Vocoder for Spectral Voice and Style Conversion</div><div class="cpsessionviewpaperauthor">[[Tuan Dinh|AUTHOR Tuan Dinh]], [[Alexander Kain|AUTHOR Alexander Kain]], [[Kris Tjaden|AUTHOR Kris Tjaden]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Brian Kingsbury, Renato DeMori|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192454.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-2-1|PAPER Tue-O-5-2-1 — Multi-Span Acoustic Modelling Using Raw Waveform Signals]]</div>|<div class="cpsessionviewpapertitle">Multi-Span Acoustic Modelling Using Raw Waveform Signals</div><div class="cpsessionviewpaperauthor">[[P. von Platen|AUTHOR P. von Platen]], [[Chao Zhang|AUTHOR Chao Zhang]], [[P.C. Woodland|AUTHOR P.C. Woodland]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192879.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-2-2|PAPER Tue-O-5-2-2 — An Analysis of Local Monotonic Attention Variants]]</div>|<div class="cpsessionviewpapertitle">An Analysis of Local Monotonic Attention Variants</div><div class="cpsessionviewpaperauthor">[[André Merboldt|AUTHOR André Merboldt]], [[Albert Zeyer|AUTHOR Albert Zeyer]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192971.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-2-3|PAPER Tue-O-5-2-3 — Layer Trajectory BLSTM]]</div>|<div class="cpsessionviewpapertitle">Layer Trajectory BLSTM</div><div class="cpsessionviewpaperauthor">[[Eric Sun|AUTHOR Eric Sun]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191938.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-2-4|PAPER Tue-O-5-2-4 — Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration]]</div>|<div class="cpsessionviewpapertitle">Improving Transformer-Based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration</div><div class="cpsessionviewpaperauthor">[[Shigeki Karita|AUTHOR Shigeki Karita]], [[Nelson Enrique Yalta Soplin|AUTHOR Nelson Enrique Yalta Soplin]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-2-5|PAPER Tue-O-5-2-5 — Trainable Dynamic Subsampling for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Trainable Dynamic Subsampling for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shucong Zhang|AUTHOR Shucong Zhang]], [[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Yumo Xu|AUTHOR Yumo Xu]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-2-6|PAPER Tue-O-5-2-6 — Shallow-Fusion End-to-End Contextual Biasing]]</div>|<div class="cpsessionviewpapertitle">Shallow-Fusion End-to-End Contextual Biasing</div><div class="cpsessionviewpaperauthor">[[Ding Zhao|AUTHOR Ding Zhao]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[David Rybach|AUTHOR David Rybach]], [[Pat Rondon|AUTHOR Pat Rondon]], [[Deepti Bhatia|AUTHOR Deepti Bhatia]], [[Bo Li|AUTHOR Bo Li]], [[Ruoming Pang|AUTHOR Ruoming Pang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Carol Espy-Wilson, Khiet Truong|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191900.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-3-1|PAPER Tue-O-5-3-1 — Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance]]</div>|<div class="cpsessionviewpapertitle">Modeling Interpersonal Linguistic Coordination in Conversations Using Word Mover’s Distance</div><div class="cpsessionviewpaperauthor">[[Md. Nasir|AUTHOR Md. Nasir]], [[Sandeep Nallan Chakravarthula|AUTHOR Sandeep Nallan Chakravarthula]], [[Brian R.W. Baucom|AUTHOR Brian R.W. Baucom]], [[David C. Atkins|AUTHOR David C. Atkins]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193059.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-3-2|PAPER Tue-O-5-3-2 — Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach]]</div>|<div class="cpsessionviewpapertitle">Bag-of-Acoustic-Words for Mental Health Assessment: A Deep Autoencoding Approach</div><div class="cpsessionviewpaperauthor">[[Wenchao Du|AUTHOR Wenchao Du]], [[Louis-Philippe Morency|AUTHOR Louis-Philippe Morency]], [[Jeffrey Cohn|AUTHOR Jeffrey Cohn]], [[Alan W. Black|AUTHOR Alan W. Black]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192960.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-3-3|PAPER Tue-O-5-3-3 — Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder]]</div>|<div class="cpsessionviewpapertitle">Objective Assessment of Social Skills Using Automated Language Analysis for Identification of Schizophrenia and Bipolar Disorder</div><div class="cpsessionviewpaperauthor">[[Rohit Voleti|AUTHOR Rohit Voleti]], [[Stephanie Woolridge|AUTHOR Stephanie Woolridge]], [[Julie M. Liss|AUTHOR Julie M. Liss]], [[Melissa Milanovic|AUTHOR Melissa Milanovic]], [[Christopher R. Bowie|AUTHOR Christopher R. Bowie]], [[Visar Berisha|AUTHOR Visar Berisha]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-3-4|PAPER Tue-O-5-3-4 — Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder]]</div>|<div class="cpsessionviewpapertitle">Into the Wild: Transitioning from Recognizing Mood in Clinical Interactions to Personal Conversations for Individuals with Bipolar Disorder</div><div class="cpsessionviewpaperauthor">[[Katie Matton|AUTHOR Katie Matton]], [[Melvin G. McInnis|AUTHOR Melvin G. McInnis]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192283.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-3-5|PAPER Tue-O-5-3-5 — Detecting Depression with Word-Level Multimodal Fusion]]</div>|<div class="cpsessionviewpapertitle">Detecting Depression with Word-Level Multimodal Fusion</div><div class="cpsessionviewpaperauthor">[[Morteza Rohanian|AUTHOR Morteza Rohanian]], [[Julian Hough|AUTHOR Julian Hough]], [[Matthew Purver|AUTHOR Matthew Purver]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-3-6|PAPER Tue-O-5-3-6 — Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables]]</div>|<div class="cpsessionviewpapertitle">Assessing Neuromotor Coordination in Depression Using Inverted Vocal Tract Variables</div><div class="cpsessionviewpaperauthor">[[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]], [[Adam C. Lammert|AUTHOR Adam C. Lammert]], [[Nadee Seneviratne|AUTHOR Nadee Seneviratne]], [[Thomas F. Quatieri|AUTHOR Thomas F. Quatieri]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Frederic Bechet, Giuseppe Riccardi|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-4-1|PAPER Tue-O-5-4-1 — Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues]]</div>|<div class="cpsessionviewpapertitle">Towards Universal Dialogue Act Tagging for Task-Oriented Dialogues</div><div class="cpsessionviewpaperauthor">[[Shachi Paul|AUTHOR Shachi Paul]], [[Rahul Goel|AUTHOR Rahul Goel]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-4-2|PAPER Tue-O-5-4-2 — HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking]]</div>|<div class="cpsessionviewpapertitle">HyST: A Hybrid Approach for Flexible and Accurate Dialogue State Tracking</div><div class="cpsessionviewpaperauthor">[[Rahul Goel|AUTHOR Rahul Goel]], [[Shachi Paul|AUTHOR Shachi Paul]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191691.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-4-3|PAPER Tue-O-5-4-3 — Multi-Lingual Dialogue Act Recognition with Deep Learning Methods]]</div>|<div class="cpsessionviewpapertitle">Multi-Lingual Dialogue Act Recognition with Deep Learning Methods</div><div class="cpsessionviewpaperauthor">[[Jiří Martínek|AUTHOR Jiří Martínek]], [[Pavel Král|AUTHOR Pavel Král]], [[Ladislav Lenc|AUTHOR Ladislav Lenc]], [[Christophe Cerisara|AUTHOR Christophe Cerisara]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-4-4|PAPER Tue-O-5-4-4 — BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer]]</div>|<div class="cpsessionviewpapertitle">BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer</div><div class="cpsessionviewpaperauthor">[[Guan-Lin Chao|AUTHOR Guan-Lin Chao]], [[Ian Lane|AUTHOR Ian Lane]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-4-5|PAPER Tue-O-5-4-5 — Discovering Dialog Rules by Means of an Evolutionary Approach]]</div>|<div class="cpsessionviewpapertitle">Discovering Dialog Rules by Means of an Evolutionary Approach</div><div class="cpsessionviewpaperauthor">[[David Griol|AUTHOR David Griol]], [[Zoraida Callejas|AUTHOR Zoraida Callejas]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191315.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-4-6|PAPER Tue-O-5-4-6 — Active Learning for Domain Classification in a Commercial Spoken Personal Assistant]]</div>|<div class="cpsessionviewpapertitle">Active Learning for Domain Classification in a Commercial Spoken Personal Assistant</div><div class="cpsessionviewpaperauthor">[[Xi C. Chen|AUTHOR Xi C. Chen]], [[Adithya Sagar|AUTHOR Adithya Sagar]], [[Justine T. Kao|AUTHOR Justine T. Kao]], [[Tony Y. Li|AUTHOR Tony Y. Li]], [[Christopher Klein|AUTHOR Christopher Klein]], [[Stephen Pulman|AUTHOR Stephen Pulman]], [[Ashish Garg|AUTHOR Ashish Garg]], [[Jason D. Williams|AUTHOR Jason D. Williams]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Tomi Kinnunen, Lukáš Burget|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191351.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-5-1|PAPER Tue-O-5-5-1 — The 2018 NIST Speaker Recognition Evaluation]]</div>|<div class="cpsessionviewpapertitle">The 2018 NIST Speaker Recognition Evaluation</div><div class="cpsessionviewpaperauthor">[[Seyed Omid Sadjadi|AUTHOR Seyed Omid Sadjadi]], [[Craig Greenberg|AUTHOR Craig Greenberg]], [[Elliot Singer|AUTHOR Elliot Singer]], [[Douglas Reynolds|AUTHOR Douglas Reynolds]], [[Lisa Mason|AUTHOR Lisa Mason]], [[Jaime Hernandez-Cordero|AUTHOR Jaime Hernandez-Cordero]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192713.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-5-2|PAPER Tue-O-5-5-2 — State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18]]</div>|<div class="cpsessionviewpapertitle">State-of-the-Art Speaker Recognition for Telephone and Video Speech: The JHU-MIT Submission for NIST SRE18</div><div class="cpsessionviewpaperauthor">[[Jesús Villalba|AUTHOR Jesús Villalba]], [[Nanxin Chen|AUTHOR Nanxin Chen]], [[David Snyder|AUTHOR David Snyder]], [[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]], [[Alan McCree|AUTHOR Alan McCree]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Jonas Borgstrom|AUTHOR Jonas Borgstrom]], [[Fred Richardson|AUTHOR Fred Richardson]], [[Suwon Shon|AUTHOR Suwon Shon]], [[François Grondin|AUTHOR François Grondin]], [[Réda Dehak|AUTHOR Réda Dehak]], [[Leibny Paola García-Perera|AUTHOR Leibny Paola García-Perera]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Pedro A. Torres-Carrasquillo|AUTHOR Pedro A. Torres-Carrasquillo]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192205.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-5-3|PAPER Tue-O-5-5-3 — x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">x-Vector DNN Refinement with Full-Length Recordings for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]], [[David Snyder|AUTHOR David Snyder]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Alan McCree|AUTHOR Alan McCree]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-5-4|PAPER Tue-O-5-5-4 — I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences]]</div>|<div class="cpsessionviewpapertitle">I4U Submission to NIST SRE 2018: Leveraging from a Decade of Shared Experiences</div><div class="cpsessionviewpaperauthor">[[Kong Aik Lee|AUTHOR Kong Aik Lee]], [[Ville Hautamäki|AUTHOR Ville Hautamäki]], [[Tomi H. Kinnunen|AUTHOR Tomi H. Kinnunen]], [[Hitoshi Yamamoto|AUTHOR Hitoshi Yamamoto]], [[Koji Okabe|AUTHOR Koji Okabe]], [[Ville Vestman|AUTHOR Ville Vestman]], [[Jing Huang|AUTHOR Jing Huang]], [[Guohong Ding|AUTHOR Guohong Ding]], [[Hanwu Sun|AUTHOR Hanwu Sun]], [[Anthony Larcher|AUTHOR Anthony Larcher]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]], [[Mickael Rouvier|AUTHOR Mickael Rouvier]], [[Pierre-Michel Bousquet|AUTHOR Pierre-Michel Bousquet]], [[Wei Rao|AUTHOR Wei Rao]], [[Qing Wang|AUTHOR Qing Wang]], [[Chunlei Zhang|AUTHOR Chunlei Zhang]], [[Fahimeh Bahmaninezhad|AUTHOR Fahimeh Bahmaninezhad]], [[Héctor Delgado|AUTHOR Héctor Delgado]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193179.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-5-5|PAPER Tue-O-5-5-5 — Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge]]</div>|<div class="cpsessionviewpapertitle">Pindrop Labs’ Submission to the First Multi-Target Speaker Detection and Identification Challenge</div><div class="cpsessionviewpaperauthor">[[Elie Khoury|AUTHOR Elie Khoury]], [[Khaled Lakhdhar|AUTHOR Khaled Lakhdhar]], [[Andrew Vaughan|AUTHOR Andrew Vaughan]], [[Ganesh Sivaraman|AUTHOR Ganesh Sivaraman]], [[Parav Nagarsheth|AUTHOR Parav Nagarsheth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-O-5-5-6|PAPER Tue-O-5-5-6 — Speaker Recognition Benchmark Using the CHiME-5 Corpus]]</div>|<div class="cpsessionviewpapertitle">Speaker Recognition Benchmark Using the CHiME-5 Corpus</div><div class="cpsessionviewpaperauthor">[[Daniel Garcia-Romero|AUTHOR Daniel Garcia-Romero]], [[David Snyder|AUTHOR David Snyder]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Alan McCree|AUTHOR Alan McCree]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Cassia Valentini-Botinhao|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-1|PAPER Tue-P-3-A-1 — Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems]]</div>|<div class="cpsessionviewpapertitle">Investigating the Effects of Noisy and Reverberant Speech in Text-to-Speech Systems</div><div class="cpsessionviewpaperauthor">[[David Ayllón|AUTHOR David Ayllón]], [[Héctor A. Sánchez-Hevia|AUTHOR Héctor A. Sánchez-Hevia]], [[Carol Figueroa|AUTHOR Carol Figueroa]], [[Pierre Lanchantin|AUTHOR Pierre Lanchantin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-2|PAPER Tue-P-3-A-2 — Selection and Training Schemes for Improving TTS Voice Built on Found Data]]</div>|<div class="cpsessionviewpapertitle">Selection and Training Schemes for Improving TTS Voice Built on Found Data</div><div class="cpsessionviewpaperauthor">[[F.-Y. Kuo|AUTHOR F.-Y. Kuo]], [[I.C. Ouyang|AUTHOR I.C. Ouyang]], [[S. Aryal|AUTHOR S. Aryal]], [[Pierre Lanchantin|AUTHOR Pierre Lanchantin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192448.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-3|PAPER Tue-P-3-A-3 — All Together Now: The Living Audio Dataset]]</div>|<div class="cpsessionviewpapertitle">All Together Now: The Living Audio Dataset</div><div class="cpsessionviewpaperauthor">[[David A. Braude|AUTHOR David A. Braude]], [[Matthew P. Aylett|AUTHOR Matthew P. Aylett]], [[Caoimhín Laoide-Kemp|AUTHOR Caoimhín Laoide-Kemp]], [[Simone Ashby|AUTHOR Simone Ashby]], [[Kristen M. Scott|AUTHOR Kristen M. Scott]], [[Brian Ó Raghallaigh|AUTHOR Brian Ó Raghallaigh]], [[Anna Braudo|AUTHOR Anna Braudo]], [[Alex Brouwer|AUTHOR Alex Brouwer]], [[Adriana Stan|AUTHOR Adriana Stan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192441.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-4|PAPER Tue-P-3-A-4 — LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech]]</div>|<div class="cpsessionviewpapertitle">LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech</div><div class="cpsessionviewpaperauthor">[[Heiga Zen|AUTHOR Heiga Zen]], [[Viet Dang|AUTHOR Viet Dang]], [[Rob Clark|AUTHOR Rob Clark]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Ye Jia|AUTHOR Ye Jia]], [[Zhifeng Chen|AUTHOR Zhifeng Chen]], [[Yonghui Wu|AUTHOR Yonghui Wu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192190.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-5|PAPER Tue-P-3-A-5 — Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis]]</div>|<div class="cpsessionviewpapertitle">Corpus Design Using Convolutional Auto-Encoder Embeddings for Audio-Book Synthesis</div><div class="cpsessionviewpaperauthor">[[Meysam Shamsi|AUTHOR Meysam Shamsi]], [[Damien Lolive|AUTHOR Damien Lolive]], [[Nelly Barbot|AUTHOR Nelly Barbot]], [[Jonathan Chevelu|AUTHOR Jonathan Chevelu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192188.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-6|PAPER Tue-P-3-A-6 — Evaluating Intention Communication by TTS Using Explicit Definitions of Illocutionary Act Performance]]</div>|<div class="cpsessionviewpapertitle">Evaluating Intention Communication by TTS Using Explicit Definitions of Illocutionary Act Performance</div><div class="cpsessionviewpaperauthor">[[Nobukatsu Hojo|AUTHOR Nobukatsu Hojo]], [[Noboru Miyazaki|AUTHOR Noboru Miyazaki]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192003.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-7|PAPER Tue-P-3-A-7 — MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">MOSNet: Deep Learning-Based Objective Assessment for Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Chen-Chou Lo|AUTHOR Chen-Chou Lo]], [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]], [[Wen-Chin Huang|AUTHOR Wen-Chin Huang]], [[Xin Wang|AUTHOR Xin Wang]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191824.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-8|PAPER Tue-P-3-A-8 — Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data]]</div>|<div class="cpsessionviewpapertitle">Investigating the Robustness of Sequence-to-Sequence Text-to-Speech Models to Imperfectly-Transcribed Training Data</div><div class="cpsessionviewpaperauthor">[[Jason Fong|AUTHOR Jason Fong]], [[Pilar Oplustil Gallegos|AUTHOR Pilar Oplustil Gallegos]], [[Zack Hodari|AUTHOR Zack Hodari]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-9|PAPER Tue-P-3-A-9 — Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise]]</div>|<div class="cpsessionviewpapertitle">Using Pupil Dilation to Measure Cognitive Load When Listening to Text-to-Speech in Quiet and in Noise</div><div class="cpsessionviewpaperauthor">[[Avashna Govender|AUTHOR Avashna Govender]], [[Anita E. Wagner|AUTHOR Anita E. Wagner]], [[Simon King|AUTHOR Simon King]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-10|PAPER Tue-P-3-A-10 — A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research]]</div>|<div class="cpsessionviewpapertitle">A Multimodal Real-Time MRI Articulatory Corpus of French for Speech Research</div><div class="cpsessionviewpaperauthor">[[Ioannis K. Douros|AUTHOR Ioannis K. Douros]], [[Jacques Felblinger|AUTHOR Jacques Felblinger]], [[Jens Frahm|AUTHOR Jens Frahm]], [[Karyna Isaieva|AUTHOR Karyna Isaieva]], [[Arun A. Joseph|AUTHOR Arun A. Joseph]], [[Yves Laprie|AUTHOR Yves Laprie]], [[Freddy Odille|AUTHOR Freddy Odille]], [[Anastasiia Tsukanova|AUTHOR Anastasiia Tsukanova]], [[Dirk Voit|AUTHOR Dirk Voit]], [[Pierre-André Vuissoz|AUTHOR Pierre-André Vuissoz]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191614.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-11|PAPER Tue-P-3-A-11 — A Chinese Dataset for Identifying Speakers in Novels]]</div>|<div class="cpsessionviewpapertitle">A Chinese Dataset for Identifying Speakers in Novels</div><div class="cpsessionviewpaperauthor">[[Jia-Xiang Chen|AUTHOR Jia-Xiang Chen]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191500.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-A-12|PAPER Tue-P-3-A-12 — CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages]]</div>|<div class="cpsessionviewpapertitle">CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages</div><div class="cpsessionviewpaperauthor">[[Kyubyong Park|AUTHOR Kyubyong Park]], [[Thomas Mulc|AUTHOR Thomas Mulc]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Alberto Abad|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193020.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-1|PAPER Tue-P-3-B-1 — Attention Model for Articulatory Features Detection]]</div>|<div class="cpsessionviewpapertitle">Attention Model for Articulatory Features Detection</div><div class="cpsessionviewpaperauthor">[[Ievgen Karaulov|AUTHOR Ievgen Karaulov]], [[Dmytro Tkanov|AUTHOR Dmytro Tkanov]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192678.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-2|PAPER Tue-P-3-B-2 — Unbiased Semi-Supervised LF-MMI Training Using Dropout]]</div>|<div class="cpsessionviewpapertitle">Unbiased Semi-Supervised LF-MMI Training Using Dropout</div><div class="cpsessionviewpaperauthor">[[Sibo Tong|AUTHOR Sibo Tong]], [[Apoorv Vyas|AUTHOR Apoorv Vyas]], [[Philip N. Garner|AUTHOR Philip N. Garner]], [[Hervé Bourlard|AUTHOR Hervé Bourlard]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-3|PAPER Tue-P-3-B-3 — Acoustic Model Optimization Based on Evolutionary Stochastic Gradient Descent with Anchors for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Acoustic Model Optimization Based on Evolutionary Stochastic Gradient Descent with Anchors for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[Michael Picheny|AUTHOR Michael Picheny]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192608.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-4|PAPER Tue-P-3-B-4 — Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Whether to Pretrain DNN or not?: An Empirical Analysis for Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Nirmesh J. Shah|AUTHOR Nirmesh J. Shah]], [[Hardik B. Sailor|AUTHOR Hardik B. Sailor]], [[Hemant A. Patil|AUTHOR Hemant A. Patil]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192587.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-5|PAPER Tue-P-3-B-5 — Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Detection of Glottal Closure Instants from Raw Speech Using Convolutional Neural Networks</div><div class="cpsessionviewpaperauthor">[[Mohit Goyal|AUTHOR Mohit Goyal]], [[Varun Srivastava|AUTHOR Varun Srivastava]], [[Prathosh A. P.|AUTHOR Prathosh A. P.]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192533.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-6|PAPER Tue-P-3-B-6 — Lattice-Based Lightly-Supervised Acoustic Model Training]]</div>|<div class="cpsessionviewpapertitle">Lattice-Based Lightly-Supervised Acoustic Model Training</div><div class="cpsessionviewpaperauthor">[[Joachim Fainberg|AUTHOR Joachim Fainberg]], [[Ondřej Klejch|AUTHOR Ondřej Klejch]], [[Steve Renals|AUTHOR Steve Renals]], [[Peter Bell|AUTHOR Peter Bell]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192254.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-7|PAPER Tue-P-3-B-7 — Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR]]</div>|<div class="cpsessionviewpapertitle">Comparison of Lattice-Free and Lattice-Based Sequence Discriminative Training Criteria for LVCSR</div><div class="cpsessionviewpaperauthor">[[Wilfried Michel|AUTHOR Wilfried Michel]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192111.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-8|PAPER Tue-P-3-B-8 — End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders]]</div>|<div class="cpsessionviewpapertitle">End-to-End Automatic Speech Recognition with a Reconstruction Criterion Using Speech-to-Text and Text-to-Speech Encoder-Decoders</div><div class="cpsessionviewpaperauthor">[[Ryo Masumura|AUTHOR Ryo Masumura]], [[Hiroshi Sato|AUTHOR Hiroshi Sato]], [[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Yusuke Ijima|AUTHOR Yusuke Ijima]], [[Takanobu Oba|AUTHOR Takanobu Oba]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191975.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-9|PAPER Tue-P-3-B-9 — Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning]]</div>|<div class="cpsessionviewpapertitle">Char+CV-CTC: Combining Graphemes and Consonant/Vowel Units for CTC-Based ASR Using Multitask Learning</div><div class="cpsessionviewpaperauthor">[[Abdelwahab Heba|AUTHOR Abdelwahab Heba]], [[Thomas Pellegrini|AUTHOR Thomas Pellegrini]], [[Jean-Pierre Lorré|AUTHOR Jean-Pierre Lorré]], [[Régine Andre-Obrecht|AUTHOR Régine Andre-Obrecht]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191952.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-10|PAPER Tue-P-3-B-10 — Guiding CTC Posterior Spike Timings for Improved Posterior Fusion and Knowledge Distillation]]</div>|<div class="cpsessionviewpapertitle">Guiding CTC Posterior Spike Timings for Improved Posterior Fusion and Knowledge Distillation</div><div class="cpsessionviewpaperauthor">[[Gakuto Kurata|AUTHOR Gakuto Kurata]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191930.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-11|PAPER Tue-P-3-B-11 — Direct Neuron-Wise Fusion of Cognate Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Direct Neuron-Wise Fusion of Cognate Neural Networks</div><div class="cpsessionviewpaperauthor">[[Takashi Fukuda|AUTHOR Takashi Fukuda]], [[Masayuki Suzuki|AUTHOR Masayuki Suzuki]], [[Gakuto Kurata|AUTHOR Gakuto Kurata]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191859.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-12|PAPER Tue-P-3-B-12 — Two Tiered Distributed Training Algorithm for Acoustic Modeling]]</div>|<div class="cpsessionviewpapertitle">Two Tiered Distributed Training Algorithm for Acoustic Modeling</div><div class="cpsessionviewpaperauthor">[[Pranav Ladkat|AUTHOR Pranav Ladkat]], [[Oleg Rybakov|AUTHOR Oleg Rybakov]], [[Radhika Arava|AUTHOR Radhika Arava]], [[Sree Hari Krishnan Parthasarathi|AUTHOR Sree Hari Krishnan Parthasarathi]], [[I-Fan Chen|AUTHOR I-Fan Chen]], [[Nikko Strom|AUTHOR Nikko Strom]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191717.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-13|PAPER Tue-P-3-B-13 — Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR]]</div>|<div class="cpsessionviewpapertitle">Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR</div><div class="cpsessionviewpaperauthor">[[Pin-Tuan Huang|AUTHOR Pin-Tuan Huang]], [[Hung-Shin Lee|AUTHOR Hung-Shin Lee]], [[Syu-Siang Wang|AUTHOR Syu-Siang Wang]], [[Kuan-Yu Chen|AUTHOR Kuan-Yu Chen]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-14|PAPER Tue-P-3-B-14 — Multi-Task CTC Training with Auxiliary Feature Reconstruction for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Task CTC Training with Auxiliary Feature Reconstruction for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Gakuto Kurata|AUTHOR Gakuto Kurata]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191117.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-B-15|PAPER Tue-P-3-B-15 — Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results]]</div>|<div class="cpsessionviewpapertitle">Framewise Supervised Training Towards End-to-End Speech Recognition Models: First Results</div><div class="cpsessionviewpaperauthor">[[Mohan Li|AUTHOR Mohan Li]], [[Yuanjiang Cao|AUTHOR Yuanjiang Cao]], [[Weicong Zhou|AUTHOR Weicong Zhou]], [[Min Liu|AUTHOR Min Liu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Emily Mower Provost|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-1|PAPER Tue-P-3-C-1 — Deep Hierarchical Fusion with Application in Sentiment Analysis]]</div>|<div class="cpsessionviewpapertitle">Deep Hierarchical Fusion with Application in Sentiment Analysis</div><div class="cpsessionviewpaperauthor">[[Efthymios Georgiou|AUTHOR Efthymios Georgiou]], [[Charilaos Papaioannou|AUTHOR Charilaos Papaioannou]], [[Alexandros Potamianos|AUTHOR Alexandros Potamianos]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-2|PAPER Tue-P-3-C-2 — Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice]]</div>|<div class="cpsessionviewpapertitle">Leveraging Acoustic Cues and Paralinguistic Embeddings to Detect Expression from Voice</div><div class="cpsessionviewpaperauthor">[[Vikramjit Mitra|AUTHOR Vikramjit Mitra]], [[Sue Booker|AUTHOR Sue Booker]], [[Erik Marchi|AUTHOR Erik Marchi]], [[David Scott Farrar|AUTHOR David Scott Farrar]], [[Ute Dorothea Peitz|AUTHOR Ute Dorothea Peitz]], [[Bridget Cheng|AUTHOR Bridget Cheng]], [[Ermine Teves|AUTHOR Ermine Teves]], [[Anuj Mehta|AUTHOR Anuj Mehta]], [[Devang Naik|AUTHOR Devang Naik]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192753.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-3|PAPER Tue-P-3-C-3 — Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Analysis of Deep Learning Architectures for Cross-Corpus Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Jack Parry|AUTHOR Jack Parry]], [[Dimitri Palaz|AUTHOR Dimitri Palaz]], [[Georgia Clarke|AUTHOR Georgia Clarke]], [[Pauline Lecomte|AUTHOR Pauline Lecomte]], [[Rebecca Mead|AUTHOR Rebecca Mead]], [[Michael Berger|AUTHOR Michael Berger]], [[Gregor Hofer|AUTHOR Gregor Hofer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192624.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-4|PAPER Tue-P-3-C-4 — A Path Signature Approach for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">A Path Signature Approach for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Bo Wang|AUTHOR Bo Wang]], [[Maria Liakata|AUTHOR Maria Liakata]], [[Hao Ni|AUTHOR Hao Ni]], [[Terry Lyons|AUTHOR Terry Lyons]], [[Alejo J. Nevado-Holgado|AUTHOR Alejo J. Nevado-Holgado]], [[Kate Saunders|AUTHOR Kate Saunders]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192502.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-5|PAPER Tue-P-3-C-5 — Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts]]</div>|<div class="cpsessionviewpapertitle">Employing Bottleneck and Convolutional Features for Speech-Based Physical Load Detection on Limited Data Amounts</div><div class="cpsessionviewpaperauthor">[[Olga Egorow|AUTHOR Olga Egorow]], [[Tarik Mrech|AUTHOR Tarik Mrech]], [[Norman Weißkirchen|AUTHOR Norman Weißkirchen]], [[Andreas Wendemuth|AUTHOR Andreas Wendemuth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-6|PAPER Tue-P-3-C-6 — Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling]]</div>|<div class="cpsessionviewpapertitle">Speech Emotion Recognition in Dyadic Dialogues with Attentive Interaction Modeling</div><div class="cpsessionviewpaperauthor">[[Jinming Zhao|AUTHOR Jinming Zhao]], [[Shizhe Chen|AUTHOR Shizhe Chen]], [[Jingjun Liang|AUTHOR Jingjun Liang]], [[Qin Jin|AUTHOR Qin Jin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-7|PAPER Tue-P-3-C-7 — Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task]]</div>|<div class="cpsessionviewpapertitle">Predicting Group Performances Using a Personality Composite-Network Architecture During Collaborative Task</div><div class="cpsessionviewpaperauthor">[[Shun-Chang Zhong|AUTHOR Shun-Chang Zhong]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chun-Min Chang|AUTHOR Chun-Min Chang]], [[Yi-Ching Liu|AUTHOR Yi-Ching Liu]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-8|PAPER Tue-P-3-C-8 — Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning]]</div>|<div class="cpsessionviewpapertitle">Enforcing Semantic Consistency for Cross Corpus Valence Regression from Speech Using Adversarial Discrepancy Learning</div><div class="cpsessionviewpaperauthor">[[Gao-Yi Chao|AUTHOR Gao-Yi Chao]], [[Yun-Shao Lin|AUTHOR Yun-Shao Lin]], [[Chun-Min Chang|AUTHOR Chun-Min Chang]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191968.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-9|PAPER Tue-P-3-C-9 — Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Deep Learning of Segment-Level Feature Representation with Multiple Instance Learning for Utterance-Level Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Shuiyang Mao|AUTHOR Shuiyang Mao]], [[P.C. Ching|AUTHOR P.C. Ching]], [[Tan Lee|AUTHOR Tan Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-10|PAPER Tue-P-3-C-10 — Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Towards Robust Speech Emotion Recognition Using Deep Residual Networks for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Andreas Triantafyllopoulos|AUTHOR Andreas Triantafyllopoulos]], [[Gil Keren|AUTHOR Gil Keren]], [[Johannes Wagner|AUTHOR Johannes Wagner]], [[Ingmar Steiner|AUTHOR Ingmar Steiner]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191683.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-11|PAPER Tue-P-3-C-11 — Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Towards Discriminative Representations and Unbiased Predictions: Class-Specific Angular Softmax for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Zhixuan Li|AUTHOR Zhixuan Li]], [[Liang He|AUTHOR Liang He]], [[Jingyang Li|AUTHOR Jingyang Li]], [[Li Wang|AUTHOR Li Wang]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-C-12|PAPER Tue-P-3-C-12 — Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Learning Temporal Clusters Using Capsule Routing for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Md. Asif Jalal|AUTHOR Md. Asif Jalal]], [[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Roger K. Moore|AUTHOR Roger K. Moore]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Michele Gubian|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192934.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-1|PAPER Tue-P-3-D-1 — L2 Pronunciation Accuracy and Context: A Pilot Study on the Realization of Geminates in Italian as L2 by French Learners]]</div>|<div class="cpsessionviewpapertitle">L2 Pronunciation Accuracy and Context: A Pilot Study on the Realization of Geminates in Italian as L2 by French Learners</div><div class="cpsessionviewpaperauthor">[[Sonia d’Apolito|AUTHOR Sonia d’Apolito]], [[Barbara Gili Fivela|AUTHOR Barbara Gili Fivela]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192866.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-2|PAPER Tue-P-3-D-2 — The Monophthongs of Formal Nigerian English: An Acoustic Analysis]]</div>|<div class="cpsessionviewpapertitle">The Monophthongs of Formal Nigerian English: An Acoustic Analysis</div><div class="cpsessionviewpaperauthor">[[Nisad Jamakovic|AUTHOR Nisad Jamakovic]], [[Robert Fuchs|AUTHOR Robert Fuchs]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-3|PAPER Tue-P-3-D-3 — Quantifying Fundamental Frequency Modulation as a Function of Language, Speaking Style and Speaker]]</div>|<div class="cpsessionviewpapertitle">Quantifying Fundamental Frequency Modulation as a Function of Language, Speaking Style and Speaker</div><div class="cpsessionviewpaperauthor">[[Pablo Arantes|AUTHOR Pablo Arantes]], [[Anders Eriksson|AUTHOR Anders Eriksson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192529.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-4|PAPER Tue-P-3-D-4 — The Voicing Contrast in Stops and Affricates in the Western Armenian of Lebanon]]</div>|<div class="cpsessionviewpapertitle">The Voicing Contrast in Stops and Affricates in the Western Armenian of Lebanon</div><div class="cpsessionviewpaperauthor">[[Niamh E. Kelly|AUTHOR Niamh E. Kelly]], [[Lara Keshishian|AUTHOR Lara Keshishian]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192329.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-5|PAPER Tue-P-3-D-5 — “ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora]]</div>|<div class="cpsessionviewpapertitle">“ Gra[f] e!” Word-Final Devoicing of Obstruents in Standard French: An Acoustic Study Based on Large Corpora</div><div class="cpsessionviewpaperauthor">[[Adèle Jatteau|AUTHOR Adèle Jatteau]], [[Ioana Vasilescu|AUTHOR Ioana Vasilescu]], [[Lori Lamel|AUTHOR Lori Lamel]], [[Martine Adda-Decker|AUTHOR Martine Adda-Decker]], [[Nicolas Audibert|AUTHOR Nicolas Audibert]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-6|PAPER Tue-P-3-D-6 — Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game]]</div>|<div class="cpsessionviewpapertitle">Acoustic Indicators of Deception in Mandarin Daily Conversations Recorded from an Interactive Game</div><div class="cpsessionviewpaperauthor">[[Chih-Hsiang Huang|AUTHOR Chih-Hsiang Huang]], [[Huang-Cheng Chou|AUTHOR Huang-Cheng Chou]], [[Yi-Tong Wu|AUTHOR Yi-Tong Wu]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]], [[Yi-Wen Liu|AUTHOR Yi-Wen Liu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-7|PAPER Tue-P-3-D-7 — Prosodic Effects on Plosive Duration in German and Austrian German]]</div>|<div class="cpsessionviewpapertitle">Prosodic Effects on Plosive Duration in German and Austrian German</div><div class="cpsessionviewpaperauthor">[[Barbara Schuppler|AUTHOR Barbara Schuppler]], [[Margaret Zellers|AUTHOR Margaret Zellers]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192184.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-8|PAPER Tue-P-3-D-8 — Cross-Lingual Consistency of Phonological Features: An Empirical Study]]</div>|<div class="cpsessionviewpapertitle">Cross-Lingual Consistency of Phonological Features: An Empirical Study</div><div class="cpsessionviewpaperauthor">[[Cibu Johny|AUTHOR Cibu Johny]], [[Alexander Gutkin|AUTHOR Alexander Gutkin]], [[Martin Jansche|AUTHOR Martin Jansche]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-9|PAPER Tue-P-3-D-9 — Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications]]</div>|<div class="cpsessionviewpapertitle">Are IP Initial Vowels Acoustically More Distinct? Results from LDA and CNN Classifications</div><div class="cpsessionviewpaperauthor">[[Fanny Guitard-Ivent|AUTHOR Fanny Guitard-Ivent]], [[Gabriele Chignoli|AUTHOR Gabriele Chignoli]], [[Cécile Fougeron|AUTHOR Cécile Fougeron]], [[Laurianne Georgeton|AUTHOR Laurianne Georgeton]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192102.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-10|PAPER Tue-P-3-D-10 — Neural Network-Based Modeling of Phonetic Durations]]</div>|<div class="cpsessionviewpapertitle">Neural Network-Based Modeling of Phonetic Durations</div><div class="cpsessionviewpaperauthor">[[Xizi Wei|AUTHOR Xizi Wei]], [[Melvyn Hunt|AUTHOR Melvyn Hunt]], [[Adrian Skilling|AUTHOR Adrian Skilling]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191806.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-11|PAPER Tue-P-3-D-11 — An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence]]</div>|<div class="cpsessionviewpapertitle">An Acoustic Study of Vowel Undershoot in a System with Several Degrees of Prominence</div><div class="cpsessionviewpaperauthor">[[Janina Mołczanow|AUTHOR Janina Mołczanow]], [[Beata Łukaszewicz|AUTHOR Beata Łukaszewicz]], [[Anna Łukaszewicz|AUTHOR Anna Łukaszewicz]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191664.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-12|PAPER Tue-P-3-D-12 — A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes]]</div>|<div class="cpsessionviewpapertitle">A Preliminary Study of Charismatic Speech on YouTube: Correlating Prosodic Variation with Counts of Subscribers, Views and Likes</div><div class="cpsessionviewpaperauthor">[[Stephanie Berger|AUTHOR Stephanie Berger]], [[Oliver Niebuhr|AUTHOR Oliver Niebuhr]], [[Margaret Zellers|AUTHOR Margaret Zellers]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191412.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-13|PAPER Tue-P-3-D-13 — Phonetic Detail Encoding in Explaining the Size of Speech Planning Window]]</div>|<div class="cpsessionviewpapertitle">Phonetic Detail Encoding in Explaining the Size of Speech Planning Window</div><div class="cpsessionviewpaperauthor">[[Shan Luo|AUTHOR Shan Luo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191189.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-14|PAPER Tue-P-3-D-14 — Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic]]</div>|<div class="cpsessionviewpapertitle">Acoustic Cues to Topic and Narrow Focus in Egyptian Arabic</div><div class="cpsessionviewpaperauthor">[[Dina El Zarka|AUTHOR Dina El Zarka]], [[Barbara Schuppler|AUTHOR Barbara Schuppler]], [[Francesco Cangemi|AUTHOR Francesco Cangemi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192196.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-D-15|PAPER Tue-P-3-D-15 — Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female]]</div>|<div class="cpsessionviewpapertitle">Acoustic and Articulatory Study of Ewe Vowels: A Comparative Study of Male and Female</div><div class="cpsessionviewpaperauthor">[[Kowovi Comivi Alowonou|AUTHOR Kowovi Comivi Alowonou]], [[Jianguo Wei|AUTHOR Jianguo Wei]], [[Wenhuan Lu|AUTHOR Wenhuan Lu]], [[Zhicheng Liu|AUTHOR Zhicheng Liu]], [[Kiyoshi Honda|AUTHOR Kiyoshi Honda]], [[Jianwu Dang|AUTHOR Jianwu Dang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Mahadeva Prasanna|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192712.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-1|PAPER Tue-P-3-E-1 — Speech Augmentation via Speaker-Specific Noise in Unseen Environment]]</div>|<div class="cpsessionviewpapertitle">Speech Augmentation via Speaker-Specific Noise in Unseen Environment</div><div class="cpsessionviewpaperauthor">[[Ya’nan Guo|AUTHOR Ya’nan Guo]], [[Ziping Zhao|AUTHOR Ziping Zhao]], [[Yide Ma|AUTHOR Yide Ma]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191567.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-2|PAPER Tue-P-3-E-2 — UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition]]</div>|<div class="cpsessionviewpapertitle">UNetGAN: A Robust Speech Enhancement Approach in Time Domain for Extremely Low Signal-to-Noise Ratio Condition</div><div class="cpsessionviewpaperauthor">[[Xiang Hao|AUTHOR Xiang Hao]], [[Xiangdong Su|AUTHOR Xiangdong Su]], [[Zhiyu Wang|AUTHOR Zhiyu Wang]], [[Hui Zhang|AUTHOR Hui Zhang]], [[Batushiren|AUTHOR Batushiren]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-3|PAPER Tue-P-3-E-3 — Towards Generalized Speech Enhancement with Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Towards Generalized Speech Enhancement with Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Santiago Pascual|AUTHOR Santiago Pascual]], [[Joan Serrà|AUTHOR Joan Serrà]], [[Antonio Bonafonte|AUTHOR Antonio Bonafonte]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192472.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-4|PAPER Tue-P-3-E-4 — A Convolutional Neural Network with Non-Local Module for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Convolutional Neural Network with Non-Local Module for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Xiaoqi Li|AUTHOR Xiaoqi Li]], [[Yaxing Li|AUTHOR Yaxing Li]], [[Meng Li|AUTHOR Meng Li]], [[Shan Xu|AUTHOR Shan Xu]], [[Yuanjie Dong|AUTHOR Yuanjie Dong]], [[Xinrong Sun|AUTHOR Xinrong Sun]], [[Shengwu Xiong|AUTHOR Shengwu Xiong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-5|PAPER Tue-P-3-E-5 — IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network]]</div>|<div class="cpsessionviewpapertitle">IA-NET: Acceleration and Compression of Speech Enhancement Using Integer-Adder Deep Neural Network</div><div class="cpsessionviewpaperauthor">[[Yu-Chen Lin|AUTHOR Yu-Chen Lin]], [[Yi-Te Hsu|AUTHOR Yi-Te Hsu]], [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Tei-Wei Kuo|AUTHOR Tei-Wei Kuo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192426.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-6|PAPER Tue-P-3-E-6 — KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">KL-Divergence Regularized Deep Neural Network Adaptation for Low-Resource Speaker-Dependent Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Li Chai|AUTHOR Li Chai]], [[Jun Du|AUTHOR Jun Du]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191745.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-7|PAPER Tue-P-3-E-7 — Speech Enhancement with Wide Residual Networks in Reverberant Environments]]</div>|<div class="cpsessionviewpapertitle">Speech Enhancement with Wide Residual Networks in Reverberant Environments</div><div class="cpsessionviewpaperauthor">[[Jorge Llombart|AUTHOR Jorge Llombart]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Luis Vicente|AUTHOR Luis Vicente]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193087.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-8|PAPER Tue-P-3-E-8 — A Scalable Noisy Speech Dataset and Online Subjective Test Framework]]</div>|<div class="cpsessionviewpapertitle">A Scalable Noisy Speech Dataset and Online Subjective Test Framework</div><div class="cpsessionviewpaperauthor">[[Chandan K.A. Reddy|AUTHOR Chandan K.A. Reddy]], [[Ebrahim Beyrami|AUTHOR Ebrahim Beyrami]], [[Jamie Pool|AUTHOR Jamie Pool]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Sriram Srinivasan|AUTHOR Sriram Srinivasan]], [[Johannes Gehrke|AUTHOR Johannes Gehrke]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192648.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-9|PAPER Tue-P-3-E-9 — Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN]]</div>|<div class="cpsessionviewpapertitle">Speech Enhancement for Noise-Robust Speech Synthesis Using Wasserstein GAN</div><div class="cpsessionviewpaperauthor">[[Nagaraj Adiga|AUTHOR Nagaraj Adiga]], [[Yannis Pantazis|AUTHOR Yannis Pantazis]], [[Vassilis Tsiaras|AUTHOR Vassilis Tsiaras]], [[Yannis Stylianou|AUTHOR Yannis Stylianou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192622.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-10|PAPER Tue-P-3-E-10 — A Non-Causal FFTNet Architecture for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">A Non-Causal FFTNet Architecture for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Muhammed Shifas P.V.|AUTHOR Muhammed Shifas P.V.]], [[Nagaraj Adiga|AUTHOR Nagaraj Adiga]], [[Vassilis Tsiaras|AUTHOR Vassilis Tsiaras]], [[Yannis Stylianou|AUTHOR Yannis Stylianou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191809.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-3-E-11|PAPER Tue-P-3-E-11 — Speech Enhancement with Variance Constrained Autoencoders]]</div>|<div class="cpsessionviewpapertitle">Speech Enhancement with Variance Constrained Autoencoders</div><div class="cpsessionviewpaperauthor">[[D.T. Braithwaite|AUTHOR D.T. Braithwaite]], [[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Febe De Wet|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193186.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-1|PAPER Tue-P-4-B-1 — A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech]]</div>|<div class="cpsessionviewpapertitle">A Deep Learning Approach to Automatic Characterisation of Rhythm in Non-Native English Speech</div><div class="cpsessionviewpaperauthor">[[Konstantinos Kyriakopoulos|AUTHOR Konstantinos Kyriakopoulos]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193067.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-2|PAPER Tue-P-4-B-2 — Language Learning Using Speech to Image Retrieval]]</div>|<div class="cpsessionviewpapertitle">Language Learning Using Speech to Image Retrieval</div><div class="cpsessionviewpaperauthor">[[Danny Merkx|AUTHOR Danny Merkx]], [[Stefan L. Frank|AUTHOR Stefan L. Frank]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-3|PAPER Tue-P-4-B-3 — Using Alexa for Flashcard-Based Learning]]</div>|<div class="cpsessionviewpapertitle">Using Alexa for Flashcard-Based Learning</div><div class="cpsessionviewpaperauthor">[[Lucy Skidmore|AUTHOR Lucy Skidmore]], [[Roger K. Moore|AUTHOR Roger K. Moore]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192301.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-4|PAPER Tue-P-4-B-4 — The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio]]</div>|<div class="cpsessionviewpapertitle">The 2019 Inaugural Fearless Steps Challenge: A Giant Leap for Naturalistic Audio</div><div class="cpsessionviewpaperauthor">[[John H.L. Hansen|AUTHOR John H.L. Hansen]], [[Aditya Joglekar|AUTHOR Aditya Joglekar]], [[Meena Chandra Shekhar|AUTHOR Meena Chandra Shekhar]], [[Vinay Kothapally|AUTHOR Vinay Kothapally]], [[Chengzhu Yu|AUTHOR Chengzhu Yu]], [[Lakshmish Kaushik|AUTHOR Lakshmish Kaushik]], [[Abhijeet Sangwan|AUTHOR Abhijeet Sangwan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192068.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-5|PAPER Tue-P-4-B-5 — Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models]]</div>|<div class="cpsessionviewpapertitle">Completely Unsupervised Phoneme Recognition by a Generative Adversarial Network Harmonized with Iteratively Refined Hidden Markov Models</div><div class="cpsessionviewpaperauthor">[[Kuan-Yu Chen|AUTHOR Kuan-Yu Chen]], [[Che-Ping Tsai|AUTHOR Che-Ping Tsai]], [[Da-Rong Liu|AUTHOR Da-Rong Liu]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]], [[Lin-shan Lee|AUTHOR Lin-shan Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-6|PAPER Tue-P-4-B-6 — Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction]]</div>|<div class="cpsessionviewpapertitle">Analysis of Native Listeners’ Facial Microexpressions While Shadowing Non-Native Speech — Potential of Shadowers’ Facial Expressions for Comprehensibility Prediction</div><div class="cpsessionviewpaperauthor">[[Tasavat Trisitichoke|AUTHOR Tasavat Trisitichoke]], [[Shintaro Ando|AUTHOR Shintaro Ando]], [[Daisuke Saito|AUTHOR Daisuke Saito]], [[Nobuaki Minematsu|AUTHOR Nobuaki Minematsu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191785.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-7|PAPER Tue-P-4-B-7 — Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance]]</div>|<div class="cpsessionviewpapertitle">Transparent Pronunciation Scoring Using Articulatorily Weighted Phoneme Edit Distance</div><div class="cpsessionviewpaperauthor">[[Reima Karhila|AUTHOR Reima Karhila]], [[Anna-Riikka Smolander|AUTHOR Anna-Riikka Smolander]], [[Sari Ylinen|AUTHOR Sari Ylinen]], [[Mikko Kurimo|AUTHOR Mikko Kurimo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-8|PAPER Tue-P-4-B-8 — Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment]]</div>|<div class="cpsessionviewpapertitle">Development of Robust Automated Scoring Models Using Adversarial Input for Oral Proficiency Assessment</div><div class="cpsessionviewpaperauthor">[[Su-Youn Yoon|AUTHOR Su-Youn Yoon]], [[Chong Min Lee|AUTHOR Chong Min Lee]], [[Klaus Zechner|AUTHOR Klaus Zechner]], [[Keelan Evanini|AUTHOR Keelan Evanini]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191706.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-9|PAPER Tue-P-4-B-9 — Impact of ASR Performance on Spoken Grammatical Error Detection]]</div>|<div class="cpsessionviewpapertitle">Impact of ASR Performance on Spoken Grammatical Error Detection</div><div class="cpsessionviewpaperauthor">[[Y. Lu|AUTHOR Y. Lu]], [[Mark J.F. Gales|AUTHOR Mark J.F. Gales]], [[Kate M. Knill|AUTHOR Kate M. Knill]], [[P. Manakul|AUTHOR P. Manakul]], [[L. Wang|AUTHOR L. Wang]], [[Y. Wang|AUTHOR Y. Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-B-10|PAPER Tue-P-4-B-10 — Self-Imitating Feedback Generation Using GAN for Computer-Assisted Pronunciation Training]]</div>|<div class="cpsessionviewpapertitle">Self-Imitating Feedback Generation Using GAN for Computer-Assisted Pronunciation Training</div><div class="cpsessionviewpaperauthor">[[Seung Hee Yang|AUTHOR Seung Hee Yang]], [[Minhwa Chung|AUTHOR Minhwa Chung]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193143.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-1|PAPER Tue-P-4-C-1 — Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog]]</div>|<div class="cpsessionviewpapertitle">Joint Student-Teacher Learning for Audio-Visual Scene-Aware Dialog</div><div class="cpsessionviewpaperauthor">[[Chiori Hori|AUTHOR Chiori Hori]], [[Anoop Cherian|AUTHOR Anoop Cherian]], [[Tim K. Marks|AUTHOR Tim K. Marks]], [[Takaaki Hori|AUTHOR Takaaki Hori]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193079.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-2|PAPER Tue-P-4-C-2 — Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations]]</div>|<div class="cpsessionviewpapertitle">Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations</div><div class="cpsessionviewpaperauthor">[[Karthik Gopalakrishnan|AUTHOR Karthik Gopalakrishnan]], [[Behnam Hedayatnia|AUTHOR Behnam Hedayatnia]], [[Qinlang Chen|AUTHOR Qinlang Chen]], [[Anna Gottardi|AUTHOR Anna Gottardi]], [[Sanjeev Kwatra|AUTHOR Sanjeev Kwatra]], [[Anu Venkatesh|AUTHOR Anu Venkatesh]], [[Raefer Gabriel|AUTHOR Raefer Gabriel]], [[Dilek Hakkani-Tür|AUTHOR Dilek Hakkani-Tür]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193062.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-3|PAPER Tue-P-4-C-3 — Analyzing Verbal and Nonverbal Features for Predicting Group Performance]]</div>|<div class="cpsessionviewpapertitle">Analyzing Verbal and Nonverbal Features for Predicting Group Performance</div><div class="cpsessionviewpaperauthor">[[Uliyana Kubasova|AUTHOR Uliyana Kubasova]], [[Gabriel Murray|AUTHOR Gabriel Murray]], [[McKenzie Braley|AUTHOR McKenzie Braley]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192829.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-4|PAPER Tue-P-4-C-4 — Identifying Therapist and Client Personae for Therapeutic Alliance Estimation]]</div>|<div class="cpsessionviewpapertitle">Identifying Therapist and Client Personae for Therapeutic Alliance Estimation</div><div class="cpsessionviewpaperauthor">[[Victor R. Martinez|AUTHOR Victor R. Martinez]], [[Nikolaos Flemotomos|AUTHOR Nikolaos Flemotomos]], [[Victor Ardulov|AUTHOR Victor Ardulov]], [[Krishna Somandepalli|AUTHOR Krishna Somandepalli]], [[Simon B. Goldberg|AUTHOR Simon B. Goldberg]], [[Zac E. Imel|AUTHOR Zac E. Imel]], [[David C. Atkins|AUTHOR David C. Atkins]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192820.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-5|PAPER Tue-P-4-C-5 — Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study]]</div>|<div class="cpsessionviewpapertitle">Do Hesitations Facilitate Processing of Partially Defective System Utterances? An Exploratory Eye Tracking Study</div><div class="cpsessionviewpaperauthor">[[Kristin Haake|AUTHOR Kristin Haake]], [[Sarah Schimke|AUTHOR Sarah Schimke]], [[Simon Betz|AUTHOR Simon Betz]], [[Sina Zarrieß|AUTHOR Sina Zarrieß]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192291.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-6|PAPER Tue-P-4-C-6 — Influence of Contextuality on Prosodic Realization of Information Structure in Chinese Dialogues]]</div>|<div class="cpsessionviewpapertitle">Influence of Contextuality on Prosodic Realization of Information Structure in Chinese Dialogues</div><div class="cpsessionviewpaperauthor">[[Bin Li|AUTHOR Bin Li]], [[Yuan Jia|AUTHOR Yuan Jia]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192163.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-7|PAPER Tue-P-4-C-7 — Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems]]</div>|<div class="cpsessionviewpapertitle">Cross-Lingual Transfer Learning for Affective Spoken Dialogue Systems</div><div class="cpsessionviewpaperauthor">[[Kristijan Gjoreski|AUTHOR Kristijan Gjoreski]], [[Aleksandar Gjoreski|AUTHOR Aleksandar Gjoreski]], [[Ivan Kraljevski|AUTHOR Ivan Kraljevski]], [[Diane Hirschfeld|AUTHOR Diane Hirschfeld]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191886.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-8|PAPER Tue-P-4-C-8 — Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue]]</div>|<div class="cpsessionviewpapertitle">Identifying Personality Traits Using Overlap Dynamics in Multiparty Dialogue</div><div class="cpsessionviewpaperauthor">[[Mingzhi Yu|AUTHOR Mingzhi Yu]], [[Emer Gilmartin|AUTHOR Emer Gilmartin]], [[Diane Litman|AUTHOR Diane Litman]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-9|PAPER Tue-P-4-C-9 — Identifying Mood Episodes Using Dialogue Features from Clinical Interviews]]</div>|<div class="cpsessionviewpapertitle">Identifying Mood Episodes Using Dialogue Features from Clinical Interviews</div><div class="cpsessionviewpaperauthor">[[Zakaria Aldeneh|AUTHOR Zakaria Aldeneh]], [[Mimansa Jaiswal|AUTHOR Mimansa Jaiswal]], [[Michael Picheny|AUTHOR Michael Picheny]], [[Melvin G. McInnis|AUTHOR Melvin G. McInnis]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191786.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-10|PAPER Tue-P-4-C-10 — Do Conversational Partners Entrain on Articulatory Precision?]]</div>|<div class="cpsessionviewpapertitle">Do Conversational Partners Entrain on Articulatory Precision?</div><div class="cpsessionviewpaperauthor">[[Nichola Lubold|AUTHOR Nichola Lubold]], [[Stephanie A. Borrie|AUTHOR Stephanie A. Borrie]], [[Tyson S. Barrett|AUTHOR Tyson S. Barrett]], [[Megan Willi|AUTHOR Megan Willi]], [[Visar Berisha|AUTHOR Visar Berisha]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191577.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-C-11|PAPER Tue-P-4-C-11 — Conversational Emotion Analysis via Attention Mechanisms]]</div>|<div class="cpsessionviewpapertitle">Conversational Emotion Analysis via Attention Mechanisms</div><div class="cpsessionviewpaperauthor">[[Zheng Lian|AUTHOR Zheng Lian]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Bin Liu|AUTHOR Bin Liu]], [[Jian Huang|AUTHOR Jian Huang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Immacolata Sonia d’Apolito|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193042.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-1|PAPER Tue-P-4-D-1 — The Effect of Phoneme Distribution on Perceptual Similarity in English]]</div>|<div class="cpsessionviewpapertitle">The Effect of Phoneme Distribution on Perceptual Similarity in English</div><div class="cpsessionviewpaperauthor">[[Emma O’Neill|AUTHOR Emma O’Neill]], [[Julie Carson-Berndsen|AUTHOR Julie Carson-Berndsen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192984.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-2|PAPER Tue-P-4-D-2 — Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features]]</div>|<div class="cpsessionviewpapertitle">Prosodic Representations of Prominence Classification Neural Networks and Autoencoders Using Bottleneck Features</div><div class="cpsessionviewpaperauthor">[[Sofoklis Kakouros|AUTHOR Sofoklis Kakouros]], [[Antti Suni|AUTHOR Antti Suni]], [[Juraj Šimko|AUTHOR Juraj Šimko]], [[Martti Vainio|AUTHOR Martti Vainio]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192950.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-3|PAPER Tue-P-4-D-3 — Compensation for French Liquid Deletion During Auditory Sentence Processing]]</div>|<div class="cpsessionviewpapertitle">Compensation for French Liquid Deletion During Auditory Sentence Processing</div><div class="cpsessionviewpaperauthor">[[Sharon Peperkamp|AUTHOR Sharon Peperkamp]], [[Alvaro Martin Iturralde Zurita|AUTHOR Alvaro Martin Iturralde Zurita]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192918.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-4|PAPER Tue-P-4-D-4 — Prosodic Factors Influencing Vowel Reduction in Russian]]</div>|<div class="cpsessionviewpapertitle">Prosodic Factors Influencing Vowel Reduction in Russian</div><div class="cpsessionviewpaperauthor">[[Daniil Kocharov|AUTHOR Daniil Kocharov]], [[Tatiana Kachkovskaia|AUTHOR Tatiana Kachkovskaia]], [[Pavel Skrelin|AUTHOR Pavel Skrelin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-5|PAPER Tue-P-4-D-5 — Time to Frequency Domain Mapping of the Voice Source: The Influence of Open Quotient and Glottal Skew on the Low End of the Source Spectrum]]</div>|<div class="cpsessionviewpapertitle">Time to Frequency Domain Mapping of the Voice Source: The Influence of Open Quotient and Glottal Skew on the Low End of the Source Spectrum</div><div class="cpsessionviewpaperauthor">[[Christer Gobl|AUTHOR Christer Gobl]], [[Ailbhe Ní Chasaide|AUTHOR Ailbhe Ní Chasaide]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192684.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-6|PAPER Tue-P-4-D-6 — Testing the Distinctiveness of Intonational Tunes: Evidence from Imitative Productions in American English]]</div>|<div class="cpsessionviewpapertitle">Testing the Distinctiveness of Intonational Tunes: Evidence from Imitative Productions in American English</div><div class="cpsessionviewpaperauthor">[[Eleanor Chodroff|AUTHOR Eleanor Chodroff]], [[Jennifer S. Cole|AUTHOR Jennifer S. Cole]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192507.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-7|PAPER Tue-P-4-D-7 — A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs]]</div>|<div class="cpsessionviewpapertitle">A Study of a Cross-Language Perception Based on Cortical Analysis Using Biomimetic STRFs</div><div class="cpsessionviewpaperauthor">[[Sangwook Park|AUTHOR Sangwook Park]], [[David K. Han|AUTHOR David K. Han]], [[Mounya Elhilali|AUTHOR Mounya Elhilali]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192082.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-8|PAPER Tue-P-4-D-8 — Perceptual Evaluation of Early versus Late F0 Peaks in the Intonation Structure of Czech Question-Word Questions]]</div>|<div class="cpsessionviewpapertitle">Perceptual Evaluation of Early versus Late F0 Peaks in the Intonation Structure of Czech Question-Word Questions</div><div class="cpsessionviewpaperauthor">[[Pavel Šturm|AUTHOR Pavel Šturm]], [[Jan Volín|AUTHOR Jan Volín]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192066.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-9|PAPER Tue-P-4-D-9 — Acoustic Correlates of Phonation Type in Chichimec]]</div>|<div class="cpsessionviewpapertitle">Acoustic Correlates of Phonation Type in Chichimec</div><div class="cpsessionviewpaperauthor">[[Anneliese Kelterer|AUTHOR Anneliese Kelterer]], [[Barbara Schuppler|AUTHOR Barbara Schuppler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191326.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-10|PAPER Tue-P-4-D-10 — F0 Variability Measures Based on Glottal Closure Instants]]</div>|<div class="cpsessionviewpapertitle">F0 Variability Measures Based on Glottal Closure Instants</div><div class="cpsessionviewpaperauthor">[[Yu-Ren Chien|AUTHOR Yu-Ren Chien]], [[Michal Borský|AUTHOR Michal Borský]], [[Jón Guðnason|AUTHOR Jón Guðnason]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-D-11|PAPER Tue-P-4-D-11 — Recognition of Creaky Voice from Emergency Calls]]</div>|<div class="cpsessionviewpapertitle">Recognition of Creaky Voice from Emergency Calls</div><div class="cpsessionviewpaperauthor">[[Lauri Tavi|AUTHOR Lauri Tavi]], [[Tanel Alumäe|AUTHOR Tanel Alumäe]], [[Stefan Werner|AUTHOR Stefan Werner]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Hiroshi Shimodaira|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193267.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-1|PAPER Tue-P-4-E-1 — Direct F0 Estimation with Neural-Network-Based Regression]]</div>|<div class="cpsessionviewpapertitle">Direct F0 Estimation with Neural-Network-Based Regression</div><div class="cpsessionviewpaperauthor">[[Shuzhuang Xu|AUTHOR Shuzhuang Xu]], [[Hiroshi Shimodaira|AUTHOR Hiroshi Shimodaira]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193253.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-2|PAPER Tue-P-4-E-2 — Real Time Online Visual End Point Detection Using Unidirectional LSTM]]</div>|<div class="cpsessionviewpapertitle">Real Time Online Visual End Point Detection Using Unidirectional LSTM</div><div class="cpsessionviewpaperauthor">[[Tanay Sharma|AUTHOR Tanay Sharma]], [[Rohith Chandrashekar Aralikatti|AUTHOR Rohith Chandrashekar Aralikatti]], [[Dilip Kumar Margam|AUTHOR Dilip Kumar Margam]], [[Abhinav Thanda|AUTHOR Abhinav Thanda]], [[Sharad Roy|AUTHOR Sharad Roy]], [[Pujitha Appan Kandala|AUTHOR Pujitha Appan Kandala]], [[Shankar M. Venkatesan|AUTHOR Shankar M. Venkatesan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192815.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-3|PAPER Tue-P-4-E-3 — Fully-Convolutional Network for Pitch Estimation of Speech Signals]]</div>|<div class="cpsessionviewpapertitle">Fully-Convolutional Network for Pitch Estimation of Speech Signals</div><div class="cpsessionviewpaperauthor">[[Luc Ardaillon|AUTHOR Luc Ardaillon]], [[Axel Roebel|AUTHOR Axel Roebel]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192286.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-4|PAPER Tue-P-4-E-4 — Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network]]</div>|<div class="cpsessionviewpapertitle">Vocal Pitch Extraction in Polyphonic Music Using Convolutional Residual Network</div><div class="cpsessionviewpaperauthor">[[Mingye Dong|AUTHOR Mingye Dong]], [[Jie Wu|AUTHOR Jie Wu]], [[Jian Luan|AUTHOR Jian Luan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191928.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-5|PAPER Tue-P-4-E-5 — Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments]]</div>|<div class="cpsessionviewpapertitle">Multi-Level Adaptive Speech Activity Detector for Speech in Naturalistic Environments</div><div class="cpsessionviewpaperauthor">[[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191925.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-6|PAPER Tue-P-4-E-6 — On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music]]</div>|<div class="cpsessionviewpapertitle">On the Importance of Audio-Source Separation for Singer Identification in Polyphonic Music</div><div class="cpsessionviewpaperauthor">[[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191864.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-7|PAPER Tue-P-4-E-7 — Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing]]</div>|<div class="cpsessionviewpapertitle">Investigating the Physiological and Acoustic Contrasts Between Choral and Operatic Singing</div><div class="cpsessionviewpaperauthor">[[Hiroko Terasawa|AUTHOR Hiroko Terasawa]], [[Kenta Wakasa|AUTHOR Kenta Wakasa]], [[Hideki Kawahara|AUTHOR Hideki Kawahara]], [[Ken-Ichi Sakakibara|AUTHOR Ken-Ichi Sakakibara]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191776.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-8|PAPER Tue-P-4-E-8 — Optimizing Voice Activity Detection for Noisy Conditions]]</div>|<div class="cpsessionviewpapertitle">Optimizing Voice Activity Detection for Noisy Conditions</div><div class="cpsessionviewpaperauthor">[[Ruixi Lin|AUTHOR Ruixi Lin]], [[Charles Costello|AUTHOR Charles Costello]], [[Charles Jankowski|AUTHOR Charles Jankowski]], [[Vishwas Mruthyunjaya|AUTHOR Vishwas Mruthyunjaya]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191662.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-9|PAPER Tue-P-4-E-9 — Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network]]</div>|<div class="cpsessionviewpapertitle">Small-Footprint Magic Word Detection Method Using Convolutional LSTM Neural Network</div><div class="cpsessionviewpaperauthor">[[Taiki Yamamoto|AUTHOR Taiki Yamamoto]], [[Ryota Nishimura|AUTHOR Ryota Nishimura]], [[Masayuki Misaki|AUTHOR Masayuki Misaki]], [[Norihide Kitaoka|AUTHOR Norihide Kitaoka]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191520.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-10|PAPER Tue-P-4-E-10 — Acoustic Modeling for Automatic Lyrics-to-Audio Alignment]]</div>|<div class="cpsessionviewpapertitle">Acoustic Modeling for Automatic Lyrics-to-Audio Alignment</div><div class="cpsessionviewpaperauthor">[[Chitralekha Gupta|AUTHOR Chitralekha Gupta]], [[Emre Yılmaz|AUTHOR Emre Yılmaz]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191354.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-11|PAPER Tue-P-4-E-11 — Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection]]</div>|<div class="cpsessionviewpapertitle">Two-Dimensional Convolutional Recurrent Neural Networks for Speech Activity Detection</div><div class="cpsessionviewpaperauthor">[[Anastasios Vafeiadis|AUTHOR Anastasios Vafeiadis]], [[Eleftherios Fanioudakis|AUTHOR Eleftherios Fanioudakis]], [[Ilyas Potamitis|AUTHOR Ilyas Potamitis]], [[Konstantinos Votis|AUTHOR Konstantinos Votis]], [[Dimitrios Giakoumis|AUTHOR Dimitrios Giakoumis]], [[Dimitrios Tzovaras|AUTHOR Dimitrios Tzovaras]], [[Liming Chen|AUTHOR Liming Chen]], [[Raouf Hamzaoui|AUTHOR Raouf Hamzaoui]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191153.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-4-E-12|PAPER Tue-P-4-E-12 — A Study of Soprano Singing in Light of the Source-Filter Interaction]]</div>|<div class="cpsessionviewpapertitle">A Study of Soprano Singing in Light of the Source-Filter Interaction</div><div class="cpsessionviewpaperauthor">[[Tokihiko Kaburagi|AUTHOR Tokihiko Kaburagi]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Erica Cooper|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193233.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-1|PAPER Tue-P-5-A-1 — Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring]]</div>|<div class="cpsessionviewpapertitle">Boosting Character-Based Chinese Speech Synthesis via Multi-Task Learning and Dictionary Tutoring</div><div class="cpsessionviewpaperauthor">[[Yuxiang Zou|AUTHOR Yuxiang Zou]], [[Linhao Dong|AUTHOR Linhao Dong]], [[Bo Xu|AUTHOR Bo Xu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193191.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-2|PAPER Tue-P-5-A-2 — Building a Mixed-Lingual Neural TTS System with Only Monolingual Data]]</div>|<div class="cpsessionviewpapertitle">Building a Mixed-Lingual Neural TTS System with Only Monolingual Data</div><div class="cpsessionviewpaperauthor">[[Liumeng Xue|AUTHOR Liumeng Xue]], [[Wei Song|AUTHOR Wei Song]], [[Guanghui Xu|AUTHOR Guanghui Xu]], [[Lei Xie|AUTHOR Lei Xie]], [[Zhizheng Wu|AUTHOR Zhizheng Wu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193176.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-3|PAPER Tue-P-5-A-3 — Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion]]</div>|<div class="cpsessionviewpapertitle">Neural Machine Translation for Multilingual Grapheme-to-Phoneme Conversion</div><div class="cpsessionviewpaperauthor">[[Alex Sokolov|AUTHOR Alex Sokolov]], [[Tracy Rohlin|AUTHOR Tracy Rohlin]], [[Ariya Rastrow|AUTHOR Ariya Rastrow]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-4|PAPER Tue-P-5-A-4 — Analysis of Pronunciation Learning in End-to-End Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Analysis of Pronunciation Learning in End-to-End Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Jason Taylor|AUTHOR Jason Taylor]], [[Korin Richmond|AUTHOR Korin Richmond]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192730.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-5|PAPER Tue-P-5-A-5 — End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning]]</div>|<div class="cpsessionviewpapertitle">End-to-End Text-to-Speech for Low-Resource Languages by Cross-Lingual Transfer Learning</div><div class="cpsessionviewpaperauthor">[[Yuan-Jui Chen|AUTHOR Yuan-Jui Chen]], [[Tao Tu|AUTHOR Tao Tu]], [[Cheng-chieh Yeh|AUTHOR Cheng-chieh Yeh]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-6|PAPER Tue-P-5-A-6 — Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning]]</div>|<div class="cpsessionviewpapertitle">Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning</div><div class="cpsessionviewpaperauthor">[[Yu Zhang|AUTHOR Yu Zhang]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Heiga Zen|AUTHOR Heiga Zen]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Zhifeng Chen|AUTHOR Zhifeng Chen]], [[R.J. Skerry-Ryan|AUTHOR R.J. Skerry-Ryan]], [[Ye Jia|AUTHOR Ye Jia]], [[Andrew Rosenberg|AUTHOR Andrew Rosenberg]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192335.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-7|PAPER Tue-P-5-A-7 — Unified Language-Independent DNN-Based G2P Converter]]</div>|<div class="cpsessionviewpapertitle">Unified Language-Independent DNN-Based G2P Converter</div><div class="cpsessionviewpaperauthor">[[Markéta Jůzová|AUTHOR Markéta Jůzová]], [[Daniel Tihelka|AUTHOR Daniel Tihelka]], [[Jakub Vít|AUTHOR Jakub Vít]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192292.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-8|PAPER Tue-P-5-A-8 — Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT]]</div>|<div class="cpsessionviewpapertitle">Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-Trained BERT</div><div class="cpsessionviewpaperauthor">[[Dongyang Dai|AUTHOR Dongyang Dai]], [[Zhiyong Wu|AUTHOR Zhiyong Wu]], [[Shiyin Kang|AUTHOR Shiyin Kang]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Jia Jia|AUTHOR Jia Jia]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-9|PAPER Tue-P-5-A-9 — Transformer Based Grapheme-to-Phoneme Conversion]]</div>|<div class="cpsessionviewpapertitle">Transformer Based Grapheme-to-Phoneme Conversion</div><div class="cpsessionviewpaperauthor">[[Sevinj Yolchuyeva|AUTHOR Sevinj Yolchuyeva]], [[Géza Németh|AUTHOR Géza Németh]], [[Bálint Gyires-Tóth|AUTHOR Bálint Gyires-Tóth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191781.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-10|PAPER Tue-P-5-A-10 — Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages]]</div>|<div class="cpsessionviewpapertitle">Developing Pronunciation Models in New Languages Faster by Exploiting Common Grapheme-to-Phoneme Correspondences Across Languages</div><div class="cpsessionviewpaperauthor">[[Harry Bleyan|AUTHOR Harry Bleyan]], [[Sandy Ritchie|AUTHOR Sandy Ritchie]], [[Jonas Fromseier Mortensen|AUTHOR Jonas Fromseier Mortensen]], [[Daan van Esch|AUTHOR Daan van Esch]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191632.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-11|PAPER Tue-P-5-A-11 — Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding]]</div>|<div class="cpsessionviewpapertitle">Cross-Lingual, Multi-Speaker Text-To-Speech Synthesis Using Neural Speaker Embedding</div><div class="cpsessionviewpaperauthor">[[Mengnan Chen|AUTHOR Mengnan Chen]], [[Minchuan Chen|AUTHOR Minchuan Chen]], [[Shuang Liang|AUTHOR Shuang Liang]], [[Jun Ma|AUTHOR Jun Ma]], [[Lei Chen|AUTHOR Lei Chen]], [[Shaojun Wang|AUTHOR Shaojun Wang]], [[Jing Xiao|AUTHOR Jing Xiao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191235.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-12|PAPER Tue-P-5-A-12 — Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features]]</div>|<div class="cpsessionviewpapertitle">Polyphone Disambiguation for Mandarin Chinese Using Conditional Neural Network with Multi-Level Embedding Features</div><div class="cpsessionviewpaperauthor">[[Zexin Cai|AUTHOR Zexin Cai]], [[Yaogen Yang|AUTHOR Yaogen Yang]], [[Chuxiong Zhang|AUTHOR Chuxiong Zhang]], [[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-A-13|PAPER Tue-P-5-A-13 — Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion]]</div>|<div class="cpsessionviewpapertitle">Token-Level Ensemble Distillation for Grapheme-to-Phoneme Conversion</div><div class="cpsessionviewpaperauthor">[[Hao Sun|AUTHOR Hao Sun]], [[Xu Tan|AUTHOR Xu Tan]], [[Jun-Wei Gan|AUTHOR Jun-Wei Gan]], [[Hongzhi Liu|AUTHOR Hongzhi Liu]], [[Sheng Zhao|AUTHOR Sheng Zhao]], [[Tao Qin|AUTHOR Tao Qin]], [[Tie-Yan Liu|AUTHOR Tie-Yan Liu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Sakriani Sakti|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193052.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-1|PAPER Tue-P-5-B-1 — Multilingual Speech Recognition with Corpus Relatedness Sampling]]</div>|<div class="cpsessionviewpapertitle">Multilingual Speech Recognition with Corpus Relatedness Sampling</div><div class="cpsessionviewpaperauthor">[[Xinjian Li|AUTHOR Xinjian Li]], [[Siddharth Dalmia|AUTHOR Siddharth Dalmia]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Florian Metze|AUTHOR Florian Metze]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192881.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-2|PAPER Tue-P-5-B-2 — Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors]]</div>|<div class="cpsessionviewpapertitle">Multi-Dialect Acoustic Modeling Using Phone Mapping and Online i-Vectors</div><div class="cpsessionviewpaperauthor">[[Harish Arsikere|AUTHOR Harish Arsikere]], [[Ashtosh Sapru|AUTHOR Ashtosh Sapru]], [[Sri Garimella|AUTHOR Sri Garimella]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-3|PAPER Tue-P-5-B-3 — Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model]]</div>|<div class="cpsessionviewpapertitle">Large-Scale Multilingual Speech Recognition with a Streaming End-to-End Model</div><div class="cpsessionviewpaperauthor">[[Anjuli Kannan|AUTHOR Anjuli Kannan]], [[Arindrima Datta|AUTHOR Arindrima Datta]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Eugene Weinstein|AUTHOR Eugene Weinstein]], [[Bhuvana Ramabhadran|AUTHOR Bhuvana Ramabhadran]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Ankur Bapna|AUTHOR Ankur Bapna]], [[Zhifeng Chen|AUTHOR Zhifeng Chen]], [[Seungji Lee|AUTHOR Seungji Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192772.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-4|PAPER Tue-P-5-B-4 — Recognition of Latin American Spanish Using Multi-Task Learning]]</div>|<div class="cpsessionviewpapertitle">Recognition of Latin American Spanish Using Multi-Task Learning</div><div class="cpsessionviewpaperauthor">[[Carlos Mendes|AUTHOR Carlos Mendes]], [[Alberto Abad|AUTHOR Alberto Abad]], [[João Paulo Neto|AUTHOR João Paulo Neto]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-5|PAPER Tue-P-5-B-5 — End-to-End Accented Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">End-to-End Accented Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Thibault Viglino|AUTHOR Thibault Viglino]], [[Petr Motlicek|AUTHOR Petr Motlicek]], [[Milos Cernak|AUTHOR Milos Cernak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192092.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-6|PAPER Tue-P-5-B-6 — End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">End-to-End Articulatory Attribute Modeling for Low-Resource Multilingual Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Sheng Li|AUTHOR Sheng Li]], [[Chenchen Ding|AUTHOR Chenchen Ding]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Peng Shen|AUTHOR Peng Shen]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191959.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-7|PAPER Tue-P-5-B-7 — Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Exploiting Monolingual Speech Corpora for Code-Mixed Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Karan Taneja|AUTHOR Karan Taneja]], [[Satarupa Guha|AUTHOR Satarupa Guha]], [[Preethi Jyothi|AUTHOR Preethi Jyothi]], [[Basil Abraham|AUTHOR Basil Abraham]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191868.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-8|PAPER Tue-P-5-B-8 — Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models]]</div>|<div class="cpsessionviewpapertitle">Phoneme-Based Contextualization for Cross-Lingual Speech Recognition in End-to-End Models</div><div class="cpsessionviewpaperauthor">[[Ke Hu|AUTHOR Ke Hu]], [[Antoine Bruguier|AUTHOR Antoine Bruguier]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]], [[Golan Pundak|AUTHOR Golan Pundak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191867.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-9|PAPER Tue-P-5-B-9 — Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data]]</div>|<div class="cpsessionviewpapertitle">Constrained Output Embeddings for End-to-End Code-Switching Speech Recognition with Only Monolingual Data</div><div class="cpsessionviewpaperauthor">[[Yerbolat Khassanov|AUTHOR Yerbolat Khassanov]], [[Haihua Xu|AUTHOR Haihua Xu]], [[Van Tung Pham|AUTHOR Van Tung Pham]], [[Zhiping Zeng|AUTHOR Zhiping Zeng]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Chongjia Ni|AUTHOR Chongjia Ni]], [[Bin Ma|AUTHOR Bin Ma]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191429.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-10|PAPER Tue-P-5-B-10 — On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">On the End-to-End Solution to Mandarin-English Code-Switching Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Zhiping Zeng|AUTHOR Zhiping Zeng]], [[Yerbolat Khassanov|AUTHOR Yerbolat Khassanov]], [[Van Tung Pham|AUTHOR Van Tung Pham]], [[Haihua Xu|AUTHOR Haihua Xu]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191365.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-B-11|PAPER Tue-P-5-B-11 — Towards Language-Universal Mandarin-English Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Towards Language-Universal Mandarin-English Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Yuan Liu|AUTHOR Yuan Liu]], [[Ming Lei|AUTHOR Ming Lei]], [[Bin Ma|AUTHOR Bin Ma]], [[Lei Xie|AUTHOR Lei Xie]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Yu Tsao|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191241.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-1|PAPER Tue-P-5-C-1 — Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings]]</div>|<div class="cpsessionviewpapertitle">Improving ASR Confidence Scores for Alexa Using Acoustic and Hypothesis Embeddings</div><div class="cpsessionviewpaperauthor">[[Prakhar Swarup|AUTHOR Prakhar Swarup]], [[Roland Maas|AUTHOR Roland Maas]], [[Sri Garimella|AUTHOR Sri Garimella]], [[Sri Harish Mallidi|AUTHOR Sri Harish Mallidi]], [[Björn Hoffmeister|AUTHOR Björn Hoffmeister]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191290.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-2|PAPER Tue-P-5-C-2 — Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Investigation of Transformer Based Spelling Correction Model for CTC-Based End-to-End Mandarin Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Ming Lei|AUTHOR Ming Lei]], [[Zhijie Yan|AUTHOR Zhijie Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191345.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-3|PAPER Tue-P-5-C-3 — Improving Performance of End-to-End ASR on Numeric Sequences]]</div>|<div class="cpsessionviewpapertitle">Improving Performance of End-to-End ASR on Numeric Sequences</div><div class="cpsessionviewpaperauthor">[[Cal Peyser|AUTHOR Cal Peyser]], [[Hao Zhang|AUTHOR Hao Zhang]], [[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Zelin Wu|AUTHOR Zelin Wu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-4|PAPER Tue-P-5-C-4 — A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting]]</div>|<div class="cpsessionviewpapertitle">A Time Delay Neural Network with Shared Weight Self-Attention for Small-Footprint Keyword Spotting</div><div class="cpsessionviewpaperauthor">[[Ye Bai|AUTHOR Ye Bai]], [[Jiangyan Yi|AUTHOR Jiangyan Yi]], [[Jianhua Tao|AUTHOR Jianhua Tao]], [[Zhengqi Wen|AUTHOR Zhengqi Wen]], [[Zhengkun Tian|AUTHOR Zhengkun Tian]], [[Chenghao Zhao|AUTHOR Chenghao Zhao]], [[Cunhang Fan|AUTHOR Cunhang Fan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191766.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-5|PAPER Tue-P-5-C-5 — Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification]]</div>|<div class="cpsessionviewpapertitle">Sub-Band Convolutional Neural Networks for Small-Footprint Spoken Term Classification</div><div class="cpsessionviewpaperauthor">[[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Ming Sun|AUTHOR Ming Sun]], [[Yixin Gao|AUTHOR Yixin Gao]], [[Shiv Vitaladevuni|AUTHOR Shiv Vitaladevuni]], [[Chao Wang|AUTHOR Chao Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192104.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-6|PAPER Tue-P-5-C-6 — Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese]]</div>|<div class="cpsessionviewpapertitle">Investigating Radical-Based End-to-End Speech Recognition Systems for Chinese Dialects and Japanese</div><div class="cpsessionviewpaperauthor">[[Sheng Li|AUTHOR Sheng Li]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Chenchen Ding|AUTHOR Chenchen Ding]], [[Peng Shen|AUTHOR Peng Shen]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-7|PAPER Tue-P-5-C-7 — Joint Decoding of CTC Based Systems for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Joint Decoding of CTC Based Systems for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Jiaqi Guo|AUTHOR Jiaqi Guo]], [[Yongbin You|AUTHOR Yongbin You]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192263.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-8|PAPER Tue-P-5-C-8 — A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge]]</div>|<div class="cpsessionviewpapertitle">A Joint End-to-End and DNN-HMM Hybrid Automatic Speech Recognition System with Transferring Sharable Knowledge</div><div class="cpsessionviewpaperauthor">[[Tomohiro Tanaka|AUTHOR Tomohiro Tanaka]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Takanobu Oba|AUTHOR Takanobu Oba]], [[Yushi Aono|AUTHOR Yushi Aono]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192316.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-9|PAPER Tue-P-5-C-9 — Active Learning Methods for Low Resource End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Active Learning Methods for Low Resource End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Karan Malhotra|AUTHOR Karan Malhotra]], [[Shubham Bansal|AUTHOR Shubham Bansal]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192355.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-10|PAPER Tue-P-5-C-10 — Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems]]</div>|<div class="cpsessionviewpapertitle">Analysis of Multilingual Sequence-to-Sequence Speech Recognition Systems</div><div class="cpsessionviewpaperauthor">[[Martin Karafiát|AUTHOR Martin Karafiát]], [[Murali Karthick Baskar|AUTHOR Murali Karthick Baskar]], [[Shinji Watanabe|AUTHOR Shinji Watanabe]], [[Takaaki Hori|AUTHOR Takaaki Hori]], [[Matthew Wiesner|AUTHOR Matthew Wiesner]], [[Jan Černocký|AUTHOR Jan Černocký]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192667.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-11|PAPER Tue-P-5-C-11 — Lattice Generation in Attention-Based Speech Recognition Models]]</div>|<div class="cpsessionviewpapertitle">Lattice Generation in Attention-Based Speech Recognition Models</div><div class="cpsessionviewpaperauthor">[[Michał Zapotoczny|AUTHOR Michał Zapotoczny]], [[Piotr Pietrzak|AUTHOR Piotr Pietrzak]], [[Adrian Łańcucki|AUTHOR Adrian Łańcucki]], [[Jan Chorowski|AUTHOR Jan Chorowski]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192740.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-12|PAPER Tue-P-5-C-12 — Sampling from Stochastic Finite Automata with Applications to CTC Decoding]]</div>|<div class="cpsessionviewpapertitle">Sampling from Stochastic Finite Automata with Applications to CTC Decoding</div><div class="cpsessionviewpaperauthor">[[Martin Jansche|AUTHOR Martin Jansche]], [[Alexander Gutkin|AUTHOR Alexander Gutkin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192811.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-13|PAPER Tue-P-5-C-13 — ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning]]</div>|<div class="cpsessionviewpapertitle">ShrinkML: End-to-End ASR Model Compression Using Reinforcement Learning</div><div class="cpsessionviewpaperauthor">[[Łukasz Dudziak|AUTHOR Łukasz Dudziak]], [[Mohamed S. Abdelfattah|AUTHOR Mohamed S. Abdelfattah]], [[Ravichander Vipperla|AUTHOR Ravichander Vipperla]], [[Stefanos Laskaridis|AUTHOR Stefanos Laskaridis]], [[Nicholas D. Lane|AUTHOR Nicholas D. Lane]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193056.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-14|PAPER Tue-P-5-C-14 — Acoustic-to-Phrase Models for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Acoustic-to-Phrase Models for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Yashesh Gaur|AUTHOR Yashesh Gaur]], [[Jinyu Li|AUTHOR Jinyu Li]], [[Zhong Meng|AUTHOR Zhong Meng]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-C-15|PAPER Tue-P-5-C-15 — Performance Monitoring for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Performance Monitoring for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Ruizhi Li|AUTHOR Ruizhi Li]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Ellen Marklund|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193103.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-1|PAPER Tue-P-5-D-1 — The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English]]</div>|<div class="cpsessionviewpapertitle">The Role of Musical Experience in the Perceptual Weighting of Acoustic Cues for the Obstruent Coda Voicing Contrast in American English</div><div class="cpsessionviewpaperauthor">[[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]], [[Santiago Barreda|AUTHOR Santiago Barreda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192989.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-2|PAPER Tue-P-5-D-2 — Individual Differences in Implicit Attention to Phonetic Detail in Speech Perception]]</div>|<div class="cpsessionviewpapertitle">Individual Differences in Implicit Attention to Phonetic Detail in Speech Perception</div><div class="cpsessionviewpaperauthor">[[Natalie Lewandowski|AUTHOR Natalie Lewandowski]], [[Daniel Duran|AUTHOR Daniel Duran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192931.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-3|PAPER Tue-P-5-D-3 — Effects of Natural Variability in Cross-Modal Temporal Correlations on Audiovisual Speech Recognition Benefit]]</div>|<div class="cpsessionviewpapertitle">Effects of Natural Variability in Cross-Modal Temporal Correlations on Audiovisual Speech Recognition Benefit</div><div class="cpsessionviewpaperauthor">[[Kaylah Lalonde|AUTHOR Kaylah Lalonde]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192741.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-4|PAPER Tue-P-5-D-4 — Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech]]</div>|<div class="cpsessionviewpapertitle">Listening with Great Expectations: An Investigation of Word Form Anticipations in Naturalistic Speech</div><div class="cpsessionviewpaperauthor">[[M. Bentum|AUTHOR M. Bentum]], [[L. ten Bosch|AUTHOR L. ten Bosch]], [[A. van den Bosch|AUTHOR A. van den Bosch]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192685.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-5|PAPER Tue-P-5-D-5 — Quantifying Expectation Modulation in Human Speech Processing]]</div>|<div class="cpsessionviewpapertitle">Quantifying Expectation Modulation in Human Speech Processing</div><div class="cpsessionviewpaperauthor">[[M. Bentum|AUTHOR M. Bentum]], [[L. ten Bosch|AUTHOR L. ten Bosch]], [[A. van den Bosch|AUTHOR A. van den Bosch]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192619.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-6|PAPER Tue-P-5-D-6 — Perception of Pitch Contours in Speech and Nonspeech]]</div>|<div class="cpsessionviewpapertitle">Perception of Pitch Contours in Speech and Nonspeech</div><div class="cpsessionviewpaperauthor">[[Daniel R. Turner|AUTHOR Daniel R. Turner]], [[Ann R. Bradlow|AUTHOR Ann R. Bradlow]], [[Jennifer S. Cole|AUTHOR Jennifer S. Cole]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192611.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-7|PAPER Tue-P-5-D-7 — Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments]]</div>|<div class="cpsessionviewpapertitle">Analyzing Reaction Time and Error Sequences in Lexical Decision Experiments</div><div class="cpsessionviewpaperauthor">[[L. ten Bosch|AUTHOR L. ten Bosch]], [[L. Boves|AUTHOR L. Boves]], [[K. Mulder|AUTHOR K. Mulder]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192353.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-8|PAPER Tue-P-5-D-8 — Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of the Temporal Segmentation of Hand Movements in British English Cued Speech</div><div class="cpsessionviewpaperauthor">[[Li Liu|AUTHOR Li Liu]], [[Jianze Li|AUTHOR Jianze Li]], [[Gang Feng|AUTHOR Gang Feng]], [[Xiao-Ping Zhang|AUTHOR Xiao-Ping Zhang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192302.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-9|PAPER Tue-P-5-D-9 — Place Shift as an Autonomous Process: Evidence from Japanese Listeners]]</div>|<div class="cpsessionviewpapertitle">Place Shift as an Autonomous Process: Evidence from Japanese Listeners</div><div class="cpsessionviewpaperauthor">[[Yuriko Yokoe|AUTHOR Yuriko Yokoe]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192251.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-10|PAPER Tue-P-5-D-10 — A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective]]</div>|<div class="cpsessionviewpapertitle">A Perceptual Study of CV Syllables in Both Spoken and Whistled Speech: A Tashlhiyt Berber Perspective</div><div class="cpsessionviewpaperauthor">[[Julien Meyer|AUTHOR Julien Meyer]], [[Laure Dentel|AUTHOR Laure Dentel]], [[Silvain Gerber|AUTHOR Silvain Gerber]], [[Rachid Ridouane|AUTHOR Rachid Ridouane]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191893.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-11|PAPER Tue-P-5-D-11 — Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study]]</div>|<div class="cpsessionviewpapertitle">Consonant Classification in Mandarin Based on the Depth Image Feature: A Pilot Study</div><div class="cpsessionviewpaperauthor">[[Han-Chi Hsieh|AUTHOR Han-Chi Hsieh]], [[Wei-Zhong Zheng|AUTHOR Wei-Zhong Zheng]], [[Ko-Chiang Chen|AUTHOR Ko-Chiang Chen]], [[Ying-Hui Lai|AUTHOR Ying-Hui Lai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191795.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-12|PAPER Tue-P-5-D-12 — The Different Roles of Expectations in Phonetic and Lexical Processing]]</div>|<div class="cpsessionviewpapertitle">The Different Roles of Expectations in Phonetic and Lexical Processing</div><div class="cpsessionviewpaperauthor">[[Shiri Lev-Ari|AUTHOR Shiri Lev-Ari]], [[Robin Dodsworth|AUTHOR Robin Dodsworth]], [[Jeff Mielke|AUTHOR Jeff Mielke]], [[Sharon Peperkamp|AUTHOR Sharon Peperkamp]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191433.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-13|PAPER Tue-P-5-D-13 — Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers]]</div>|<div class="cpsessionviewpapertitle">Perceptual Adaptation to Device and Human Voices: Learning and Generalization of a Phonetic Shift Across Real and Voice-AI Talkers</div><div class="cpsessionviewpaperauthor">[[Bruno Ferenc Segedin|AUTHOR Bruno Ferenc Segedin]], [[Michelle Cohn|AUTHOR Michelle Cohn]], [[Georgia Zellou|AUTHOR Georgia Zellou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192422.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-D-14|PAPER Tue-P-5-D-14 — End-to-End Convolutional Sequence Learning for ASL Fingerspelling Recognition]]</div>|<div class="cpsessionviewpapertitle">End-to-End Convolutional Sequence Learning for ASL Fingerspelling Recognition</div><div class="cpsessionviewpaperauthor">[[Katerina Papadimitriou|AUTHOR Katerina Papadimitriou]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-1|PAPER Tue-P-5-E-1 — Multiview Shared Subspace Learning Across Speakers and Speech Commands]]</div>|<div class="cpsessionviewpapertitle">Multiview Shared Subspace Learning Across Speakers and Speech Commands</div><div class="cpsessionviewpaperauthor">[[Krishna Somandepalli|AUTHOR Krishna Somandepalli]], [[Naveen Kumar|AUTHOR Naveen Kumar]], [[Arindam Jati|AUTHOR Arindam Jati]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193091.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-2|PAPER Tue-P-5-E-2 — A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms]]</div>|<div class="cpsessionviewpapertitle">A Machine Learning Based Clustering Protocol for Determining Hearing Aid Initial Configurations from Pure-Tone Audiograms</div><div class="cpsessionviewpaperauthor">[[Chelzy Belitz|AUTHOR Chelzy Belitz]], [[Hussnain Ali|AUTHOR Hussnain Ali]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-3|PAPER Tue-P-5-E-3 — Acoustic Scene Classification with Mismatched Devices Using CliqueNets and Mixup Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">Acoustic Scene Classification with Mismatched Devices Using CliqueNets and Mixup Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Truc Nguyen|AUTHOR Truc Nguyen]], [[Franz Pernkopf|AUTHOR Franz Pernkopf]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192953.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-4|PAPER Tue-P-5-E-4 — DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients]]</div>|<div class="cpsessionviewpapertitle">DeepLung: Smartphone Convolutional Neural Network-Based Inference of Lung Anomalies for Pulmonary Patients</div><div class="cpsessionviewpaperauthor">[[Mohsin Y. Ahmed|AUTHOR Mohsin Y. Ahmed]], [[Md. Mahbubur Rahman|AUTHOR Md. Mahbubur Rahman]], [[Jilong Kuang|AUTHOR Jilong Kuang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192711.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-5|PAPER Tue-P-5-E-5 — On the Use/Misuse of the Term ‘Phoneme’]]</div>|<div class="cpsessionviewpapertitle">On the Use/Misuse of the Term ‘Phoneme’</div><div class="cpsessionviewpaperauthor">[[Roger K. Moore|AUTHOR Roger K. Moore]], [[Lucy Skidmore|AUTHOR Lucy Skidmore]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-6|PAPER Tue-P-5-E-6 — Understanding and Visualizing Raw Waveform-Based CNNs]]</div>|<div class="cpsessionviewpapertitle">Understanding and Visualizing Raw Waveform-Based CNNs</div><div class="cpsessionviewpaperauthor">[[Hannah Muckenhirn|AUTHOR Hannah Muckenhirn]], [[Vinayak Abrol|AUTHOR Vinayak Abrol]], [[Mathew Magimai-Doss|AUTHOR Mathew Magimai-Doss]], [[Sébastien Marcel|AUTHOR Sébastien Marcel]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192219.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-7|PAPER Tue-P-5-E-7 — Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms]]</div>|<div class="cpsessionviewpapertitle">Fréchet Audio Distance: A Reference-Free Metric for Evaluating Music Enhancement Algorithms</div><div class="cpsessionviewpaperauthor">[[Kevin Kilgour|AUTHOR Kevin Kilgour]], [[Mauricio Zuluaga|AUTHOR Mauricio Zuluaga]], [[Dominik Roblek|AUTHOR Dominik Roblek]], [[Matthew Sharifi|AUTHOR Matthew Sharifi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191541.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-8|PAPER Tue-P-5-E-8 — ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems]]</div>|<div class="cpsessionviewpapertitle">ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems</div><div class="cpsessionviewpaperauthor">[[Yuan Gong|AUTHOR Yuan Gong]], [[Jian Yang|AUTHOR Jian Yang]], [[Jacob Huber|AUTHOR Jacob Huber]], [[Mitchell MacKnight|AUTHOR Mitchell MacKnight]], [[Christian Poellabauer|AUTHOR Christian Poellabauer]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191492.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-P-5-E-9|PAPER Tue-P-5-E-9 — Analyzing Intra-Speaker and Inter-Speaker Vocal Tract Impedance Characteristics in a Low-Dimensional Feature Space Using t-SNE]]</div>|<div class="cpsessionviewpapertitle">Analyzing Intra-Speaker and Inter-Speaker Vocal Tract Impedance Characteristics in a Low-Dimensional Feature Space Using t-SNE</div><div class="cpsessionviewpaperauthor">[[Balamurali B.T.|AUTHOR Balamurali B.T.]], [[Jer-Ming Chen|AUTHOR Jer-Ming Chen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Tuesday 17 Sept 2019, Hall 4|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198005.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-1|PAPER Tue-S&T-2-1 — Directional Audio Rendering Using a Neural Network Based Personalized HRTF]]</div>|<div class="cpsessionviewpapertitle">Directional Audio Rendering Using a Neural Network Based Personalized HRTF</div><div class="cpsessionviewpaperauthor">[[Geon Woo Lee|AUTHOR Geon Woo Lee]], [[Jung Hyuk Lee|AUTHOR Jung Hyuk Lee]], [[Seong Ju Kim|AUTHOR Seong Ju Kim]], [[Hong Kook Kim|AUTHOR Hong Kook Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198007.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-2|PAPER Tue-S&T-2-2 — Online Speech Processing and Analysis Suite]]</div>|<div class="cpsessionviewpapertitle">Online Speech Processing and Analysis Suite</div><div class="cpsessionviewpaperauthor">[[Wikus Pienaar|AUTHOR Wikus Pienaar]], [[Daan Wissing|AUTHOR Daan Wissing]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198017.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-3|PAPER Tue-S&T-2-3 — Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence]]</div>|<div class="cpsessionviewpapertitle">Formant Pattern and Spectral Shape Ambiguity of Vowel Sounds, and Related Phenomena of Vowel Acoustics — Exemplary Evidence</div><div class="cpsessionviewpaperauthor">[[Dieter Maurer|AUTHOR Dieter Maurer]], [[Heidy Suter|AUTHOR Heidy Suter]], [[Christian d’Hereuse|AUTHOR Christian d’Hereuse]], [[Volker Dellwo|AUTHOR Volker Dellwo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198022.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-4|PAPER Tue-S&T-2-4 —  Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech]]</div>|<div class="cpsessionviewpapertitle"> Sound Tools eXtended (STx) 5.0 — A Powerful Sound Analysis Tool Optimized for Speech</div><div class="cpsessionviewpaperauthor">[[Anton Noll|AUTHOR Anton Noll]], [[Jonathan Stuefer|AUTHOR Jonathan Stuefer]], [[Nicola Klingler|AUTHOR Nicola Klingler]], [[Hannah Leykum|AUTHOR Hannah Leykum]], [[Carina Lozo|AUTHOR Carina Lozo]], [[Jan Luttenberger|AUTHOR Jan Luttenberger]], [[Michael Pucher|AUTHOR Michael Pucher]], [[Carolin Schmid|AUTHOR Carolin Schmid]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198030.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-5|PAPER Tue-S&T-2-5 — FarSpeech: Arabic Natural Language Processing for Live Arabic Speech]]</div>|<div class="cpsessionviewpapertitle">FarSpeech: Arabic Natural Language Processing for Live Arabic Speech</div><div class="cpsessionviewpaperauthor">[[Mohamed Eldesouki|AUTHOR Mohamed Eldesouki]], [[Naassih Gopee|AUTHOR Naassih Gopee]], [[Ahmed Ali|AUTHOR Ahmed Ali]], [[Kareem Darwish|AUTHOR Kareem Darwish]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198037.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-6|PAPER Tue-S&T-2-6 — A System for Real-Time Privacy Preserving Data Collection for Ambient Assisted Living]]</div>|<div class="cpsessionviewpapertitle">A System for Real-Time Privacy Preserving Data Collection for Ambient Assisted Living</div><div class="cpsessionviewpaperauthor">[[Fasih Haider|AUTHOR Fasih Haider]], [[Saturnino Luz|AUTHOR Saturnino Luz]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-S&T-2-7|PAPER Tue-S&T-2-7 —  NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion]]</div>|<div class="cpsessionviewpapertitle"> NUS Speak-to-Sing: A Web Platform for Personalized Speech-to-Singing Conversion</div><div class="cpsessionviewpaperauthor">[[Chitralekha Gupta|AUTHOR Chitralekha Gupta]], [[Karthika Vijayan|AUTHOR Karthika Vijayan]], [[Bidisha Sharma|AUTHOR Bidisha Sharma]], [[Xiaoxue Gao|AUTHOR Xiaoxue Gao]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Tuesday 17 Sept 2019, Hall 3|<|
|^Chair:&nbsp;|^Neville Ryant, Alejandrina Cristia|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191268.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-3-6-1|PAPER Tue-SS-3-6-1 — The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines]]</div>|<div class="cpsessionviewpapertitle">The Second DIHARD Diarization Challenge: Dataset, Task, and Baselines</div><div class="cpsessionviewpaperauthor">[[Neville Ryant|AUTHOR Neville Ryant]], [[Kenneth Church|AUTHOR Kenneth Church]], [[Christopher Cieri|AUTHOR Christopher Cieri]], [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]], [[Jun Du|AUTHOR Jun Du]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]], [[Mark Liberman|AUTHOR Mark Liberman]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192716.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-3-6-2|PAPER Tue-SS-3-6-2 — LEAP Diarization System for the Second DIHARD Challenge]]</div>|<div class="cpsessionviewpapertitle">LEAP Diarization System for the Second DIHARD Challenge</div><div class="cpsessionviewpaperauthor">[[Prachi Singh|AUTHOR Prachi Singh]], [[Harsha Vardhan M.A.|AUTHOR Harsha Vardhan M.A.]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]], [[A. Kanagasundaram|AUTHOR A. Kanagasundaram]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192462.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-3-6-3|PAPER Tue-SS-3-6-3 — ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">ViVoLAB Speaker Diarization System for the DIHARD 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Ignacio Viñals|AUTHOR Ignacio Viñals]], [[Pablo Gimeno|AUTHOR Pablo Gimeno]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191385.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-3-6-4|PAPER Tue-SS-3-6-4 — UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">UWB-NTIS Speaker Diarization System for the DIHARD II 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Zbyněk Zajíc|AUTHOR Zbyněk Zajíc]], [[Marie Kunešová|AUTHOR Marie Kunešová]], [[Marek Hrúz|AUTHOR Marek Hrúz]], [[Jan Vaněk|AUTHOR Jan Vaněk]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191903.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-3-6-5|PAPER Tue-SS-3-6-5 — The Second DIHARD Challenge: System Description for USC-SAIL Team]]</div>|<div class="cpsessionviewpapertitle">The Second DIHARD Challenge: System Description for USC-SAIL Team</div><div class="cpsessionviewpaperauthor">[[Tae Jin Park|AUTHOR Tae Jin Park]], [[Manoj Kumar|AUTHOR Manoj Kumar]], [[Nikolaos Flemotomos|AUTHOR Nikolaos Flemotomos]], [[Monisankha Pal|AUTHOR Monisankha Pal]], [[Raghuveer Peri|AUTHOR Raghuveer Peri]], [[Rimita Lahiri|AUTHOR Rimita Lahiri]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192757.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-3-6-6|PAPER Tue-SS-3-6-6 — Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II]]</div>|<div class="cpsessionviewpapertitle">Speaker Diarization with Deep Speaker Embeddings for DIHARD Challenge II</div><div class="cpsessionviewpaperauthor">[[Sergey Novoselov|AUTHOR Sergey Novoselov]], [[Aleksei Gusev|AUTHOR Aleksei Gusev]], [[Artem Ivanov|AUTHOR Artem Ivanov]], [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]], [[Andrey Shulipa|AUTHOR Andrey Shulipa]], [[Anastasia Avdeeva|AUTHOR Anastasia Avdeeva]], [[Artem Gorlanov|AUTHOR Artem Gorlanov]], [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|14:30–15:30, Tuesday 17 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Junichi Yamagishi, Nicholas Evans|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-16|PAPER Tue-SS-4-4-16 — ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection]]</div>|<div class="cpsessionviewpapertitle">ASVspoof 2019: Future Horizons in Spoofed and Fake Audio Detection</div><div class="cpsessionviewpaperauthor">[[Massimiliano Todisco|AUTHOR Massimiliano Todisco]], [[Xin Wang|AUTHOR Xin Wang]], [[Ville Vestman|AUTHOR Ville Vestman]], [[Md. Sahidullah|AUTHOR Md. Sahidullah]], [[Héctor Delgado|AUTHOR Héctor Delgado]], [[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Nicholas Evans|AUTHOR Nicholas Evans]], [[Tomi H. Kinnunen|AUTHOR Tomi H. Kinnunen]], [[Kong Aik Lee|AUTHOR Kong Aik Lee]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-17|PAPER Tue-SS-4-4-17 — Discussion]]</div>|<div class="cpsessionviewpapertitle">Discussion</div><div class="cpsessionviewpaperauthor"></div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–14:30, Tuesday 17 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Junichi Yamagishi, Nicholas Evans|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191794.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-1|PAPER Tue-SS-4-4-1 — ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks]]</div>|<div class="cpsessionviewpapertitle">ASSERT: Anti-Spoofing with Squeeze-Excitation and Residual Networks</div><div class="cpsessionviewpaperauthor">[[Cheng-I Lai|AUTHOR Cheng-I Lai]], [[Nanxin Chen|AUTHOR Nanxin Chen]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192505.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-2|PAPER Tue-SS-4-4-2 — Ensemble Models for Spoofing Detection in Automatic Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Ensemble Models for Spoofing Detection in Automatic Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Bhusan Chettri|AUTHOR Bhusan Chettri]], [[Daniel Stoller|AUTHOR Daniel Stoller]], [[Veronica Morfi|AUTHOR Veronica Morfi]], [[Marco A. Martínez Ramírez|AUTHOR Marco A. Martínez Ramírez]], [[Emmanouil Benetos|AUTHOR Emmanouil Benetos]], [[Bob L. Sturm|AUTHOR Bob L. Sturm]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191230.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-3|PAPER Tue-SS-4-4-3 — The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion]]</div>|<div class="cpsessionviewpapertitle">The DKU Replay Detection System for the ASVspoof 2019 Challenge: On Data Augmentation, Feature Representation, Classification, and Fusion</div><div class="cpsessionviewpaperauthor">[[Weicheng Cai|AUTHOR Weicheng Cai]], [[Haiwei Wu|AUTHOR Haiwei Wu]], [[Danwei Cai|AUTHOR Danwei Cai]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192676.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-4|PAPER Tue-SS-4-4-4 — Robust Bayesian and Light Neural Networks for Voice Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">Robust Bayesian and Light Neural Networks for Voice Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Radosław Białobrzeski|AUTHOR Radosław Białobrzeski]], [[Michał Kośmider|AUTHOR Michał Kośmider]], [[Mateusz Matuszewski|AUTHOR Mateusz Matuszewski]], [[Marcin Plata|AUTHOR Marcin Plata]], [[Alexander Rakowski|AUTHOR Alexander Rakowski]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191768.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-5|PAPER Tue-SS-4-4-5 — STC Antispoofing Systems for the ASVspoof2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">STC Antispoofing Systems for the ASVspoof2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]], [[Sergey Novoselov|AUTHOR Sergey Novoselov]], [[Andzhukaev Tseren|AUTHOR Andzhukaev Tseren]], [[Marina Volkova|AUTHOR Marina Volkova]], [[Artem Gorlanov|AUTHOR Artem Gorlanov]], [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192170.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-6|PAPER Tue-SS-4-4-6 — The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">The SJTU Robust Anti-Spoofing System for the ASVspoof 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Yexin Yang|AUTHOR Yexin Yang]], [[Hongji Wang|AUTHOR Hongji Wang]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Zhengyang Chen|AUTHOR Zhengyang Chen]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191623.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-7|PAPER Tue-SS-4-4-7 — IIIT-H Spoofing Countermeasures for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">IIIT-H Spoofing Countermeasures for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2019</div><div class="cpsessionviewpaperauthor">[[K.N.R.K. Raju Alluri|AUTHOR K.N.R.K. Raju Alluri]], [[Anil Kumar Vuppala|AUTHOR Anil Kumar Vuppala]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191698.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-8|PAPER Tue-SS-4-4-8 — Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning]]</div>|<div class="cpsessionviewpapertitle">Anti-Spoofing Speaker Verification System with Multi-Feature Integration and Multi-Task Learning</div><div class="cpsessionviewpaperauthor">[[Rongjin Li|AUTHOR Rongjin Li]], [[Miao Zhao|AUTHOR Miao Zhao]], [[Zheng Li|AUTHOR Zheng Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191760.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-9|PAPER Tue-SS-4-4-9 — Speech Replay Detection with x-Vector Attack Embeddings and Spectral Features]]</div>|<div class="cpsessionviewpapertitle">Speech Replay Detection with x-Vector Attack Embeddings and Spectral Features</div><div class="cpsessionviewpaperauthor">[[Jennifer Williams|AUTHOR Jennifer Williams]], [[Joanna Rownicka|AUTHOR Joanna Rownicka]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191887.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-10|PAPER Tue-SS-4-4-10 — Long Range Acoustic Features for Spoofed Speech Detection]]</div>|<div class="cpsessionviewpapertitle">Long Range Acoustic Features for Spoofed Speech Detection</div><div class="cpsessionviewpaperauthor">[[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Jichen Yang|AUTHOR Jichen Yang]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-11|PAPER Tue-SS-4-4-11 — Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System]]</div>|<div class="cpsessionviewpapertitle">Transfer-Representation Learning for Detecting Spoofing Attacks with Converted and Synthesized Speech in Automatic Speaker Verification System</div><div class="cpsessionviewpaperauthor">[[Su-Yu Chang|AUTHOR Su-Yu Chang]], [[Kai-Cheng Wu|AUTHOR Kai-Cheng Wu]], [[Chia-Ping Chen|AUTHOR Chia-Ping Chen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192212.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-12|PAPER Tue-SS-4-4-12 — A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">A Light Convolutional GRU-RNN Deep Feature Extractor for ASV Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Alejandro Gomez-Alanis|AUTHOR Alejandro Gomez-Alanis]], [[Antonio M. Peinado|AUTHOR Antonio M. Peinado]], [[Jose A. Gonzalez|AUTHOR Jose A. Gonzalez]], [[Angel M. Gomez|AUTHOR Angel M. Gomez]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192892.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-13|PAPER Tue-SS-4-4-13 — Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">Detecting Spoofing Attacks Using VGG and SincNet: BUT-Omilia Submission to ASVspoof 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Hossein Zeinali|AUTHOR Hossein Zeinali]], [[Themos Stafylakis|AUTHOR Themos Stafylakis]], [[Georgia Athanasopoulou|AUTHOR Georgia Athanasopoulou]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Ioannis Gkinis|AUTHOR Ioannis Gkinis]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Jan Černocký|AUTHOR Jan Černocký]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193174.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-14|PAPER Tue-SS-4-4-14 — Deep Residual Neural Networks for Audio Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">Deep Residual Neural Networks for Audio Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Moustafa Alzantot|AUTHOR Moustafa Alzantot]], [[Ziqi Wang|AUTHOR Ziqi Wang]], [[Mani B. Srivastava|AUTHOR Mani B. Srivastava]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191991.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-4-4-15|PAPER Tue-SS-4-4-15 — Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">Replay Attack Detection with Complementary High-Resolution Information Using End-to-End DNN for the ASVspoof 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Jee-weon Jung|AUTHOR Jee-weon Jung]], [[Hye-jin Shim|AUTHOR Hye-jin Shim]], [[Hee-Soo Heo|AUTHOR Hee-Soo Heo]], [[Ha-Jin Yu|AUTHOR Ha-Jin Yu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Tuesday 17 Sept 2019, Hall 3|<|
|^Chair:&nbsp;|^Ewan Dunbar, Laurent Besacier|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192904.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-1|PAPER Tue-SS-5-6-1 — The Zero Resource Speech Challenge 2019: TTS Without T]]</div>|<div class="cpsessionviewpapertitle">The Zero Resource Speech Challenge 2019: TTS Without T</div><div class="cpsessionviewpaperauthor">[[Ewan Dunbar|AUTHOR Ewan Dunbar]], [[Robin Algayres|AUTHOR Robin Algayres]], [[Julien Karadayi|AUTHOR Julien Karadayi]], [[Mathieu Bernard|AUTHOR Mathieu Bernard]], [[Juan Benjumea|AUTHOR Juan Benjumea]], [[Xuan-Nga Cao|AUTHOR Xuan-Nga Cao]], [[Lucie Miskic|AUTHOR Lucie Miskic]], [[Charlotte Dugrain|AUTHOR Charlotte Dugrain]], [[Lucas Ondel|AUTHOR Lucas Ondel]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Laurent Besacier|AUTHOR Laurent Besacier]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Emmanuel Dupoux|AUTHOR Emmanuel Dupoux]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191337.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-2|PAPER Tue-SS-5-6-2 — Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling]]</div>|<div class="cpsessionviewpapertitle">Combining Adversarial Training and Disentangled Speech Representation for Robust Zero-Resource Subword Modeling</div><div class="cpsessionviewpaperauthor">[[Siyuan Feng|AUTHOR Siyuan Feng]], [[Tan Lee|AUTHOR Tan Lee]], [[Zhiyuan Peng|AUTHOR Zhiyuan Peng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-3|PAPER Tue-SS-5-6-3 — Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">Temporally-Aware Acoustic Unit Discovery for Zerospeech 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Bolaji Yusuf|AUTHOR Bolaji Yusuf]], [[Alican Gök|AUTHOR Alican Gök]], [[Batuhan Gundogdu|AUTHOR Batuhan Gundogdu]], [[Oyku Deniz Kose|AUTHOR Oyku Deniz Kose]], [[Murat Saraclar|AUTHOR Murat Saraclar]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191518.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-4|PAPER Tue-SS-5-6-4 — Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Acoustic Unit Discovery for Speech Synthesis Using Discrete Latent-Variable Neural Networks</div><div class="cpsessionviewpaperauthor">[[Ryan Eloff|AUTHOR Ryan Eloff]], [[André Nortje|AUTHOR André Nortje]], [[Benjamin van Niekerk|AUTHOR Benjamin van Niekerk]], [[Avashna Govender|AUTHOR Avashna Govender]], [[Leanne Nortje|AUTHOR Leanne Nortje]], [[Arnu Pretorius|AUTHOR Arnu Pretorius]], [[Elan van Biljon|AUTHOR Elan van Biljon]], [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]], [[Lisa van Staden|AUTHOR Lisa van Staden]], [[Herman Kamper|AUTHOR Herman Kamper]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192048.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-5|PAPER Tue-SS-5-6-5 — Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Unsupervised End-to-End Learning of Discrete Linguistic Units for Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Andy T. Liu|AUTHOR Andy T. Liu]], [[Po-chun Hsu|AUTHOR Po-chun Hsu]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192336.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-6|PAPER Tue-SS-5-6-6 — Zero Resource Speech Synthesis Using Transcripts Derived from Perceptual Acoustic Units]]</div>|<div class="cpsessionviewpapertitle">Zero Resource Speech Synthesis Using Transcripts Derived from Perceptual Acoustic Units</div><div class="cpsessionviewpaperauthor">[[Karthik Pandia D. S.|AUTHOR Karthik Pandia D. S.]], [[Hema A. Murthy|AUTHOR Hema A. Murthy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-7|PAPER Tue-SS-5-6-7 — VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">VQVAE Unsupervised Unit Discovery and Multi-Scale Code2Spec Inverter for Zerospeech Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Andros Tjandra|AUTHOR Andros Tjandra]], [[Berrak Sisman|AUTHOR Berrak Sisman]], [[Mingyang Zhang|AUTHOR Mingyang Zhang]], [[Sakriani Sakti|AUTHOR Sakriani Sakti]], [[Haizhou Li|AUTHOR Haizhou Li]], [[Satoshi Nakamura|AUTHOR Satoshi Nakamura]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Tue-SS-5-6-8|PAPER Tue-SS-5-6-8 — General Discussion]]</div>|<div class="cpsessionviewpapertitle">General Discussion</div><div class="cpsessionviewpaperauthor"></div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|08:30–09:30, Wednesday 18 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-K-3|PAPER Wed-K-3 — Physiology and Physics of Voice Production]]</div>|<div class="cpsessionviewpapertitle">Physiology and Physics of Voice Production</div><div class="cpsessionviewpaperauthor">[[Manfred Kaltenbacher|AUTHOR Manfred Kaltenbacher]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Margaret Zellers, Jan Michalsky|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-O-6-1-1|PAPER Wed-O-6-1-1 — Survey Talk: Prosody Research and Applications: The State of the Art]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Prosody Research and Applications: The State of the Art</div><div class="cpsessionviewpaperauthor">[[Nigel G. Ward|AUTHOR Nigel G. Ward]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-1-2|PAPER Wed-O-6-1-2 — Dimensions of Prosodic Prominence in an Attractor Model]]</div>|<div class="cpsessionviewpapertitle">Dimensions of Prosodic Prominence in an Attractor Model</div><div class="cpsessionviewpaperauthor">[[Simon Roessig|AUTHOR Simon Roessig]], [[Doris Mücke|AUTHOR Doris Mücke]], [[Lena Pagel|AUTHOR Lena Pagel]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-1-3|PAPER Wed-O-6-1-3 — Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings]]</div>|<div class="cpsessionviewpapertitle">Comparative Analysis of Prosodic Characteristics Using WaveNet Embeddings</div><div class="cpsessionviewpaperauthor">[[Antti Suni|AUTHOR Antti Suni]], [[Marcin Włodarczak|AUTHOR Marcin Włodarczak]], [[Martti Vainio|AUTHOR Martti Vainio]], [[Juraj Šimko|AUTHOR Juraj Šimko]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-1-4|PAPER Wed-O-6-1-4 — The Role of Voice Quality in the Perception of Prominence in Synthetic Speech]]</div>|<div class="cpsessionviewpapertitle">The Role of Voice Quality in the Perception of Prominence in Synthetic Speech</div><div class="cpsessionviewpaperauthor">[[Andy Murphy|AUTHOR Andy Murphy]], [[Irena Yanushevskaya|AUTHOR Irena Yanushevskaya]], [[Ailbhe Ní Chasaide|AUTHOR Ailbhe Ní Chasaide]], [[Christer Gobl|AUTHOR Christer Gobl]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192856.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-1-5|PAPER Wed-O-6-1-5 — Phonological Awareness of French Rising Contours in Japanese Learners]]</div>|<div class="cpsessionviewpapertitle">Phonological Awareness of French Rising Contours in Japanese Learners</div><div class="cpsessionviewpaperauthor">[[Rachel Albar|AUTHOR Rachel Albar]], [[Hiyon Yoo|AUTHOR Hiyon Yoo]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Katrin Kirchhoff, Roger Moore|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191855.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-2-1|PAPER Wed-O-6-2-1 — Audio Classification of Bit-Representation Waveform]]</div>|<div class="cpsessionviewpapertitle">Audio Classification of Bit-Representation Waveform</div><div class="cpsessionviewpaperauthor">[[Masaki Okawa|AUTHOR Masaki Okawa]], [[Takuya Saito|AUTHOR Takuya Saito]], [[Naoki Sawada|AUTHOR Naoki Sawada]], [[Hiromitsu Nishizaki|AUTHOR Hiromitsu Nishizaki]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191421.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-2-2|PAPER Wed-O-6-2-2 — Locality-Constrained Linear Coding Based Fused Visual Features for Robust Acoustic Event Classification]]</div>|<div class="cpsessionviewpapertitle">Locality-Constrained Linear Coding Based Fused Visual Features for Robust Acoustic Event Classification</div><div class="cpsessionviewpaperauthor">[[Manjunath Mulimani|AUTHOR Manjunath Mulimani]], [[Shashidhar G. Koolagudi|AUTHOR Shashidhar G. Koolagudi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-2-3|PAPER Wed-O-6-2-3 — Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection]]</div>|<div class="cpsessionviewpapertitle">Learning How to Listen: A Temporal-Frequential Attention Model for Sound Event Detection</div><div class="cpsessionviewpaperauthor">[[Yu-Han Shen|AUTHOR Yu-Han Shen]], [[Ke-Xin He|AUTHOR Ke-Xin He]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192731.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-2-4|PAPER Wed-O-6-2-4 — A Deep Residual Network for Large-Scale Acoustic Scene Analysis]]</div>|<div class="cpsessionviewpapertitle">A Deep Residual Network for Large-Scale Acoustic Scene Analysis</div><div class="cpsessionviewpaperauthor">[[Logan Ford|AUTHOR Logan Ford]], [[Hao Tang|AUTHOR Hao Tang]], [[François Grondin|AUTHOR François Grondin]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193074.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-2-5|PAPER Wed-O-6-2-5 — Supervised Classifiers for Audio Impairments with Noisy Labels]]</div>|<div class="cpsessionviewpapertitle">Supervised Classifiers for Audio Impairments with Noisy Labels</div><div class="cpsessionviewpaperauthor">[[Chandan K.A. Reddy|AUTHOR Chandan K.A. Reddy]], [[Ross Cutler|AUTHOR Ross Cutler]], [[Johannes Gehrke|AUTHOR Johannes Gehrke]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-2-6|PAPER Wed-O-6-2-6 — Self-Attention for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Self-Attention for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Lorenzo Tarantino|AUTHOR Lorenzo Tarantino]], [[Philip N. Garner|AUTHOR Philip N. Garner]], [[Alexandros Lazaridis|AUTHOR Alexandros Lazaridis]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Julia Hirschberg, Sunayana Sitaram|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191761.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-3-1|PAPER Wed-O-6-3-1 — Unsupervised Singing Voice Conversion]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Singing Voice Conversion</div><div class="cpsessionviewpaperauthor">[[Eliya Nachmani|AUTHOR Eliya Nachmani]], [[Lior Wolf|AUTHOR Lior Wolf]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191722.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-3-2|PAPER Wed-O-6-3-2 — Adversarially Trained End-to-End Korean Singing Voice Synthesis System]]</div>|<div class="cpsessionviewpapertitle">Adversarially Trained End-to-End Korean Singing Voice Synthesis System</div><div class="cpsessionviewpaperauthor">[[Juheon Lee|AUTHOR Juheon Lee]], [[Hyeong-Seok Choi|AUTHOR Hyeong-Seok Choi]], [[Chang-Bin Jeon|AUTHOR Chang-Bin Jeon]], [[Junghyun Koo|AUTHOR Junghyun Koo]], [[Kyogu Lee|AUTHOR Kyogu Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191563.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-3-3|PAPER Wed-O-6-3-3 — Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling]]</div>|<div class="cpsessionviewpapertitle">Singing Voice Synthesis Using Deep Autoregressive Neural Networks for Acoustic Modeling</div><div class="cpsessionviewpaperauthor">[[Yuan-Hao Yi|AUTHOR Yuan-Hao Yi]], [[Yang Ai|AUTHOR Yang Ai]], [[Zhen-Hua Ling|AUTHOR Zhen-Hua Ling]], [[Li-Rong Dai|AUTHOR Li-Rong Dai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192848.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-3-4|PAPER Wed-O-6-3-4 — Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis]]</div>|<div class="cpsessionviewpapertitle">Conditional Variational Auto-Encoder for Text-Driven Expressive AudioVisual Speech Synthesis</div><div class="cpsessionviewpaperauthor">[[Sara Dahmani|AUTHOR Sara Dahmani]], [[Vincent Colotte|AUTHOR Vincent Colotte]], [[Valérian Girard|AUTHOR Valérian Girard]], [[Slim Ouni|AUTHOR Slim Ouni]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-3-5|PAPER Wed-O-6-3-5 — A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis]]</div>|<div class="cpsessionviewpapertitle">A Strategy for Improved Phone-Level Lyrics-to-Audio Alignment for Speech-to-Singing Synthesis</div><div class="cpsessionviewpaperauthor">[[David Ayllón|AUTHOR David Ayllón]], [[Fernando Villavicencio|AUTHOR Fernando Villavicencio]], [[Pierre Lanchantin|AUTHOR Pierre Lanchantin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192097.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-3-6|PAPER Wed-O-6-3-6 — Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning]]</div>|<div class="cpsessionviewpapertitle">Modeling Labial Coarticulation with Bidirectional Gated Recurrent Networks and Transfer Learning</div><div class="cpsessionviewpaperauthor">[[Théo Biasutto--Lervat|AUTHOR Théo Biasutto--Lervat]], [[Sara Dahmani|AUTHOR Sara Dahmani]], [[Slim Ouni|AUTHOR Slim Ouni]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Rohit Prabhavalkar, Jia Cui|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192680.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-5-1|PAPER Wed-O-6-5-1 — SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Daniel S. Park|AUTHOR Daniel S. Park]], [[William Chan|AUTHOR William Chan]], [[Yu Zhang|AUTHOR Yu Zhang]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]], [[Barret Zoph|AUTHOR Barret Zoph]], [[Ekin D. Cubuk|AUTHOR Ekin D. Cubuk]], [[Quoc V. Le|AUTHOR Quoc V. Le]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-5-2|PAPER Wed-O-6-5-2 — Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Forget a Bit to Learn Better: Soft Forgetting for CTC-Based Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[George Saon|AUTHOR George Saon]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[Michael Picheny|AUTHOR Michael Picheny]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-5-3|PAPER Wed-O-6-5-3 — Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Online Hybrid CTC/Attention Architecture for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Haoran Miao|AUTHOR Haoran Miao]], [[Gaofeng Cheng|AUTHOR Gaofeng Cheng]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Ta Li|AUTHOR Ta Li]], [[Yonghong Yan|AUTHOR Yonghong Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192700.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-5-4|PAPER Wed-O-6-5-4 — A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">A Highly Efficient Distributed Deep Learning System for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Wei Zhang|AUTHOR Wei Zhang]], [[Xiaodong Cui|AUTHOR Xiaodong Cui]], [[Ulrich Finkler|AUTHOR Ulrich Finkler]], [[George Saon|AUTHOR George Saon]], [[Abdullah Kayi|AUTHOR Abdullah Kayi]], [[Alper Buyuktosunoglu|AUTHOR Alper Buyuktosunoglu]], [[Brian Kingsbury|AUTHOR Brian Kingsbury]], [[David Kung|AUTHOR David Kung]], [[Michael Picheny|AUTHOR Michael Picheny]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193192.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-5-5|PAPER Wed-O-6-5-5 — Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System]]</div>|<div class="cpsessionviewpapertitle">Knowledge Distillation for End-to-End Monaural Multi-Talker ASR System</div><div class="cpsessionviewpaperauthor">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Xuankai Chang|AUTHOR Xuankai Chang]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191728.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-6-5-6|PAPER Wed-O-6-5-6 — Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech]]</div>|<div class="cpsessionviewpapertitle">Analysis of Deep Clustering as Preprocessing for Automatic Speech Recognition of Sparsely Overlapping Speech</div><div class="cpsessionviewpaperauthor">[[Tobias Menne|AUTHOR Tobias Menne]], [[Ilya Sklyar|AUTHOR Ilya Sklyar]], [[Ralf Schlüter|AUTHOR Ralf Schlüter]], [[Hermann Ney|AUTHOR Hermann Ney]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^Mirjam Broersma, Sharon Peperkamp|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-O-7-1-1|PAPER Wed-O-7-1-1 — Survey Talk: Recognition of Foreign-Accented Speech: Challenges and Opportunities for Human and Computer Speech Communication]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Recognition of Foreign-Accented Speech: Challenges and Opportunities for Human and Computer Speech Communication</div><div class="cpsessionviewpaperauthor">[[Ann R. Bradlow|AUTHOR Ann R. Bradlow]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192763.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-1-2|PAPER Wed-O-7-1-2 — The Effects of Time Expansion on English as a Second Language Individuals]]</div>|<div class="cpsessionviewpapertitle">The Effects of Time Expansion on English as a Second Language Individuals</div><div class="cpsessionviewpaperauthor">[[John S. Novak III|AUTHOR John S. Novak III]], [[Daniel Bunn|AUTHOR Daniel Bunn]], [[Robert V. Kenyon|AUTHOR Robert V. Kenyon]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-1-3|PAPER Wed-O-7-1-3 — Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features]]</div>|<div class="cpsessionviewpapertitle">Capturing L1 Influence on L2 Pronunciation by Simulating Perceptual Space Using Acoustic Features</div><div class="cpsessionviewpaperauthor">[[Shuju Shi|AUTHOR Shuju Shi]], [[Chilin Shih|AUTHOR Chilin Shih]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191403.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-1-4|PAPER Wed-O-7-1-4 — Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones]]</div>|<div class="cpsessionviewpapertitle">Cognitive Factors in Thai-Naïve Mandarin Speakers’ Imitation of Thai Lexical Tones</div><div class="cpsessionviewpaperauthor">[[Juqiang Chen|AUTHOR Juqiang Chen]], [[Catherine T. Best|AUTHOR Catherine T. Best]], [[Mark Antoniou|AUTHOR Mark Antoniou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192446.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-1-5|PAPER Wed-O-7-1-5 — Foreign-Language Knowledge Enhances Artificial-Language Segmentation]]</div>|<div class="cpsessionviewpapertitle">Foreign-Language Knowledge Enhances Artificial-Language Segmentation</div><div class="cpsessionviewpaperauthor">[[Annie Tremblay|AUTHOR Annie Tremblay]], [[Mirjam Broersma|AUTHOR Mirjam Broersma]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Stavros Tsakalidis, Xavier Anguera|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191305.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-2-1|PAPER Wed-O-7-2-1 — Neural Named Entity Recognition from Subword Units]]</div>|<div class="cpsessionviewpapertitle">Neural Named Entity Recognition from Subword Units</div><div class="cpsessionviewpaperauthor">[[Abdalghani Abujabal|AUTHOR Abdalghani Abujabal]], [[Judith Gaspers|AUTHOR Judith Gaspers]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192981.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-2-2|PAPER Wed-O-7-2-2 — Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Acoustic Segmentation and Clustering Using Siamese Network Embeddings</div><div class="cpsessionviewpaperauthor">[[Saurabhchand Bhati|AUTHOR Saurabhchand Bhati]], [[Shekhar Nayak|AUTHOR Shekhar Nayak]], [[K. Sri Rama Murty|AUTHOR K. Sri Rama Murty]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192413.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-2-3|PAPER Wed-O-7-2-3 — An Empirical Evaluation of DTW Subsampling Methods for Keyword Search]]</div>|<div class="cpsessionviewpapertitle">An Empirical Evaluation of DTW Subsampling Methods for Keyword Search</div><div class="cpsessionviewpaperauthor">[[Bolaji Yusuf|AUTHOR Bolaji Yusuf]], [[Murat Saraclar|AUTHOR Murat Saraclar]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193119.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-2-4|PAPER Wed-O-7-2-4 — Linguistically-Informed Training of Acoustic Word Embeddings for Low-Resource Languages]]</div>|<div class="cpsessionviewpapertitle">Linguistically-Informed Training of Acoustic Word Embeddings for Low-Resource Languages</div><div class="cpsessionviewpaperauthor">[[Zixiaofan Yang|AUTHOR Zixiaofan Yang]], [[Julia Hirschberg|AUTHOR Julia Hirschberg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191487.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-2-5|PAPER Wed-O-7-2-5 — Multimodal Word Discovery and Retrieval with Phone Sequence and Image Concepts]]</div>|<div class="cpsessionviewpapertitle">Multimodal Word Discovery and Retrieval with Phone Sequence and Image Concepts</div><div class="cpsessionviewpaperauthor">[[Liming Wang|AUTHOR Liming Wang]], [[Mark A. Hasegawa-Johnson|AUTHOR Mark A. Hasegawa-Johnson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192029.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-2-6|PAPER Wed-O-7-2-6 — Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings]]</div>|<div class="cpsessionviewpapertitle">Empirical Evaluation of Sequence-to-Sequence Models for Word Discovery in Low-Resource Settings</div><div class="cpsessionviewpaperauthor">[[Marcely Zanon Boito|AUTHOR Marcely Zanon Boito]], [[Aline Villavicencio|AUTHOR Aline Villavicencio]], [[Laurent Besacier|AUTHOR Laurent Besacier]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Hugo Van hamme, Masahito Togami|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191488.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-4-1|PAPER Wed-O-7-4-1 — Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation]]</div>|<div class="cpsessionviewpapertitle">Direct-Path Signal Cross-Correlation Estimation for Sound Source Localization in Reverberation</div><div class="cpsessionviewpaperauthor">[[Wei Xue|AUTHOR Wei Xue]], [[Ying Tong|AUTHOR Ying Tong]], [[Guohong Ding|AUTHOR Guohong Ding]], [[Chao Zhang|AUTHOR Chao Zhang]], [[Tao Ma|AUTHOR Tao Ma]], [[Xiaodong He|AUTHOR Xiaodong He]], [[Bowen Zhou|AUTHOR Bowen Zhou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192653.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-4-2|PAPER Wed-O-7-4-2 — Multiple Sound Source Localization with SVD-PHAT]]</div>|<div class="cpsessionviewpapertitle">Multiple Sound Source Localization with SVD-PHAT</div><div class="cpsessionviewpaperauthor">[[François Grondin|AUTHOR François Grondin]], [[James Glass|AUTHOR James Glass]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193158.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-4-3|PAPER Wed-O-7-4-3 — Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking]]</div>|<div class="cpsessionviewpapertitle">Robust DOA Estimation Based on Convolutional Neural Network and Time-Frequency Masking</div><div class="cpsessionviewpaperauthor">[[Wangyou Zhang|AUTHOR Wangyou Zhang]], [[Ying Zhou|AUTHOR Ying Zhou]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191289.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-4-4|PAPER Wed-O-7-4-4 — Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming]]</div>|<div class="cpsessionviewpapertitle">Multichannel Loss Function for Supervised Speech Source Separation by Mask-Based Beamforming</div><div class="cpsessionviewpaperauthor">[[Yoshiki Masuyama|AUTHOR Yoshiki Masuyama]], [[Masahito Togami|AUTHOR Masahito Togami]], [[Tatsuya Komatsu|AUTHOR Tatsuya Komatsu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191474.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-4-5|PAPER Wed-O-7-4-5 — Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction]]</div>|<div class="cpsessionviewpapertitle">Direction-Aware Speaker Beam for Multi-Channel Speaker Extraction</div><div class="cpsessionviewpaperauthor">[[Guanjun Li|AUTHOR Guanjun Li]], [[Shan Liang|AUTHOR Shan Liang]], [[Shuai Nie|AUTHOR Shuai Nie]], [[Wenju Liu|AUTHOR Wenju Liu]], [[Meng Yu|AUTHOR Meng Yu]], [[Lianwu Chen|AUTHOR Lianwu Chen]], [[Shouye Peng|AUTHOR Shouye Peng]], [[Changliang Li|AUTHOR Changliang Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191513.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-4-6|PAPER Wed-O-7-4-6 — Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues]]</div>|<div class="cpsessionviewpapertitle">Multimodal SpeakerBeam: Single Channel Target Speech Extraction with Audio-Visual Speaker Clues</div><div class="cpsessionviewpaperauthor">[[Tsubasa Ochiai|AUTHOR Tsubasa Ochiai]], [[Marc Delcroix|AUTHOR Marc Delcroix]], [[Keisuke Kinoshita|AUTHOR Keisuke Kinoshita]], [[Atsunori Ogawa|AUTHOR Atsunori Ogawa]], [[Tomohiro Nakatani|AUTHOR Tomohiro Nakatani]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^John H.L. Hansen, Timo Gerkmann|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191924.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-5-1|PAPER Wed-O-7-5-1 — Speech Denoising with Deep Feature Losses]]</div>|<div class="cpsessionviewpapertitle">Speech Denoising with Deep Feature Losses</div><div class="cpsessionviewpaperauthor">[[François G. Germain|AUTHOR François G. Germain]], [[Qifeng Chen|AUTHOR Qifeng Chen]], [[Vladlen Koltun|AUTHOR Vladlen Koltun]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191101.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-5-2|PAPER Wed-O-7-5-2 — VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking]]</div>|<div class="cpsessionviewpapertitle">VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking</div><div class="cpsessionviewpaperauthor">[[Quan Wang|AUTHOR Quan Wang]], [[Hannah Muckenhirn|AUTHOR Hannah Muckenhirn]], [[Kevin Wilson|AUTHOR Kevin Wilson]], [[Prashant Sridhar|AUTHOR Prashant Sridhar]], [[Zelin Wu|AUTHOR Zelin Wu]], [[John R. Hershey|AUTHOR John R. Hershey]], [[Rif A. Saurous|AUTHOR Rif A. Saurous]], [[Ron J. Weiss|AUTHOR Ron J. Weiss]], [[Ye Jia|AUTHOR Ye Jia]], [[Ignacio Lopez Moreno|AUTHOR Ignacio Lopez Moreno]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191777.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-5-3|PAPER Wed-O-7-5-3 — Incorporating Symbolic Sequential Modeling for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Incorporating Symbolic Sequential Modeling for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Chien-Feng Liao|AUTHOR Chien-Feng Liao]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191197.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-5-4|PAPER Wed-O-7-5-4 — Maximum a posteriori Speech Enhancement Based on Double Spectrum]]</div>|<div class="cpsessionviewpapertitle">Maximum a posteriori Speech Enhancement Based on Double Spectrum</div><div class="cpsessionviewpaperauthor">[[Pejman Mowlaee|AUTHOR Pejman Mowlaee]], [[Daniel Scheran|AUTHOR Daniel Scheran]], [[Johannes Stahl|AUTHOR Johannes Stahl]], [[Sean U.N. Wood|AUTHOR Sean U.N. Wood]], [[W. Bastiaan Kleijn|AUTHOR W. Bastiaan Kleijn]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192792.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-5-5|PAPER Wed-O-7-5-5 — Coarse-to-Fine Optimization for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Coarse-to-Fine Optimization for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Jian Yao|AUTHOR Jian Yao]], [[Ahmad Al-Dahle|AUTHOR Ahmad Al-Dahle]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191344.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-5-6|PAPER Wed-O-7-5-6 — Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Kernel Machines Beat Deep Neural Networks on Mask-Based Single-Channel Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Like Hui|AUTHOR Like Hui]], [[Siyuan Ma|AUTHOR Siyuan Ma]], [[Mikhail Belkin|AUTHOR Mikhail Belkin]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Main Hall|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-O-8-1-1|PAPER Wed-O-8-1-1 — Survey Talk: Multimodal Processing of Speech and Language]]</div>|<div class="cpsessionviewpapertitle">Survey Talk: Multimodal Processing of Speech and Language</div><div class="cpsessionviewpaperauthor">[[Florian Metze|AUTHOR Florian Metze]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193273.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-1-2|PAPER Wed-O-8-1-2 — MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices]]</div>|<div class="cpsessionviewpapertitle">MobiVSR : Efficient and Light-Weight Neural Network for Visual Speech Recognition on Mobile Devices</div><div class="cpsessionviewpaperauthor">[[Nilay Shrivastava|AUTHOR Nilay Shrivastava]], [[Astitwa Saxena|AUTHOR Astitwa Saxena]], [[Yaman Kumar|AUTHOR Yaman Kumar]], [[Rajiv Ratn Shah|AUTHOR Rajiv Ratn Shah]], [[Amanda Stent|AUTHOR Amanda Stent]], [[Debanjan Mahata|AUTHOR Debanjan Mahata]], [[Preeti Kaur|AUTHOR Preeti Kaur]], [[Roger Zimmermann|AUTHOR Roger Zimmermann]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193237.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-1-3|PAPER Wed-O-8-1-3 — Speaker Adaptation for Lip-Reading Using Visual Identity Vectors]]</div>|<div class="cpsessionviewpapertitle">Speaker Adaptation for Lip-Reading Using Visual Identity Vectors</div><div class="cpsessionviewpaperauthor">[[Pujitha Appan Kandala|AUTHOR Pujitha Appan Kandala]], [[Abhinav Thanda|AUTHOR Abhinav Thanda]], [[Dilip Kumar Margam|AUTHOR Dilip Kumar Margam]], [[Rohith Chandrashekar Aralikatti|AUTHOR Rohith Chandrashekar Aralikatti]], [[Tanay Sharma|AUTHOR Tanay Sharma]], [[Sharad Roy|AUTHOR Sharad Roy]], [[Shankar M. Venkatesan|AUTHOR Shankar M. Venkatesan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192618.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-1-4|PAPER Wed-O-8-1-4 — MobiLipNet: Resource-Efficient Deep Learning Based Lipreading]]</div>|<div class="cpsessionviewpapertitle">MobiLipNet: Resource-Efficient Deep Learning Based Lipreading</div><div class="cpsessionviewpaperauthor">[[Alexandros Koumparoulis|AUTHOR Alexandros Koumparoulis]], [[Gerasimos Potamianos|AUTHOR Gerasimos Potamianos]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191393.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-1-5|PAPER Wed-O-8-1-5 — LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading]]</div>|<div class="cpsessionviewpapertitle">LipSound: Neural Mel-Spectrogram Reconstruction for Lip Reading</div><div class="cpsessionviewpaperauthor">[[Leyuan Qu|AUTHOR Leyuan Qu]], [[Cornelius Weber|AUTHOR Cornelius Weber]], [[Stefan Wermter|AUTHOR Stefan Wermter]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 1|<|
|^Chair:&nbsp;|^Bhuvana Ramabhadran, Peter Bell|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191341.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-2-1|PAPER Wed-O-8-2-1 — Two-Pass End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Two-Pass End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Tara N. Sainath|AUTHOR Tara N. Sainath]], [[Ruoming Pang|AUTHOR Ruoming Pang]], [[David Rybach|AUTHOR David Rybach]], [[Yanzhang He|AUTHOR Yanzhang He]], [[Rohit Prabhavalkar|AUTHOR Rohit Prabhavalkar]], [[Wei Li|AUTHOR Wei Li]], [[Mirkó Visontai|AUTHOR Mirkó Visontai]], [[Qiao Liang|AUTHOR Qiao Liang]], [[Trevor Strohman|AUTHOR Trevor Strohman]], [[Yonghui Wu|AUTHOR Yonghui Wu]], [[Ian McGraw|AUTHOR Ian McGraw]], [[Chung-Cheng Chiu|AUTHOR Chung-Cheng Chiu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191626.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-2-2|PAPER Wed-O-8-2-2 — Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Extract, Adapt and Recognize: An End-to-End Neural Network for Corrupted Monaural Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Max W.Y. Lam|AUTHOR Max W.Y. Lam]], [[Jun Wang|AUTHOR Jun Wang]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]], [[Dan Su|AUTHOR Dan Su]], [[Dong Yu|AUTHOR Dong Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193216.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-2-3|PAPER Wed-O-8-2-3 — Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Multi-Resolution Char-to-BPE Cross-Attention Decoder for End-to-End Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Dhananjaya Gowda|AUTHOR Dhananjaya Gowda]], [[Abhinav Garg|AUTHOR Abhinav Garg]], [[Kwangyoun Kim|AUTHOR Kwangyoun Kim]], [[Mehul Kumar|AUTHOR Mehul Kumar]], [[Chanwoo Kim|AUTHOR Chanwoo Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191973.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-2-4|PAPER Wed-O-8-2-4 — Multi-Stride Self-Attention for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Multi-Stride Self-Attention for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Kyu J. Han|AUTHOR Kyu J. Han]], [[Jing Huang|AUTHOR Jing Huang]], [[Yun Tang|AUTHOR Yun Tang]], [[Xiaodong He|AUTHOR Xiaodong He]], [[Bowen Zhou|AUTHOR Bowen Zhou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192379.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-2-5|PAPER Wed-O-8-2-5 — LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">LF-MMI Training of Bayesian and Gaussian Process Time Delay Neural Networks for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xurong Xie|AUTHOR Xurong Xie]], [[Shansong Liu|AUTHOR Shansong Liu]], [[Max W.Y. Lam|AUTHOR Max W.Y. Lam]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191467.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-2-6|PAPER Wed-O-8-2-6 — Self-Teaching Networks]]</div>|<div class="cpsessionviewpapertitle">Self-Teaching Networks</div><div class="cpsessionviewpaperauthor">[[Liang Lu|AUTHOR Liang Lu]], [[Eric Sun|AUTHOR Eric Sun]], [[Yifan Gong|AUTHOR Yifan Gong]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Björn Schuller, Felix Burkhardt|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192594.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-3-1|PAPER Wed-O-8-3-1 — Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning]]</div>|<div class="cpsessionviewpapertitle">Improved End-to-End Speech Emotion Recognition Using Self Attention Mechanism and Multitask Learning</div><div class="cpsessionviewpaperauthor">[[Yuanchao Li|AUTHOR Yuanchao Li]], [[Tianyu Zhao|AUTHOR Tianyu Zhao]], [[Tatsuya Kawahara|AUTHOR Tatsuya Kawahara]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192710.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-3-2|PAPER Wed-O-8-3-2 — Continuous Emotion Recognition in Speech — Do We Need Recurrence?]]</div>|<div class="cpsessionviewpapertitle">Continuous Emotion Recognition in Speech — Do We Need Recurrence?</div><div class="cpsessionviewpaperauthor">[[Maximilian Schmitt|AUTHOR Maximilian Schmitt]], [[Nicholas Cummins|AUTHOR Nicholas Cummins]], [[Björn W. Schuller|AUTHOR Björn W. Schuller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-3-3|PAPER Wed-O-8-3-3 — Speech Based Emotion Prediction: Can a Linear Model Work?]]</div>|<div class="cpsessionviewpapertitle">Speech Based Emotion Prediction: Can a Linear Model Work?</div><div class="cpsessionviewpaperauthor">[[Anda Ouyang|AUTHOR Anda Ouyang]], [[Ting Dang|AUTHOR Ting Dang]], [[Vidhyasaharan Sethu|AUTHOR Vidhyasaharan Sethu]], [[Eliathamby Ambikairajah|AUTHOR Eliathamby Ambikairajah]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-3-4|PAPER Wed-O-8-3-4 — Speech Emotion Recognition Based on Multi-Label Emotion Existence Model]]</div>|<div class="cpsessionviewpapertitle">Speech Emotion Recognition Based on Multi-Label Emotion Existence Model</div><div class="cpsessionviewpaperauthor">[[Atsushi Ando|AUTHOR Atsushi Ando]], [[Ryo Masumura|AUTHOR Ryo Masumura]], [[Hosana Kamiyama|AUTHOR Hosana Kamiyama]], [[Satoshi Kobashikawa|AUTHOR Satoshi Kobashikawa]], [[Yushi Aono|AUTHOR Yushi Aono]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191708.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-3-5|PAPER Wed-O-8-3-5 — Gender De-Biasing in Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Gender De-Biasing in Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Cristina Gorrostieta|AUTHOR Cristina Gorrostieta]], [[Reza Lotfian|AUTHOR Reza Lotfian]], [[Kye Taylor|AUTHOR Kye Taylor]], [[Richard Brutti|AUTHOR Richard Brutti]], [[John Kane|AUTHOR John Kane]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192293.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-3-6|PAPER Wed-O-8-3-6 — CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">CycleGAN-Based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Fang Bao|AUTHOR Fang Bao]], [[Michael Neumann|AUTHOR Michael Neumann]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Helen Meng, Tomoki Toda|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191333.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-4-1|PAPER Wed-O-8-4-1 — Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System]]</div>|<div class="cpsessionviewpapertitle">Lombard Speech Synthesis Using Transfer Learning in a Tacotron Text-to-Speech System</div><div class="cpsessionviewpaperauthor">[[Bajibabu Bollepalli|AUTHOR Bajibabu Bollepalli]], [[Lauri Juvela|AUTHOR Lauri Juvela]], [[Paavo Alku|AUTHOR Paavo Alku]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191681.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-4-2|PAPER Wed-O-8-4-2 — Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion]]</div>|<div class="cpsessionviewpapertitle">Augmented CycleGANs for Continuous Scale Normal-to-Lombard Speaking Style Conversion</div><div class="cpsessionviewpaperauthor">[[Shreyas Seshadri|AUTHOR Shreyas Seshadri]], [[Lauri Juvela|AUTHOR Lauri Juvela]], [[Paavo Alku|AUTHOR Paavo Alku]], [[Okko Räsänen|AUTHOR Okko Räsänen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191778.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-4-3|PAPER Wed-O-8-4-3 — Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams]]</div>|<div class="cpsessionviewpapertitle">Foreign Accent Conversion by Synthesizing Speech from Phonetic Posteriorgrams</div><div class="cpsessionviewpaperauthor">[[Guanlong Zhao|AUTHOR Guanlong Zhao]], [[Shaojin Ding|AUTHOR Shaojin Ding]], [[Ricardo Gutierrez-Osuna|AUTHOR Ricardo Gutierrez-Osuna]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192512.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-4-4|PAPER Wed-O-8-4-4 — A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective]]</div>|<div class="cpsessionviewpapertitle">A Multi-Speaker Emotion Morphing Model Using Highway Networks and Maximum Likelihood Objective</div><div class="cpsessionviewpaperauthor">[[Ravi Shankar|AUTHOR Ravi Shankar]], [[Jacob Sager|AUTHOR Jacob Sager]], [[Archana Venkataraman|AUTHOR Archana Venkataraman]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192607.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-4-5|PAPER Wed-O-8-4-5 — Effects of Waveform PMF on Anti-Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">Effects of Waveform PMF on Anti-Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Itshak Lapidot|AUTHOR Itshak Lapidot]], [[Jean-François Bonastre|AUTHOR Jean-François Bonastre]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192878.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-4-6|PAPER Wed-O-8-4-6 — Nonparallel Emotional Speech Conversion]]</div>|<div class="cpsessionviewpapertitle">Nonparallel Emotional Speech Conversion</div><div class="cpsessionviewpaperauthor">[[Jian Gao|AUTHOR Jian Gao]], [[Deep Chakraborty|AUTHOR Deep Chakraborty]], [[Hamidou Tembine|AUTHOR Hamidou Tembine]], [[Olaitan Olaleye|AUTHOR Olaitan Olaleye]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 12|<|
|^Chair:&nbsp;|^Nicholas Evans, Seyed Omid Sadjadi|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-5-1|PAPER Wed-O-8-5-1 — Self-Supervised Speaker Embeddings]]</div>|<div class="cpsessionviewpapertitle">Self-Supervised Speaker Embeddings</div><div class="cpsessionviewpaperauthor">[[Themos Stafylakis|AUTHOR Themos Stafylakis]], [[Johan Rohdin|AUTHOR Johan Rohdin]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Petr Mizera|AUTHOR Petr Mizera]], [[Lukáš Burget|AUTHOR Lukáš Burget]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-5-2|PAPER Wed-O-8-5-2 — Privacy-Preserving Speaker Recognition with Cohort Score Normalisation]]</div>|<div class="cpsessionviewpapertitle">Privacy-Preserving Speaker Recognition with Cohort Score Normalisation</div><div class="cpsessionviewpaperauthor">[[Andreas Nautsch|AUTHOR Andreas Nautsch]], [[Jose Patino|AUTHOR Jose Patino]], [[Amos Treiber|AUTHOR Amos Treiber]], [[Themos Stafylakis|AUTHOR Themos Stafylakis]], [[Petr Mizera|AUTHOR Petr Mizera]], [[Massimiliano Todisco|AUTHOR Massimiliano Todisco]], [[Thomas Schneider|AUTHOR Thomas Schneider]], [[Nicholas Evans|AUTHOR Nicholas Evans]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192357.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-5-3|PAPER Wed-O-8-5-3 — Large Margin Softmax Loss for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Large Margin Softmax Loss for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Yi Liu|AUTHOR Yi Liu]], [[Liang He|AUTHOR Liang He]], [[Jia Liu|AUTHOR Jia Liu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192240.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-5-4|PAPER Wed-O-8-5-4 — A Deep Neural Network for Short-Segment Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">A Deep Neural Network for Short-Segment Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Amirhossein Hajavi|AUTHOR Amirhossein Hajavi]], [[Ali Etemad|AUTHOR Ali Etemad]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191704.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-5-5|PAPER Wed-O-8-5-5 — Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function]]</div>|<div class="cpsessionviewpapertitle">Deep Speaker Embedding Extraction with Channel-Wise Feature Responses and Additive Supervision Softmax Loss Function</div><div class="cpsessionviewpaperauthor">[[Jianfeng Zhou|AUTHOR Jianfeng Zhou]], [[Tao Jiang|AUTHOR Tao Jiang]], [[Zheng Li|AUTHOR Zheng Li]], [[Lin Li|AUTHOR Lin Li]], [[Qingyang Hong|AUTHOR Qingyang Hong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191496.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-8-5-6|PAPER Wed-O-8-5-6 — VoiceID Loss: Speech Enhancement for Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">VoiceID Loss: Speech Enhancement for Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Suwon Shon|AUTHOR Suwon Shon]], [[Hao Tang|AUTHOR Hao Tang]], [[James Glass|AUTHOR James Glass]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Jesus Villalba Lopez|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192956.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-1|PAPER Wed-P-6-A-1 — Blind Channel Response Estimation for Replay Attack Detection]]</div>|<div class="cpsessionviewpapertitle">Blind Channel Response Estimation for Replay Attack Detection</div><div class="cpsessionviewpaperauthor">[[Anderson R. Avila|AUTHOR Anderson R. Avila]], [[Jahangir Alam|AUTHOR Jahangir Alam]], [[Douglas O’Shaughnessy|AUTHOR Douglas O’Shaughnessy]], [[Tiago H. Falk|AUTHOR Tiago H. Falk]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192742.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-2|PAPER Wed-P-6-A-2 — Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection]]</div>|<div class="cpsessionviewpapertitle">Energy Separation-Based Instantaneous Frequency Estimation for Cochlear Cepstral Feature for Replay Spoof Detection</div><div class="cpsessionviewpaperauthor">[[Ankur T. Patil|AUTHOR Ankur T. Patil]], [[Rajul Acharya|AUTHOR Rajul Acharya]], [[Pulikonda Aditya Sai|AUTHOR Pulikonda Aditya Sai]], [[Hemant A. Patil|AUTHOR Hemant A. Patil]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192550.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-3|PAPER Wed-P-6-A-3 — Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems]]</div>|<div class="cpsessionviewpapertitle">Optimization of False Acceptance/Rejection Rates and Decision Threshold for End-to-End Text-Dependent Speaker Verification Systems</div><div class="cpsessionviewpaperauthor">[[Victoria Mingote|AUTHOR Victoria Mingote]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192457.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-4|PAPER Wed-P-6-A-4 — Deep Hashing for Speaker Identification and Retrieval]]</div>|<div class="cpsessionviewpapertitle">Deep Hashing for Speaker Identification and Retrieval</div><div class="cpsessionviewpaperauthor">[[Lei Fan|AUTHOR Lei Fan]], [[Qing-Yuan Jiang|AUTHOR Qing-Yuan Jiang]], [[Ya-Qi Yu|AUTHOR Ya-Qi Yu]], [[Wu-Jun Li|AUTHOR Wu-Jun Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192430.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-5|PAPER Wed-P-6-A-5 — Adversarial Optimization for Dictionary Attacks on Speaker Verification]]</div>|<div class="cpsessionviewpapertitle">Adversarial Optimization for Dictionary Attacks on Speaker Verification</div><div class="cpsessionviewpaperauthor">[[Mirko Marras|AUTHOR Mirko Marras]], [[Paweł Korus|AUTHOR Paweł Korus]], [[Nasir Memon|AUTHOR Nasir Memon]], [[Gianni Fenu|AUTHOR Gianni Fenu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192361.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-6|PAPER Wed-P-6-A-6 — An Adaptive-Q Cochlear Model for Replay Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">An Adaptive-Q Cochlear Model for Replay Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Tharshini Gunendradasan|AUTHOR Tharshini Gunendradasan]], [[Eliathamby Ambikairajah|AUTHOR Eliathamby Ambikairajah]], [[Julien Epps|AUTHOR Julien Epps]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192208.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-7|PAPER Wed-P-6-A-7 — An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network]]</div>|<div class="cpsessionviewpapertitle">An End-to-End Text-Independent Speaker Verification Framework with a Keyword Adversarial Network</div><div class="cpsessionviewpaperauthor">[[Sungrack Yun|AUTHOR Sungrack Yun]], [[Janghoon Cho|AUTHOR Janghoon Cho]], [[Jungyun Eum|AUTHOR Jungyun Eum]], [[Wonil Chang|AUTHOR Wonil Chang]], [[Kyuwoong Hwang|AUTHOR Kyuwoong Hwang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192195.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-8|PAPER Wed-P-6-A-8 — Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System]]</div>|<div class="cpsessionviewpapertitle">Shortcut Connections Based Deep Speaker Embeddings for End-to-End Speaker Verification System</div><div class="cpsessionviewpaperauthor">[[Soonshin Seo|AUTHOR Soonshin Seo]], [[Daniel Jun Rim|AUTHOR Daniel Jun Rim]], [[Minkyu Lim|AUTHOR Minkyu Lim]], [[Donghyun Lee|AUTHOR Donghyun Lee]], [[Hosung Park|AUTHOR Hosung Park]], [[Junseok Oh|AUTHOR Junseok Oh]], [[Changmin Kim|AUTHOR Changmin Kim]], [[Ji-Hwan Kim|AUTHOR Ji-Hwan Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192137.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-9|PAPER Wed-P-6-A-9 — Device Feature Extractor for Replay Spoofing Detection]]</div>|<div class="cpsessionviewpapertitle">Device Feature Extractor for Replay Spoofing Detection</div><div class="cpsessionviewpaperauthor">[[Chang Huai You|AUTHOR Chang Huai You]], [[Jichen Yang|AUTHOR Jichen Yang]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192120.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-10|PAPER Wed-P-6-A-10 — Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training]]</div>|<div class="cpsessionviewpapertitle">Cross-Domain Replay Spoofing Attack Detection Using Domain Adversarial Training</div><div class="cpsessionviewpaperauthor">[[Hongji Wang|AUTHOR Hongji Wang]], [[Heinrich Dinkel|AUTHOR Heinrich Dinkel]], [[Shuai Wang|AUTHOR Shuai Wang]], [[Yanmin Qian|AUTHOR Yanmin Qian]], [[Kai Yu|AUTHOR Kai Yu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191891.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-11|PAPER Wed-P-6-A-11 — A Study of x-Vector Based Speaker Recognition on Short Utterances]]</div>|<div class="cpsessionviewpapertitle">A Study of x-Vector Based Speaker Recognition on Short Utterances</div><div class="cpsessionviewpaperauthor">[[A. Kanagasundaram|AUTHOR A. Kanagasundaram]], [[S. Sridharan|AUTHOR S. Sridharan]], [[G. Sriram|AUTHOR G. Sriram]], [[S. Prachi|AUTHOR S. Prachi]], [[C. Fookes|AUTHOR C. Fookes]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-12|PAPER Wed-P-6-A-12 — Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings]]</div>|<div class="cpsessionviewpapertitle">Tied Mixture of Factor Analyzers Layer to Combine Frame Level Representations in Neural Speaker Embeddings</div><div class="cpsessionviewpaperauthor">[[Nanxin Chen|AUTHOR Nanxin Chen]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Najim Dehak|AUTHOR Najim Dehak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191535.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-13|PAPER Wed-P-6-A-13 — Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection]]</div>|<div class="cpsessionviewpapertitle">Biologically Inspired Adaptive-Q Filterbanks for Replay Spoofing Attack Detection</div><div class="cpsessionviewpaperauthor">[[Buddhi Wickramasinghe|AUTHOR Buddhi Wickramasinghe]], [[Eliathamby Ambikairajah|AUTHOR Eliathamby Ambikairajah]], [[Julien Epps|AUTHOR Julien Epps]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191524.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-14|PAPER Wed-P-6-A-14 — On Robustness of Unsupervised Domain Adaptation for Speaker Recognition]]</div>|<div class="cpsessionviewpapertitle">On Robustness of Unsupervised Domain Adaptation for Speaker Recognition</div><div class="cpsessionviewpaperauthor">[[Pierre-Michel Bousquet|AUTHOR Pierre-Michel Bousquet]], [[Mickael Rouvier|AUTHOR Mickael Rouvier]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191498.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-A-15|PAPER Wed-P-6-A-15 — Large-Scale Speaker Retrieval on Random Speaker Variability Subspace]]</div>|<div class="cpsessionviewpapertitle">Large-Scale Speaker Retrieval on Random Speaker Variability Subspace</div><div class="cpsessionviewpaperauthor">[[Suwon Shon|AUTHOR Suwon Shon]], [[Younggun Lee|AUTHOR Younggun Lee]], [[Taesu Kim|AUTHOR Taesu Kim]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Xunying Liu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193088.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-1|PAPER Wed-P-6-B-1 — Meeting Transcription Using Asynchronous Distant Microphones]]</div>|<div class="cpsessionviewpapertitle">Meeting Transcription Using Asynchronous Distant Microphones</div><div class="cpsessionviewpaperauthor">[[Takuya Yoshioka|AUTHOR Takuya Yoshioka]], [[Dimitrios Dimitriadis|AUTHOR Dimitrios Dimitriadis]], [[Andreas Stolcke|AUTHOR Andreas Stolcke]], [[William Hinthorn|AUTHOR William Hinthorn]], [[Zhuo Chen|AUTHOR Zhuo Chen]], [[Michael Zeng|AUTHOR Michael Zeng]], [[Xuedong Huang|AUTHOR Xuedong Huang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192793.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-2|PAPER Wed-P-6-B-2 — Detection and Recovery of OOVs for Improved English Broadcast News Captioning]]</div>|<div class="cpsessionviewpapertitle">Detection and Recovery of OOVs for Improved English Broadcast News Captioning</div><div class="cpsessionviewpaperauthor">[[Samuel Thomas|AUTHOR Samuel Thomas]], [[Kartik Audhkhasi|AUTHOR Kartik Audhkhasi]], [[Zoltán Tüske|AUTHOR Zoltán Tüske]], [[Yinghui Huang|AUTHOR Yinghui Huang]], [[Michael Picheny|AUTHOR Michael Picheny]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192629.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-3|PAPER Wed-P-6-B-3 — Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Improving Large Vocabulary Urdu Speech Recognition System Using Deep Neural Networks</div><div class="cpsessionviewpaperauthor">[[Muhammad Umar Farooq|AUTHOR Muhammad Umar Farooq]], [[Farah Adeeba|AUTHOR Farah Adeeba]], [[Sahar Rauf|AUTHOR Sahar Rauf]], [[Sarmad Hussain|AUTHOR Sarmad Hussain]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192586.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-4|PAPER Wed-P-6-B-4 — Hybrid Arbitration Using Raw ASR String and NLU Information — Taking the Best of Both Embedded World and Cloud World]]</div>|<div class="cpsessionviewpapertitle">Hybrid Arbitration Using Raw ASR String and NLU Information — Taking the Best of Both Embedded World and Cloud World</div><div class="cpsessionviewpaperauthor">[[Min Tang|AUTHOR Min Tang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192132.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-5|PAPER Wed-P-6-B-5 — Leveraging a Character, Word and Prosody Triplet for an ASR Error Robust and Agglutination Friendly Punctuation Approach]]</div>|<div class="cpsessionviewpapertitle">Leveraging a Character, Word and Prosody Triplet for an ASR Error Robust and Agglutination Friendly Punctuation Approach</div><div class="cpsessionviewpaperauthor">[[György Szaszák|AUTHOR György Szaszák]], [[Máté Ákos Tündik|AUTHOR Máté Ákos Tündik]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191962.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-6|PAPER Wed-P-6-B-6 — The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection]]</div>|<div class="cpsessionviewpapertitle">The Airbus Air Traffic Control Speech Recognition 2018 Challenge: Towards ATC Automatic Transcription and Call Sign Detection</div><div class="cpsessionviewpaperauthor">[[Thomas Pellegrini|AUTHOR Thomas Pellegrini]], [[Jér^ome Farinas|AUTHOR Jér^ome Farinas]], [[Estelle Delpech|AUTHOR Estelle Delpech]], [[François Lancelot|AUTHOR François Lancelot]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191390.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-7|PAPER Wed-P-6-B-7 —  Kite: Automatic Speech Recognition for Unmanned Aerial Vehicles]]</div>|<div class="cpsessionviewpapertitle"> Kite: Automatic Speech Recognition for Unmanned Aerial Vehicles</div><div class="cpsessionviewpaperauthor">[[Dan Oneață|AUTHOR Dan Oneață]], [[Horia Cucu|AUTHOR Horia Cucu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191343.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-8|PAPER Wed-P-6-B-8 — Exploring Methods for the Automatic Detection of Errors in Manual Transcription]]</div>|<div class="cpsessionviewpapertitle">Exploring Methods for the Automatic Detection of Errors in Manual Transcription</div><div class="cpsessionviewpaperauthor">[[Xiaofei Wang|AUTHOR Xiaofei Wang]], [[Jinyi Yang|AUTHOR Jinyi Yang]], [[Ruizhi Li|AUTHOR Ruizhi Li]], [[Samik Sadhu|AUTHOR Samik Sadhu]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191328.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-9|PAPER Wed-P-6-B-9 — Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training]]</div>|<div class="cpsessionviewpapertitle">Improved Low-Resource Somali Speech Recognition by Semi-Supervised Acoustic and Language Model Training</div><div class="cpsessionviewpaperauthor">[[Astik Biswas|AUTHOR Astik Biswas]], [[Raghav Menon|AUTHOR Raghav Menon]], [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]], [[Thomas Niesler|AUTHOR Thomas Niesler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191248.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-10|PAPER Wed-P-6-B-10 — The Althingi ASR System]]</div>|<div class="cpsessionviewpapertitle">The Althingi ASR System</div><div class="cpsessionviewpaperauthor">[[Inga R. Helgadóttir|AUTHOR Inga R. Helgadóttir]], [[Anna Björk Nikulásdóttir|AUTHOR Anna Björk Nikulásdóttir]], [[Michal Borský|AUTHOR Michal Borský]], [[Judy Y. Fong|AUTHOR Judy Y. Fong]], [[Róbert Kjaran|AUTHOR Róbert Kjaran]], [[Jón Guðnason|AUTHOR Jón Guðnason]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191131.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-B-11|PAPER Wed-P-6-B-11 — CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task]]</div>|<div class="cpsessionviewpapertitle">CRIM’s Speech Transcription and Call Sign Detection System for the ATC Airbus Challenge Task</div><div class="cpsessionviewpaperauthor">[[Vishwa Gupta|AUTHOR Vishwa Gupta]], [[Lise Rebout|AUTHOR Lise Rebout]], [[Gilles Boulianne|AUTHOR Gilles Boulianne]], [[Pierre-André Ménard|AUTHOR Pierre-André Ménard]], [[Jahangir Alam|AUTHOR Jahangir Alam]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Daniel Bone|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193095.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-1|PAPER Wed-P-6-C-1 — Optimizing Speech-Input Length for Speaker-Independent Depression Classification]]</div>|<div class="cpsessionviewpapertitle">Optimizing Speech-Input Length for Speaker-Independent Depression Classification</div><div class="cpsessionviewpaperauthor">[[Tomasz Rutowski|AUTHOR Tomasz Rutowski]], [[Amir Harati|AUTHOR Amir Harati]], [[Yang Lu|AUTHOR Yang Lu]], [[Elizabeth Shriberg|AUTHOR Elizabeth Shriberg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192987.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-2|PAPER Wed-P-6-C-2 — A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia]]</div>|<div class="cpsessionviewpapertitle">A New Approach for Automating Analysis of Responses on Verbal Fluency Tests from Subjects At-Risk for Schizophrenia</div><div class="cpsessionviewpaperauthor">[[Mary Pietrowicz|AUTHOR Mary Pietrowicz]], [[Carla Agurto|AUTHOR Carla Agurto]], [[Raquel Norel|AUTHOR Raquel Norel]], [[Elif Eyigoz|AUTHOR Elif Eyigoz]], [[Guillermo Cecchi|AUTHOR Guillermo Cecchi]], [[Zarina R. Bilgrami|AUTHOR Zarina R. Bilgrami]], [[Cheryl Corcoran|AUTHOR Cheryl Corcoran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192825.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-3|PAPER Wed-P-6-C-3 — Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models]]</div>|<div class="cpsessionviewpapertitle">Comparison of Telephone Recordings and Professional Microphone Recordings for Early Detection of Parkinson’s Disease, Using Mel-Frequency Cepstral Coefficients with Gaussian Mixture Models</div><div class="cpsessionviewpaperauthor">[[Laetitia Jeancolas|AUTHOR Laetitia Jeancolas]], [[Graziella Mangone|AUTHOR Graziella Mangone]], [[Jean-Christophe Corvol|AUTHOR Jean-Christophe Corvol]], [[Marie Vidailhet|AUTHOR Marie Vidailhet]], [[Stéphane Lehéricy|AUTHOR Stéphane Lehéricy]], [[Badr-Eddine Benkelfat|AUTHOR Badr-Eddine Benkelfat]], [[Habib Benali|AUTHOR Habib Benali]], [[Dijana Petrovska-Delacrétaz|AUTHOR Dijana Petrovska-Delacrétaz]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192791.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-4|PAPER Wed-P-6-C-4 — Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility]]</div>|<div class="cpsessionviewpapertitle">Spectral Subspace Analysis for Automatic Assessment of Pathological Speech Intelligibility</div><div class="cpsessionviewpaperauthor">[[Parvaneh Janbakhshi|AUTHOR Parvaneh Janbakhshi]], [[Ina Kodrasi|AUTHOR Ina Kodrasi]], [[Hervé Bourlard|AUTHOR Hervé Bourlard]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192551.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-5|PAPER Wed-P-6-C-5 — An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy]]</div>|<div class="cpsessionviewpapertitle">An Investigation of Therapeutic Rapport Through Prosody in Brief Psychodynamic Psychotherapy</div><div class="cpsessionviewpaperauthor">[[Carolina De Pasquale|AUTHOR Carolina De Pasquale]], [[Charlie Cullen|AUTHOR Charlie Cullen]], [[Brian Vaughan|AUTHOR Brian Vaughan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192490.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-6|PAPER Wed-P-6-C-6 — Feature Representation of Pathophysiology of Parkinsonian Dysarthria]]</div>|<div class="cpsessionviewpapertitle">Feature Representation of Pathophysiology of Parkinsonian Dysarthria</div><div class="cpsessionviewpaperauthor">[[Alice Rueda|AUTHOR Alice Rueda]], [[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]], [[Cristian David Rios-Urrego|AUTHOR Cristian David Rios-Urrego]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]], [[Sridhar Krishnan|AUTHOR Sridhar Krishnan]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-7|PAPER Wed-P-6-C-7 — Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia]]</div>|<div class="cpsessionviewpapertitle">Neural Transfer Learning for Cry-Based Diagnosis of Perinatal Asphyxia</div><div class="cpsessionviewpaperauthor">[[Charles C. Onu|AUTHOR Charles C. Onu]], [[Jonathan Lebensold|AUTHOR Jonathan Lebensold]], [[William L. Hamilton|AUTHOR William L. Hamilton]], [[Doina Precup|AUTHOR Doina Precup]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-8|PAPER Wed-P-6-C-8 — Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters]]</div>|<div class="cpsessionviewpapertitle">Investigating the Variability of Voice Quality and Pain Levels as a Function of Multiple Clinical Parameters</div><div class="cpsessionviewpaperauthor">[[Hui-Ting Hong|AUTHOR Hui-Ting Hong]], [[Jeng-Lin Li|AUTHOR Jeng-Lin Li]], [[Yi-Ming Weng|AUTHOR Yi-Ming Weng]], [[Chip-Jin Ng|AUTHOR Chip-Jin Ng]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192217.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-9|PAPER Wed-P-6-C-9 — Assessing Parkinson’s Disease from Speech Using Fisher Vectors]]</div>|<div class="cpsessionviewpapertitle">Assessing Parkinson’s Disease from Speech Using Fisher Vectors</div><div class="cpsessionviewpaperauthor">[[José Vicente Egas López|AUTHOR José Vicente Egas López]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]], [[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192080.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-10|PAPER Wed-P-6-C-10 — Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data]]</div>|<div class="cpsessionviewpapertitle">Feature Space Visualization with Spatial Similarity Maps for Pathological Speech Data</div><div class="cpsessionviewpaperauthor">[[Philipp Klumpp|AUTHOR Philipp Klumpp]], [[J.C. Vásquez-Correa|AUTHOR J.C. Vásquez-Correa]], [[Tino Haderlein|AUTHOR Tino Haderlein]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191888.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-11|PAPER Wed-P-6-C-11 — Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language]]</div>|<div class="cpsessionviewpapertitle">Predicting Behavior in Cancer-Afflicted Patient and Spouse Interactions Using Speech and Language</div><div class="cpsessionviewpaperauthor">[[Sandeep Nallan Chakravarthula|AUTHOR Sandeep Nallan Chakravarthula]], [[Haoqi Li|AUTHOR Haoqi Li]], [[Shao-Yen Tseng|AUTHOR Shao-Yen Tseng]], [[Maija Reblin|AUTHOR Maija Reblin]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191688.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-C-12|PAPER Wed-P-6-C-12 — Automatic Assessment of Language Impairment Based on Raw ASR Output]]</div>|<div class="cpsessionviewpapertitle">Automatic Assessment of Language Impairment Based on Raw ASR Output</div><div class="cpsessionviewpaperauthor">[[Ying Qin|AUTHOR Ying Qin]], [[Tan Lee|AUTHOR Tan Lee]], [[Anthony Pak Hin Kong|AUTHOR Anthony Pak Hin Kong]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^Nao Hodoshima|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193209.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-1|PAPER Wed-P-6-D-1 — Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners]]</div>|<div class="cpsessionviewpapertitle">Effects of Spectral and Temporal Cues to Mandarin Concurrent-Vowels Identification for Normal-Hearing and Hearing-Impaired Listeners</div><div class="cpsessionviewpaperauthor">[[Zhen Fu|AUTHOR Zhen Fu]], [[Xihong Wu|AUTHOR Xihong Wu]], [[Jing Chen|AUTHOR Jing Chen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193134.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-2|PAPER Wed-P-6-D-2 — Disfluencies and Human Speech Transcription Errors]]</div>|<div class="cpsessionviewpapertitle">Disfluencies and Human Speech Transcription Errors</div><div class="cpsessionviewpaperauthor">[[Vicky Zayats|AUTHOR Vicky Zayats]], [[Trang Tran|AUTHOR Trang Tran]], [[Richard Wright|AUTHOR Richard Wright]], [[Courtney Mansfield|AUTHOR Courtney Mansfield]], [[Mari Ostendorf|AUTHOR Mari Ostendorf]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192699.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-3|PAPER Wed-P-6-D-3 — The Influence of Distraction on Speech Processing: How Selective is Selective Attention?]]</div>|<div class="cpsessionviewpapertitle">The Influence of Distraction on Speech Processing: How Selective is Selective Attention?</div><div class="cpsessionviewpaperauthor">[[Sandra I. Parhammer|AUTHOR Sandra I. Parhammer]], [[Miriam Ebersberg|AUTHOR Miriam Ebersberg]], [[Jenny Tippmann|AUTHOR Jenny Tippmann]], [[Katja Stärk|AUTHOR Katja Stärk]], [[Andreas Opitz|AUTHOR Andreas Opitz]], [[Barbara Hinger|AUTHOR Barbara Hinger]], [[Sonja Rossi|AUTHOR Sonja Rossi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192215.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-4|PAPER Wed-P-6-D-4 — Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking]]</div>|<div class="cpsessionviewpapertitle">Subjective Evaluation of Communicative Effort for Younger and Older Adults in Interactive Tasks with Energetic and Informational Masking</div><div class="cpsessionviewpaperauthor">[[Valerie Hazan|AUTHOR Valerie Hazan]], [[Outi Tuomainen|AUTHOR Outi Tuomainen]], [[Linda Taschenberger|AUTHOR Linda Taschenberger]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192210.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-5|PAPER Wed-P-6-D-5 — Perceiving Older Adults Producing Clear and Lombard Speech]]</div>|<div class="cpsessionviewpapertitle">Perceiving Older Adults Producing Clear and Lombard Speech</div><div class="cpsessionviewpaperauthor">[[Chris Davis|AUTHOR Chris Davis]], [[Jeesun Kim|AUTHOR Jeesun Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192144.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-6|PAPER Wed-P-6-D-6 — Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users]]</div>|<div class="cpsessionviewpapertitle">Phone-Attribute Posteriors to Evaluate the Speech of Cochlear Implant Users</div><div class="cpsessionviewpaperauthor">[[T. Arias-Vergara|AUTHOR T. Arias-Vergara]], [[Juan Rafael Orozco-Arroyave|AUTHOR Juan Rafael Orozco-Arroyave]], [[Milos Cernak|AUTHOR Milos Cernak]], [[S. Gollwitzer|AUTHOR S. Gollwitzer]], [[M. Schuster|AUTHOR M. Schuster]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191902.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-7|PAPER Wed-P-6-D-7 — Effects of Urgent Speech and Congruent/Incongruent Text on Speech Intelligibility in Noise and Reverberation]]</div>|<div class="cpsessionviewpapertitle">Effects of Urgent Speech and Congruent/Incongruent Text on Speech Intelligibility in Noise and Reverberation</div><div class="cpsessionviewpaperauthor">[[Nao Hodoshima|AUTHOR Nao Hodoshima]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191852.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-8|PAPER Wed-P-6-D-8 — Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli]]</div>|<div class="cpsessionviewpapertitle">Quantifying Cochlear Implant Users’ Ability for Speaker Identification Using CI Auditory Stimuli</div><div class="cpsessionviewpaperauthor">[[Nursadul Mamun|AUTHOR Nursadul Mamun]], [[Ria Ghosh|AUTHOR Ria Ghosh]], [[John H.L. Hansen|AUTHOR John H.L. Hansen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191414.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-9|PAPER Wed-P-6-D-9 — Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context]]</div>|<div class="cpsessionviewpapertitle">Lexically Guided Perceptual Learning of a Vowel Shift in an Interactive L2 Listening Context</div><div class="cpsessionviewpaperauthor">[[E. Felker|AUTHOR E. Felker]], [[Mirjam Ernestus|AUTHOR Mirjam Ernestus]], [[Mirjam Broersma|AUTHOR Mirjam Broersma]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191402.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-10|PAPER Wed-P-6-D-10 — Talker Intelligibility and Listening Effort with Temporally Modified Speech]]</div>|<div class="cpsessionviewpapertitle">Talker Intelligibility and Listening Effort with Temporally Modified Speech</div><div class="cpsessionviewpaperauthor">[[Maximillian Paulus|AUTHOR Maximillian Paulus]], [[Valerie Hazan|AUTHOR Valerie Hazan]], [[Patti Adank|AUTHOR Patti Adank]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191281.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-11|PAPER Wed-P-6-D-11 — R²SPIN: Re-Recording the Revised Speech Perception in Noise Test]]</div>|<div class="cpsessionviewpapertitle">R²SPIN: Re-Recording the Revised Speech Perception in Noise Test</div><div class="cpsessionviewpaperauthor">[[Lauren Ward|AUTHOR Lauren Ward]], [[Catherine Robinson|AUTHOR Catherine Robinson]], [[Matthew Paradis|AUTHOR Matthew Paradis]], [[Katherine M. Tucker|AUTHOR Katherine M. Tucker]], [[Ben Shirley|AUTHOR Ben Shirley]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191124.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-D-12|PAPER Wed-P-6-D-12 — Contributions of Consonant-Vowel Transitions to Mandarin Tone Identification in Simulated Electric-Acoustic Hearing]]</div>|<div class="cpsessionviewpapertitle">Contributions of Consonant-Vowel Transitions to Mandarin Tone Identification in Simulated Electric-Acoustic Hearing</div><div class="cpsessionviewpaperauthor">[[Fei Chen|AUTHOR Fei Chen]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Rainer Martin|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192782.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-1|PAPER Wed-P-6-E-1 — Monaural Speech Enhancement with Dilated Convolutions]]</div>|<div class="cpsessionviewpapertitle">Monaural Speech Enhancement with Dilated Convolutions</div><div class="cpsessionviewpaperauthor">[[Shadi Pirhosseinloo|AUTHOR Shadi Pirhosseinloo]], [[Jonathan S. Brumberg|AUTHOR Jonathan S. Brumberg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191519.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-2|PAPER Wed-P-6-E-2 — Noise Adaptive Speech Enhancement Using Domain Adversarial Training]]</div>|<div class="cpsessionviewpapertitle">Noise Adaptive Speech Enhancement Using Domain Adversarial Training</div><div class="cpsessionviewpaperauthor">[[Chien-Feng Liao|AUTHOR Chien-Feng Liao]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Hung-Yi Lee|AUTHOR Hung-Yi Lee]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191477.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-3|PAPER Wed-P-6-E-3 — Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Environment-Dependent Attention-Driven Recurrent Convolutional Neural Network for Robust Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Meng Ge|AUTHOR Meng Ge]], [[Longbiao Wang|AUTHOR Longbiao Wang]], [[Nan Li|AUTHOR Nan Li]], [[Hao Shi|AUTHOR Hao Shi]], [[Jianwu Dang|AUTHOR Jianwu Dang]], [[Xiangang Li|AUTHOR Xiangang Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-4|PAPER Wed-P-6-E-4 — A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders]]</div>|<div class="cpsessionviewpapertitle">A Statistically Principled and Computationally Efficient Approach to Speech Enhancement Using Variational Autoencoders</div><div class="cpsessionviewpaperauthor">[[Manuel Pariente|AUTHOR Manuel Pariente]], [[Antoine Deleforge|AUTHOR Antoine Deleforge]], [[Emmanuel Vincent|AUTHOR Emmanuel Vincent]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192954.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-5|PAPER Wed-P-6-E-5 — Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction]]</div>|<div class="cpsessionviewpapertitle">Speech Enhancement Using Forked Generative Adversarial Networks with Spectral Subtraction</div><div class="cpsessionviewpaperauthor">[[Ju Lin|AUTHOR Ju Lin]], [[Sufeng Niu|AUTHOR Sufeng Niu]], [[Zice Wei|AUTHOR Zice Wei]], [[Xiang Lan|AUTHOR Xiang Lan]], [[Adriaan J. van Wijngaarden|AUTHOR Adriaan J. van Wijngaarden]], [[Melissa C. Smith|AUTHOR Melissa C. Smith]], [[Kuang-Ching Wang|AUTHOR Kuang-Ching Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192425.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-6|PAPER Wed-P-6-E-6 — Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric]]</div>|<div class="cpsessionviewpapertitle">Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric</div><div class="cpsessionviewpaperauthor">[[Ryandhimas E. Zezario|AUTHOR Ryandhimas E. Zezario]], [[Szu-Wei Fu|AUTHOR Szu-Wei Fu]], [[Xugang Lu|AUTHOR Xugang Lu]], [[Hsin-Min Wang|AUTHOR Hsin-Min Wang]], [[Yu Tsao|AUTHOR Yu Tsao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192108.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-7|PAPER Wed-P-6-E-7 — Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Speaker-Aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Fu-Kai Chuang|AUTHOR Fu-Kai Chuang]], [[Syu-Siang Wang|AUTHOR Syu-Siang Wang]], [[Jeih-weih Hung|AUTHOR Jeih-weih Hung]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Shih-Hau Fang|AUTHOR Shih-Hau Fang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191897.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-8|PAPER Wed-P-6-E-8 — Investigation of Cost Function for Supervised Monaural Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Investigation of Cost Function for Supervised Monaural Speech Separation</div><div class="cpsessionviewpaperauthor">[[Yun Liu|AUTHOR Yun Liu]], [[Hui Zhang|AUTHOR Hui Zhang]], [[Xueliang Zhang|AUTHOR Xueliang Zhang]], [[Yuhang Cao|AUTHOR Yuhang Cao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191373.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-9|PAPER Wed-P-6-E-9 — Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation]]</div>|<div class="cpsessionviewpapertitle">Deep Attention Gated Dilated Temporal Convolutional Networks with Intra-Parallel Convolutional Modules for End-to-End Monaural Speech Separation</div><div class="cpsessionviewpaperauthor">[[Ziqiang Shi|AUTHOR Ziqiang Shi]], [[Huibin Lin|AUTHOR Huibin Lin]], [[Liu Liu|AUTHOR Liu Liu]], [[Rujie Liu|AUTHOR Rujie Liu]], [[Jiqing Han|AUTHOR Jiqing Han]], [[Anyan Shi|AUTHOR Anyan Shi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191141.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-10|PAPER Wed-P-6-E-10 — Masking Estimation with Phase Restoration of Clean Speech for Monaural Speech Enhancement]]</div>|<div class="cpsessionviewpapertitle">Masking Estimation with Phase Restoration of Clean Speech for Monaural Speech Enhancement</div><div class="cpsessionviewpaperauthor">[[Xianyun Wang|AUTHOR Xianyun Wang]], [[Changchun Bao|AUTHOR Changchun Bao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191748.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-6-E-11|PAPER Wed-P-6-E-11 — Progressive Speech Enhancement with Residual Connections]]</div>|<div class="cpsessionviewpapertitle">Progressive Speech Enhancement with Residual Connections</div><div class="cpsessionviewpaperauthor">[[Jorge Llombart|AUTHOR Jorge Llombart]], [[Dayana Ribas|AUTHOR Dayana Ribas]], [[Antonio Miguel|AUTHOR Antonio Miguel]], [[Luis Vicente|AUTHOR Luis Vicente]], [[Alfonso Ortega|AUTHOR Alfonso Ortega]], [[Eduardo Lleida|AUTHOR Eduardo Lleida]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Langzhou Chen|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192818.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-1|PAPER Wed-P-7-B-1 — Acoustic Model Bootstrapping Using Semi-Supervised Learning]]</div>|<div class="cpsessionviewpapertitle">Acoustic Model Bootstrapping Using Semi-Supervised Learning</div><div class="cpsessionviewpaperauthor">[[Langzhou Chen|AUTHOR Langzhou Chen]], [[Volker Leutnant|AUTHOR Volker Leutnant]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192589.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-2|PAPER Wed-P-7-B-2 — Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Bandwidth Embeddings for Mixed-Bandwidth Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Gautam Mantena|AUTHOR Gautam Mantena]], [[Ozlem Kalinli|AUTHOR Ozlem Kalinli]], [[Ossama Abdel-Hamid|AUTHOR Ossama Abdel-Hamid]], [[Don McAllaster|AUTHOR Don McAllaster]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192420.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-3|PAPER Wed-P-7-B-3 — Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization]]</div>|<div class="cpsessionviewpapertitle">Adversarial Black-Box Attacks on Automatic Speech Recognition Systems Using Multi-Objective Evolutionary Optimization</div><div class="cpsessionviewpaperauthor">[[Shreya Khare|AUTHOR Shreya Khare]], [[Rahul Aralikatte|AUTHOR Rahul Aralikatte]], [[Senthil Mani|AUTHOR Senthil Mani]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192339.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-4|PAPER Wed-P-7-B-4 — Towards Debugging Deep Neural Networks by Generating Speech Utterances]]</div>|<div class="cpsessionviewpapertitle">Towards Debugging Deep Neural Networks by Generating Speech Utterances</div><div class="cpsessionviewpaperauthor">[[Bilal Soomro|AUTHOR Bilal Soomro]], [[Anssi Kanervisto|AUTHOR Anssi Kanervisto]], [[Trung Ngo Trong|AUTHOR Trung Ngo Trong]], [[Ville Hautamäki|AUTHOR Ville Hautamäki]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192182.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-5|PAPER Wed-P-7-B-5 — Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation]]</div>|<div class="cpsessionviewpapertitle">Compression of CTC-Trained Acoustic Models by Dynamic Frame-Wise Distillation or Segment-Wise N-Best Hypotheses Imitation</div><div class="cpsessionviewpaperauthor">[[Haisong Ding|AUTHOR Haisong Ding]], [[Kai Chen|AUTHOR Kai Chen]], [[Qiang Huo|AUTHOR Qiang Huo]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-6|PAPER Wed-P-7-B-6 — Keyword Spotting for Hearing Assistive Devices Robust to External Speakers]]</div>|<div class="cpsessionviewpapertitle">Keyword Spotting for Hearing Assistive Devices Robust to External Speakers</div><div class="cpsessionviewpaperauthor">[[Iván López-Espejo|AUTHOR Iván López-Espejo]], [[Zheng-Hua Tan|AUTHOR Zheng-Hua Tan]], [[Jesper Jensen|AUTHOR Jesper Jensen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191797.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-7|PAPER Wed-P-7-B-7 — Latent Dirichlet Allocation Based Acoustic Data Selection for Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Latent Dirichlet Allocation Based Acoustic Data Selection for Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Mortaza Doulaty|AUTHOR Mortaza Doulaty]], [[Thomas Hain|AUTHOR Thomas Hain]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191692.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-8|PAPER Wed-P-7-B-8 — Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training]]</div>|<div class="cpsessionviewpapertitle">Target Speaker Recovery and Recognition Network with Average x-Vector and Global Training</div><div class="cpsessionviewpaperauthor">[[Wenjie Li|AUTHOR Wenjie Li]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191318.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-9|PAPER Wed-P-7-B-9 — Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes]]</div>|<div class="cpsessionviewpapertitle">Lyrics Recognition from Singing Voice Focused on Correspondence Between Voice and Notes</div><div class="cpsessionviewpaperauthor">[[Motoyuki Suzuki|AUTHOR Motoyuki Suzuki]], [[Sho Tomita|AUTHOR Sho Tomita]], [[Tomoki Morita|AUTHOR Tomoki Morita]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191227.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-B-10|PAPER Wed-P-7-B-10 — Transfer Learning from Audio-Visual Grounding to Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Transfer Learning from Audio-Visual Grounding to Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Wei-Ning Hsu|AUTHOR Wei-Ning Hsu]], [[David Harwath|AUTHOR David Harwath]], [[James Glass|AUTHOR James Glass]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Tan Lee|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192041.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-1|PAPER Wed-P-7-C-1 — Cross-Corpus Speech Emotion Recognition Using Semi-Supervised Transfer Non-Negative Matrix Factorization with Adaptation Regularization]]</div>|<div class="cpsessionviewpapertitle">Cross-Corpus Speech Emotion Recognition Using Semi-Supervised Transfer Non-Negative Matrix Factorization with Adaptation Regularization</div><div class="cpsessionviewpaperauthor">[[Hui Luo|AUTHOR Hui Luo]], [[Jiqing Han|AUTHOR Jiqing Han]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192489.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-2|PAPER Wed-P-7-C-2 — Modeling User Context for Valence Prediction from Narratives]]</div>|<div class="cpsessionviewpapertitle">Modeling User Context for Valence Prediction from Narratives</div><div class="cpsessionviewpaperauthor">[[Aniruddha Tammewar|AUTHOR Aniruddha Tammewar]], [[Alessandra Cervone|AUTHOR Alessandra Cervone]], [[Eva-Maria Messner|AUTHOR Eva-Maria Messner]], [[Giuseppe Riccardi|AUTHOR Giuseppe Riccardi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192243.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-3|PAPER Wed-P-7-C-3 — Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition]]</div>|<div class="cpsessionviewpapertitle">Front-End Feature Compensation and Denoising for Noise Robust Speech Emotion Recognition</div><div class="cpsessionviewpaperauthor">[[Rupayan Chakraborty|AUTHOR Rupayan Chakraborty]], [[Ashish Panda|AUTHOR Ashish Panda]], [[Meghna Pandharipande|AUTHOR Meghna Pandharipande]], [[Sonal Joshi|AUTHOR Sonal Joshi]], [[Sunil Kumar Kopparapu|AUTHOR Sunil Kumar Kopparapu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192229.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-4|PAPER Wed-P-7-C-4 — The Contribution of Acoustic Features Analysis to Model Emotion Perceptual Process for Language Diversity]]</div>|<div class="cpsessionviewpapertitle">The Contribution of Acoustic Features Analysis to Model Emotion Perceptual Process for Language Diversity</div><div class="cpsessionviewpaperauthor">[[Xingfeng Li|AUTHOR Xingfeng Li]], [[Masato Akagi|AUTHOR Masato Akagi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-5|PAPER Wed-P-7-C-5 — Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis]]</div>|<div class="cpsessionviewpapertitle">Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion Analysis</div><div class="cpsessionviewpaperauthor">[[Rajeev Rajan|AUTHOR Rajeev Rajan]], [[Haritha U. G.|AUTHOR Haritha U. G.]], [[Sujitha A. C.|AUTHOR Sujitha A. C.]], [[Rejisha T. M.|AUTHOR Rejisha T. M.]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191842.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-6|PAPER Wed-P-7-C-6 — Speech Emotion Recognition with a Reject Option]]</div>|<div class="cpsessionviewpapertitle">Speech Emotion Recognition with a Reject Option</div><div class="cpsessionviewpaperauthor">[[Kusha Sridhar|AUTHOR Kusha Sridhar]], [[Carlos Busso|AUTHOR Carlos Busso]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191831.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-7|PAPER Wed-P-7-C-7 — Development of Emotion Rankers Based on Intended and Perceived Emotion Labels]]</div>|<div class="cpsessionviewpapertitle">Development of Emotion Rankers Based on Intended and Perceived Emotion Labels</div><div class="cpsessionviewpaperauthor">[[Zhenghao Jin|AUTHOR Zhenghao Jin]], [[Houwei Cao|AUTHOR Houwei Cao]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191830.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-8|PAPER Wed-P-7-C-8 — Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation]]</div>|<div class="cpsessionviewpapertitle">Emotion Recognition from Natural Phone Conversations in Individuals with and without Recent Suicidal Ideation</div><div class="cpsessionviewpaperauthor">[[John Gideon|AUTHOR John Gideon]], [[Heather T. Schatten|AUTHOR Heather T. Schatten]], [[Melvin G. McInnis|AUTHOR Melvin G. McInnis]], [[Emily Mower Provost|AUTHOR Emily Mower Provost]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191823.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-9|PAPER Wed-P-7-C-9 — An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults]]</div>|<div class="cpsessionviewpapertitle">An Acoustic and Lexical Analysis of Emotional Valence in Spontaneous Speech: Autobiographical Memory Recall in Older Adults</div><div class="cpsessionviewpaperauthor">[[Deniece S. Nazareth|AUTHOR Deniece S. Nazareth]], [[Ellen Tournier|AUTHOR Ellen Tournier]], [[Sarah Leimkötter|AUTHOR Sarah Leimkötter]], [[Esther Janse|AUTHOR Esther Janse]], [[Dirk Heylen|AUTHOR Dirk Heylen]], [[Gerben J. Westerhof|AUTHOR Gerben J. Westerhof]], [[Khiet P. Truong|AUTHOR Khiet P. Truong]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191605.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-10|PAPER Wed-P-7-C-10 — Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise]]</div>|<div class="cpsessionviewpapertitle">Does the Lombard Effect Improve Emotional Communication in Noise? — Analysis of Emotional Speech Acted in Noise</div><div class="cpsessionviewpaperauthor">[[Yi Zhao|AUTHOR Yi Zhao]], [[Atsushi Ando|AUTHOR Atsushi Ando]], [[Shinji Takaki|AUTHOR Shinji Takaki]], [[Junichi Yamagishi|AUTHOR Junichi Yamagishi]], [[Satoshi Kobashikawa|AUTHOR Satoshi Kobashikawa]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191218.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-11|PAPER Wed-P-7-C-11 — Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Linear Discriminant Differential Evolution for Feature Selection in Emotional Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Soumaya Gharsellaoui|AUTHOR Soumaya Gharsellaoui]], [[Sid Ahmed Selouani|AUTHOR Sid Ahmed Selouani]], [[Mohammed Sidi Yakoub|AUTHOR Mohammed Sidi Yakoub]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191149.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-C-12|PAPER Wed-P-7-C-12 — Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription]]</div>|<div class="cpsessionviewpapertitle">Multi-Modal Learning for Speech Emotion Recognition: An Analysis and Comparison of ASR Outputs with Ground Truth Transcription</div><div class="cpsessionviewpaperauthor">[[Saurabh Sahu|AUTHOR Saurabh Sahu]], [[Vikramjit Mitra|AUTHOR Vikramjit Mitra]], [[Nadee Seneviratne|AUTHOR Nadee Seneviratne]], [[Carol Espy-Wilson|AUTHOR Carol Espy-Wilson]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193039.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-D-1|PAPER Wed-P-7-D-1 — Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives]]</div>|<div class="cpsessionviewpapertitle">Articulatory Characteristics of Secondary Palatalization in Romanian Fricatives</div><div class="cpsessionviewpaperauthor">[[Laura Spinu|AUTHOR Laura Spinu]], [[Maida Percival|AUTHOR Maida Percival]], [[Alexei Kochetov|AUTHOR Alexei Kochetov]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192995.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-D-2|PAPER Wed-P-7-D-2 — Articulation of Vowel Length Contrasts in Australian English]]</div>|<div class="cpsessionviewpapertitle">Articulation of Vowel Length Contrasts in Australian English</div><div class="cpsessionviewpaperauthor">[[Louise Ratko|AUTHOR Louise Ratko]], [[Michael Proctor|AUTHOR Michael Proctor]], [[Felicity Cox|AUTHOR Felicity Cox]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192890.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-D-3|PAPER Wed-P-7-D-3 — V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent]]</div>|<div class="cpsessionviewpapertitle">V-to-V Coarticulation Induced Acoustic and Articulatory Variability of Vowels: The Effect of Pitch-Accent</div><div class="cpsessionviewpaperauthor">[[Andrea Deme|AUTHOR Andrea Deme]], [[Márton Bartók|AUTHOR Márton Bartók]], [[Tekla Etelka Gráczi|AUTHOR Tekla Etelka Gráczi]], [[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]], [[Alexandra Markó|AUTHOR Alexandra Markó]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192851.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-D-4|PAPER Wed-P-7-D-4 — The Contribution of Lip Protrusion to Anglo-English /r/: Evidence from Hyper- and Non-Hyperarticulated Speech]]</div>|<div class="cpsessionviewpapertitle">The Contribution of Lip Protrusion to Anglo-English /r/: Evidence from Hyper- and Non-Hyperarticulated Speech</div><div class="cpsessionviewpaperauthor">[[Hannah King|AUTHOR Hannah King]], [[Emmanuel Ferragne|AUTHOR Emmanuel Ferragne]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192352.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-D-5|PAPER Wed-P-7-D-5 — Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?]]</div>|<div class="cpsessionviewpapertitle">Articulatory Analysis of Transparent Vowel /iː/ in Harmonic and Antiharmonic Hungarian Stems: Is There a Difference?</div><div class="cpsessionviewpaperauthor">[[Alexandra Markó|AUTHOR Alexandra Markó]], [[Márton Bartók|AUTHOR Márton Bartók]], [[Tamás Gábor Csapó|AUTHOR Tamás Gábor Csapó]], [[Tekla Etelka Gráczi|AUTHOR Tekla Etelka Gráczi]], [[Andrea Deme|AUTHOR Andrea Deme]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192232.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-D-6|PAPER Wed-P-7-D-6 — On the Role of Oral Configurations in European Portuguese Nasal Vowels]]</div>|<div class="cpsessionviewpapertitle">On the Role of Oral Configurations in European Portuguese Nasal Vowels</div><div class="cpsessionviewpaperauthor">[[Conceição Cunha|AUTHOR Conceição Cunha]], [[Samuel Silva|AUTHOR Samuel Silva]], [[António Teixeira|AUTHOR António Teixeira]], [[Catarina Oliveira|AUTHOR Catarina Oliveira]], [[Paula Martins|AUTHOR Paula Martins]], [[Arun A. Joseph|AUTHOR Arun A. Joseph]], [[Jens Frahm|AUTHOR Jens Frahm]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192913.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-1|PAPER Wed-P-7-E-1 — Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition]]</div>|<div class="cpsessionviewpapertitle">Residual + Capsule Networks (ResCap) for Simultaneous Single-Channel Overlapped Keyword Recognition</div><div class="cpsessionviewpaperauthor">[[Yan Xiong|AUTHOR Yan Xiong]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Chaitali Chakrabarti|AUTHOR Chaitali Chakrabarti]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192840.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-2|PAPER Wed-P-7-E-2 — A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction]]</div>|<div class="cpsessionviewpapertitle">A Study for Improving Device-Directed Speech Detection Toward Frictionless Human-Machine Interaction</div><div class="cpsessionviewpaperauthor">[[Che-Wei Huang|AUTHOR Che-Wei Huang]], [[Roland Maas|AUTHOR Roland Maas]], [[Sri Harish Mallidi|AUTHOR Sri Harish Mallidi]], [[Björn Hoffmeister|AUTHOR Björn Hoffmeister]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192384.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-3|PAPER Wed-P-7-E-3 — Unsupervised Methods for Audio Classification from Lecture Discussion Recordings]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Methods for Audio Classification from Lecture Discussion Recordings</div><div class="cpsessionviewpaperauthor">[[Hang Su|AUTHOR Hang Su]], [[Borislav Dzodzo|AUTHOR Borislav Dzodzo]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192161.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-4|PAPER Wed-P-7-E-4 — Neural Whispered Speech Detection with Imbalanced Learning]]</div>|<div class="cpsessionviewpapertitle">Neural Whispered Speech Detection with Imbalanced Learning</div><div class="cpsessionviewpaperauthor">[[Takanori Ashihara|AUTHOR Takanori Ashihara]], [[Yusuke Shinohara|AUTHOR Yusuke Shinohara]], [[Hiroshi Sato|AUTHOR Hiroshi Sato]], [[Takafumi Moriya|AUTHOR Takafumi Moriya]], [[Kiyoaki Matsui|AUTHOR Kiyoaki Matsui]], [[Takaaki Fukutomi|AUTHOR Takaaki Fukutomi]], [[Yoshikazu Yamaguchi|AUTHOR Yoshikazu Yamaguchi]], [[Yushi Aono|AUTHOR Yushi Aono]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191857.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-5|PAPER Wed-P-7-E-5 — Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach]]</div>|<div class="cpsessionviewpapertitle">Deep Learning for Orca Call Type Identification — A Fully Unsupervised Approach</div><div class="cpsessionviewpaperauthor">[[Christian Bergler|AUTHOR Christian Bergler]], [[Manuel Schmitt|AUTHOR Manuel Schmitt]], [[Rachael Xi Cheng|AUTHOR Rachael Xi Cheng]], [[Andreas Maier|AUTHOR Andreas Maier]], [[Volker Barth|AUTHOR Volker Barth]], [[Elmar Nöth|AUTHOR Elmar Nöth]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191846.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-6|PAPER Wed-P-7-E-6 — Open-Vocabulary Keyword Spotting with Audio and Text Embeddings]]</div>|<div class="cpsessionviewpapertitle">Open-Vocabulary Keyword Spotting with Audio and Text Embeddings</div><div class="cpsessionviewpaperauthor">[[Niccolò Sacchi|AUTHOR Niccolò Sacchi]], [[Alexandre Nanchen|AUTHOR Alexandre Nanchen]], [[Martin Jaggi|AUTHOR Martin Jaggi]], [[Milos Cernak|AUTHOR Milos Cernak]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191483.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-7|PAPER Wed-P-7-E-7 — ToneNet: A CNN Model of Tone Classification of Mandarin Chinese]]</div>|<div class="cpsessionviewpapertitle">ToneNet: A CNN Model of Tone Classification of Mandarin Chinese</div><div class="cpsessionviewpaperauthor">[[Qiang Gao|AUTHOR Qiang Gao]], [[Shutao Sun|AUTHOR Shutao Sun]], [[Yaping Yang|AUTHOR Yaping Yang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191363.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-8|PAPER Wed-P-7-E-8 — Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices]]</div>|<div class="cpsessionviewpapertitle">Temporal Convolution for Real-Time Keyword Spotting on Mobile Devices</div><div class="cpsessionviewpaperauthor">[[Seungwoo Choi|AUTHOR Seungwoo Choi]], [[Seokjun Seo|AUTHOR Seokjun Seo]], [[Beomjun Shin|AUTHOR Beomjun Shin]], [[Hyeongmin Byun|AUTHOR Hyeongmin Byun]], [[Martin Kersner|AUTHOR Martin Kersner]], [[Beomsu Kim|AUTHOR Beomsu Kim]], [[Dongyoung Kim|AUTHOR Dongyoung Kim]], [[Sungjoo Ha|AUTHOR Sungjoo Ha]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191302.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-9|PAPER Wed-P-7-E-9 — Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation]]</div>|<div class="cpsessionviewpapertitle">Audio Tagging with Compact Feedforward Sequential Memory Network and Audio-to-Audio Ratio Based Data Augmentation</div><div class="cpsessionviewpaperauthor">[[Zhiying Huang|AUTHOR Zhiying Huang]], [[Shiliang Zhang|AUTHOR Shiliang Zhang]], [[Ming Lei|AUTHOR Ming Lei]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191298.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-10|PAPER Wed-P-7-E-10 — Music Genre Classification Using Duplicated Convolutional Layers in Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Music Genre Classification Using Duplicated Convolutional Layers in Neural Networks</div><div class="cpsessionviewpaperauthor">[[Hansi Yang|AUTHOR Hansi Yang]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191154.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-7-E-11|PAPER Wed-P-7-E-11 — A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures]]</div>|<div class="cpsessionviewpapertitle">A Storyteller’s Tale: Literature Audiobooks Genre Classification Using CNN and RNN Architectures</div><div class="cpsessionviewpaperauthor">[[Nehory Carmi|AUTHOR Nehory Carmi]], [[Azaria Cohen|AUTHOR Azaria Cohen]], [[Mireille Avigal|AUTHOR Mireille Avigal]], [[Anat Lerner|AUTHOR Anat Lerner]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Tom Bäckström|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193249.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-1|PAPER Wed-P-8-A-1 — Parameter Enhancement for MELP Speech Codec in Noisy Communication Environment]]</div>|<div class="cpsessionviewpapertitle">Parameter Enhancement for MELP Speech Codec in Noisy Communication Environment</div><div class="cpsessionviewpaperauthor">[[Min-Jae Hwang|AUTHOR Min-Jae Hwang]], [[Hong-Goo Kang|AUTHOR Hong-Goo Kang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191816.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-2|PAPER Wed-P-8-A-2 — Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding]]</div>|<div class="cpsessionviewpapertitle">Cascaded Cross-Module Residual Learning Towards Lightweight End-to-End Speech Coding</div><div class="cpsessionviewpaperauthor">[[Kai Zhen|AUTHOR Kai Zhen]], [[Jongmo Sung|AUTHOR Jongmo Sung]], [[Mi Suk Lee|AUTHOR Mi Suk Lee]], [[Seungkwon Beack|AUTHOR Seungkwon Beack]], [[Minje Kim|AUTHOR Minje Kim]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191284.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-3|PAPER Wed-P-8-A-3 — End-to-End Optimization of Source Models for Speech and Audio Coding Using a Machine Learning Framework]]</div>|<div class="cpsessionviewpapertitle">End-to-End Optimization of Source Models for Speech and Audio Coding Using a Machine Learning Framework</div><div class="cpsessionviewpaperauthor">[[Tom Bäckström|AUTHOR Tom Bäckström]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191255.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-4|PAPER Wed-P-8-A-4 — A Real-Time Wideband Neural Vocoder at 1.6kb/s Using LPCNet]]</div>|<div class="cpsessionviewpapertitle">A Real-Time Wideband Neural Vocoder at 1.6kb/s Using LPCNet</div><div class="cpsessionviewpaperauthor">[[Jean-Marc Valin|AUTHOR Jean-Marc Valin]], [[Jan Skoglund|AUTHOR Jan Skoglund]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191620.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-5|PAPER Wed-P-8-A-5 — Super-Wideband Spectral Envelope Modeling for Speech Coding]]</div>|<div class="cpsessionviewpapertitle">Super-Wideband Spectral Envelope Modeling for Speech Coding</div><div class="cpsessionviewpaperauthor">[[Guillaume Fuchs|AUTHOR Guillaume Fuchs]], [[Chamran Ashour|AUTHOR Chamran Ashour]], [[Tom Bäckström|AUTHOR Tom Bäckström]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193043.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-6|PAPER Wed-P-8-A-6 — Speech Audio Super-Resolution for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Speech Audio Super-Resolution for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Xinyu Li|AUTHOR Xinyu Li]], [[Venkata Chebiyyam|AUTHOR Venkata Chebiyyam]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191580.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-7|PAPER Wed-P-8-A-7 — Artificial Bandwidth Extension Using H∞ Optimization]]</div>|<div class="cpsessionviewpapertitle">Artificial Bandwidth Extension Using H∞ Optimization</div><div class="cpsessionviewpaperauthor">[[Deepika Gupta|AUTHOR Deepika Gupta]], [[Hanumant Singh Shekhawat|AUTHOR Hanumant Singh Shekhawat]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192636.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-8|PAPER Wed-P-8-A-8 — Quality Degradation Diagnosis for Voice Networks — Estimating the Perceived Noisiness, Coloration, and Discontinuity of Transmitted Speech]]</div>|<div class="cpsessionviewpapertitle">Quality Degradation Diagnosis for Voice Networks — Estimating the Perceived Noisiness, Coloration, and Discontinuity of Transmitted Speech</div><div class="cpsessionviewpaperauthor">[[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Sebastian Möller|AUTHOR Sebastian Möller]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192511.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-9|PAPER Wed-P-8-A-9 — A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">A Cross-Entropy-Guided (CEG) Measure for Speech Enhancement Front-End Assessing Performances of Back-End Automatic Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Li Chai|AUTHOR Li Chai]], [[Jun Du|AUTHOR Jun Du]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191340.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-A-10|PAPER Wed-P-8-A-10 — Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios]]</div>|<div class="cpsessionviewpapertitle">Extending the E-Model Towards Super-Wideband and Fullband Speech Communication Scenarios</div><div class="cpsessionviewpaperauthor">[[Sebastian Möller|AUTHOR Sebastian Möller]], [[Gabriel Mittag|AUTHOR Gabriel Mittag]], [[Thilo Michael|AUTHOR Thilo Michael]], [[Vincent Barriac|AUTHOR Vincent Barriac]], [[Hitoshi Aoki|AUTHOR Hitoshi Aoki]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Gallery B|<|
|^Chair:&nbsp;|^Sriram Ganapathy|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192723.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-1|PAPER Wed-P-8-B-1 — Modulation Vectors as Robust Feature Representation for ASR in Domain Mismatched Conditions]]</div>|<div class="cpsessionviewpapertitle">Modulation Vectors as Robust Feature Representation for ASR in Domain Mismatched Conditions</div><div class="cpsessionviewpaperauthor">[[Samik Sadhu|AUTHOR Samik Sadhu]], [[Hynek Hermansky|AUTHOR Hynek Hermansky]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192659.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-2|PAPER Wed-P-8-B-2 — Prosody Usage Optimization for Children Speech Recognition with Zero Resource Children Speech]]</div>|<div class="cpsessionviewpapertitle">Prosody Usage Optimization for Children Speech Recognition with Zero Resource Children Speech</div><div class="cpsessionviewpaperauthor">[[Chenda Li|AUTHOR Chenda Li]], [[Yanmin Qian|AUTHOR Yanmin Qian]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192652.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-3|PAPER Wed-P-8-B-3 — Unsupervised Raw Waveform Representation Learning for ASR]]</div>|<div class="cpsessionviewpapertitle">Unsupervised Raw Waveform Representation Learning for ASR</div><div class="cpsessionviewpaperauthor">[[Purvi Agrawal|AUTHOR Purvi Agrawal]], [[Sriram Ganapathy|AUTHOR Sriram Ganapathy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192193.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-4|PAPER Wed-P-8-B-4 — Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">Low-Dimensional Bottleneck Features for On-Device Continuous Speech Recognition</div><div class="cpsessionviewpaperauthor">[[David B. Ramsay|AUTHOR David B. Ramsay]], [[Kevin Kilgour|AUTHOR Kevin Kilgour]], [[Dominik Roblek|AUTHOR Dominik Roblek]], [[Matthew Sharifi|AUTHOR Matthew Sharifi]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191877.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-5|PAPER Wed-P-8-B-5 — Binary Speech Features for Keyword Spotting Tasks]]</div>|<div class="cpsessionviewpapertitle">Binary Speech Features for Keyword Spotting Tasks</div><div class="cpsessionviewpaperauthor">[[Alexandre Riviello|AUTHOR Alexandre Riviello]], [[Jean-Pierre David|AUTHOR Jean-Pierre David]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191873.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-6|PAPER Wed-P-8-B-6 — wav2vec: Unsupervised Pre-Training for Speech Recognition]]</div>|<div class="cpsessionviewpapertitle">wav2vec: Unsupervised Pre-Training for Speech Recognition</div><div class="cpsessionviewpaperauthor">[[Steffen Schneider|AUTHOR Steffen Schneider]], [[Alexei Baevski|AUTHOR Alexei Baevski]], [[Ronan Collobert|AUTHOR Ronan Collobert]], [[Michael Auli|AUTHOR Michael Auli]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191668.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-7|PAPER Wed-P-8-B-7 — Automatic Detection of Prosodic Focus in American English]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of Prosodic Focus in American English</div><div class="cpsessionviewpaperauthor">[[Sunghye Cho|AUTHOR Sunghye Cho]], [[Mark Liberman|AUTHOR Mark Liberman]], [[Yong-cheol Lee|AUTHOR Yong-cheol Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191665.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-8|PAPER Wed-P-8-B-8 — Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders]]</div>|<div class="cpsessionviewpapertitle">Feature Exploration for Almost Zero-Resource ASR-Free Keyword Spotting Using a Multilingual Bottleneck Extractor and Correspondence Autoencoders</div><div class="cpsessionviewpaperauthor">[[Raghav Menon|AUTHOR Raghav Menon]], [[Herman Kamper|AUTHOR Herman Kamper]], [[Ewald van der Westhuizen|AUTHOR Ewald van der Westhuizen]], [[John Quinn|AUTHOR John Quinn]], [[Thomas Niesler|AUTHOR Thomas Niesler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191257.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-B-9|PAPER Wed-P-8-B-9 — On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters]]</div>|<div class="cpsessionviewpapertitle">On Learning Interpretable CNNs with Parametric Modulated Kernel-Based Filters</div><div class="cpsessionviewpaperauthor">[[Erfan Loweimi|AUTHOR Erfan Loweimi]], [[Peter Bell|AUTHOR Peter Bell]], [[Steve Renals|AUTHOR Steve Renals]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Gallery C|<|
|^Chair:&nbsp;|^Xunying Liu|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191332.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-1|PAPER Wed-P-8-C-1 — Reverse Transfer Learning: Can Word Embeddings Trained for Different NLP Tasks Improve Neural Language Models?]]</div>|<div class="cpsessionviewpapertitle">Reverse Transfer Learning: Can Word Embeddings Trained for Different NLP Tasks Improve Neural Language Models?</div><div class="cpsessionviewpaperauthor">[[Lyan Verwimp|AUTHOR Lyan Verwimp]], [[Jerome R. Bellegarda|AUTHOR Jerome R. Bellegarda]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191434.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-2|PAPER Wed-P-8-C-2 — Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR]]</div>|<div class="cpsessionviewpapertitle">Joint Grapheme and Phoneme Embeddings for Contextual End-to-End ASR</div><div class="cpsessionviewpaperauthor">[[Zhehuai Chen|AUTHOR Zhehuai Chen]], [[Mahaveer Jain|AUTHOR Mahaveer Jain]], [[Yongqiang Wang|AUTHOR Yongqiang Wang]], [[Michael L. Seltzer|AUTHOR Michael L. Seltzer]], [[Christian Fuegen|AUTHOR Christian Fuegen]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191484.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-3|PAPER Wed-P-8-C-3 — Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR]]</div>|<div class="cpsessionviewpapertitle">Character-Aware Sub-Word Level Language Modeling for Uyghur and Turkish ASR</div><div class="cpsessionviewpaperauthor">[[Chang Liu|AUTHOR Chang Liu]], [[Zhen Zhang|AUTHOR Zhen Zhang]], [[Pengyuan Zhang|AUTHOR Pengyuan Zhang]], [[Yonghong Yan|AUTHOR Yonghong Yan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191822.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-4|PAPER Wed-P-8-C-4 — Connecting and Comparing Language Model Interpolation Techniques]]</div>|<div class="cpsessionviewpapertitle">Connecting and Comparing Language Model Interpolation Techniques</div><div class="cpsessionviewpaperauthor">[[Ernest Pusateri|AUTHOR Ernest Pusateri]], [[Christophe Van Gysel|AUTHOR Christophe Van Gysel]], [[Rami Botros|AUTHOR Rami Botros]], [[Sameer Badaskar|AUTHOR Sameer Badaskar]], [[Mirko Hannemann|AUTHOR Mirko Hannemann]], [[Youssef Oualil|AUTHOR Youssef Oualil]], [[Ilya Oparin|AUTHOR Ilya Oparin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191858.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-5|PAPER Wed-P-8-C-5 — Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation]]</div>|<div class="cpsessionviewpapertitle">Enriching Rare Word Representations in Neural Language Models by Embedding Matrix Augmentation</div><div class="cpsessionviewpaperauthor">[[Yerbolat Khassanov|AUTHOR Yerbolat Khassanov]], [[Zhiping Zeng|AUTHOR Zhiping Zeng]], [[Van Tung Pham|AUTHOR Van Tung Pham]], [[Haihua Xu|AUTHOR Haihua Xu]], [[Eng Siong Chng|AUTHOR Eng Siong Chng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191927.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-6|PAPER Wed-P-8-C-6 — Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models]]</div>|<div class="cpsessionviewpapertitle">Comparative Study of Parametric and Representation Uncertainty Modeling for Recurrent Neural Network Language Models</div><div class="cpsessionviewpaperauthor">[[Jianwei Yu|AUTHOR Jianwei Yu]], [[Max W.Y. Lam|AUTHOR Max W.Y. Lam]], [[Shoukang Hu|AUTHOR Shoukang Hu]], [[Xixin Wu|AUTHOR Xixin Wu]], [[Xu Li|AUTHOR Xu Li]], [[Yuewen Cao|AUTHOR Yuewen Cao]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192164.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-7|PAPER Wed-P-8-C-7 — Improving Automatically Induced Lexicons for Highly Agglutinating Languages Using Data-Driven Morphological Segmentation]]</div>|<div class="cpsessionviewpapertitle">Improving Automatically Induced Lexicons for Highly Agglutinating Languages Using Data-Driven Morphological Segmentation</div><div class="cpsessionviewpaperauthor">[[Wiehan Agenbag|AUTHOR Wiehan Agenbag]], [[Thomas Niesler|AUTHOR Thomas Niesler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192347.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-8|PAPER Wed-P-8-C-8 — Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR]]</div>|<div class="cpsessionviewpapertitle">Attention-Based Word Vector Prediction with LSTMs and its Application to the OOV Problem in ASR</div><div class="cpsessionviewpaperauthor">[[Alejandro Coucheiro-Limeres|AUTHOR Alejandro Coucheiro-Limeres]], [[Fernando Fernández-Martínez|AUTHOR Fernando Fernández-Martínez]], [[Rubén San-Segundo|AUTHOR Rubén San-Segundo]], [[Javier Ferreiros-López|AUTHOR Javier Ferreiros-López]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192501.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-9|PAPER Wed-P-8-C-9 — Code-Switching Sentence Generation by Bert and Generative Adversarial Networks]]</div>|<div class="cpsessionviewpapertitle">Code-Switching Sentence Generation by Bert and Generative Adversarial Networks</div><div class="cpsessionviewpaperauthor">[[Yingying Gao|AUTHOR Yingying Gao]], [[Junlan Feng|AUTHOR Junlan Feng]], [[Ying Liu|AUTHOR Ying Liu]], [[Leijing Hou|AUTHOR Leijing Hou]], [[Xin Pan|AUTHOR Xin Pan]], [[Yong Ma|AUTHOR Yong Ma]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192807.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-10|PAPER Wed-P-8-C-10 — Unified Verbalization for Speech Recognition & Synthesis Across Languages]]</div>|<div class="cpsessionviewpapertitle">Unified Verbalization for Speech Recognition & Synthesis Across Languages</div><div class="cpsessionviewpaperauthor">[[Sandy Ritchie|AUTHOR Sandy Ritchie]], [[Richard Sproat|AUTHOR Richard Sproat]], [[Kyle Gorman|AUTHOR Kyle Gorman]], [[Daan van Esch|AUTHOR Daan van Esch]], [[Christian Schallhart|AUTHOR Christian Schallhart]], [[Nikos Bampounis|AUTHOR Nikos Bampounis]], [[Beno^ıt Brard|AUTHOR Beno^ıt Brard]], [[Jonas Fromseier Mortensen|AUTHOR Jonas Fromseier Mortensen]], [[Millie Holt|AUTHOR Millie Holt]], [[Eoin Mahon|AUTHOR Eoin Mahon]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193207.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-C-11|PAPER Wed-P-8-C-11 — Better Morphology Prediction for Better Speech Systems]]</div>|<div class="cpsessionviewpapertitle">Better Morphology Prediction for Better Speech Systems</div><div class="cpsessionviewpaperauthor">[[Dravyansh Sharma|AUTHOR Dravyansh Sharma]], [[Melissa Wilson|AUTHOR Melissa Wilson]], [[Antoine Bruguier|AUTHOR Antoine Bruguier]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 10/D|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192832.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-1|PAPER Wed-P-8-D-1 — Vietnamese Learners Tackling the German /ʃt/ in Perception]]</div>|<div class="cpsessionviewpapertitle">Vietnamese Learners Tackling the German /ʃt/ in Perception</div><div class="cpsessionviewpaperauthor">[[Anke Sennema|AUTHOR Anke Sennema]], [[Silke Hamann|AUTHOR Silke Hamann]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192637.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-2|PAPER Wed-P-8-D-2 — An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK]]</div>|<div class="cpsessionviewpapertitle">An Articulatory-Acoustic Investigation into GOOSE-Fronting in German-English Bilinguals Residing in London, UK</div><div class="cpsessionviewpaperauthor">[[Scott Lewis|AUTHOR Scott Lewis]], [[Adib Mehrabi|AUTHOR Adib Mehrabi]], [[Esther de Leeuw|AUTHOR Esther de Leeuw]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191677.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-3|PAPER Wed-P-8-D-3 — Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features]]</div>|<div class="cpsessionviewpapertitle">Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features</div><div class="cpsessionviewpaperauthor">[[Sabrina Jenne|AUTHOR Sabrina Jenne]], [[Ngoc Thang Vu|AUTHOR Ngoc Thang Vu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191183.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-4|PAPER Wed-P-8-D-4 — Using Prosody to Discover Word Order Alternations in a Novel Language]]</div>|<div class="cpsessionviewpapertitle">Using Prosody to Discover Word Order Alternations in a Novel Language</div><div class="cpsessionviewpaperauthor">[[Anouschka Foltz|AUTHOR Anouschka Foltz]], [[Sarah Cooper|AUTHOR Sarah Cooper]], [[Tamsin M. McKelvey|AUTHOR Tamsin M. McKelvey]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191150.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-5|PAPER Wed-P-8-D-5 — Speaking Rate, Information Density, and Information Rate in First-Language and Second-Language Speech]]</div>|<div class="cpsessionviewpapertitle">Speaking Rate, Information Density, and Information Rate in First-Language and Second-Language Speech</div><div class="cpsessionviewpaperauthor">[[Ann R. Bradlow|AUTHOR Ann R. Bradlow]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192098.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-6|PAPER Wed-P-8-D-6 — Articulation Rate as a Metric in Spoken Language Assessment]]</div>|<div class="cpsessionviewpapertitle">Articulation Rate as a Metric in Spoken Language Assessment</div><div class="cpsessionviewpaperauthor">[[Calbert Graham|AUTHOR Calbert Graham]], [[Francis Nolan|AUTHOR Francis Nolan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193247.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-7|PAPER Wed-P-8-D-7 — Learning Alignment for Multimodal Emotion Recognition from Speech]]</div>|<div class="cpsessionviewpapertitle">Learning Alignment for Multimodal Emotion Recognition from Speech</div><div class="cpsessionviewpaperauthor">[[Haiyang Xu|AUTHOR Haiyang Xu]], [[Hui Zhang|AUTHOR Hui Zhang]], [[Kun Han|AUTHOR Kun Han]], [[Yun Wang|AUTHOR Yun Wang]], [[Yiping Peng|AUTHOR Yiping Peng]], [[Xiangang Li|AUTHOR Xiangang Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192838.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-8|PAPER Wed-P-8-D-8 — Liquid Deletion in French Child-Directed Speech]]</div>|<div class="cpsessionviewpapertitle">Liquid Deletion in French Child-Directed Speech</div><div class="cpsessionviewpaperauthor">[[Sharon Peperkamp|AUTHOR Sharon Peperkamp]], [[Monica Hegde|AUTHOR Monica Hegde]], [[Maria Julia Carbajal|AUTHOR Maria Julia Carbajal]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191773.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-9|PAPER Wed-P-8-D-9 — Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length]]</div>|<div class="cpsessionviewpapertitle">Towards Detection of Canonical Babbling by Citizen Scientists: Performance as a Function of Clip Length</div><div class="cpsessionviewpaperauthor">[[Amanda Seidl|AUTHOR Amanda Seidl]], [[Anne S. Warlaumont|AUTHOR Anne S. Warlaumont]], [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191737.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-10|PAPER Wed-P-8-D-10 — Nasal Consonant Discrimination in Infant- and Adult-Directed Speech]]</div>|<div class="cpsessionviewpapertitle">Nasal Consonant Discrimination in Infant- and Adult-Directed Speech</div><div class="cpsessionviewpaperauthor">[[Bogdan Ludusan|AUTHOR Bogdan Ludusan]], [[Annett Jorschick|AUTHOR Annett Jorschick]], [[Reiko Mazuka|AUTHOR Reiko Mazuka]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191674.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-11|PAPER Wed-P-8-D-11 — No Distributional Learning in Adults from Attended Listening to Non-Speech]]</div>|<div class="cpsessionviewpapertitle">No Distributional Learning in Adults from Attended Listening to Non-Speech</div><div class="cpsessionviewpaperauthor">[[Ellen Marklund|AUTHOR Ellen Marklund]], [[Johan Sjons|AUTHOR Johan Sjons]], [[Lisa Gustavsson|AUTHOR Lisa Gustavsson]], [[Elísabet Eir Cortes|AUTHOR Elísabet Eir Cortes]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191523.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-12|PAPER Wed-P-8-D-12 — A Computational Model of Early Language Acquisition from Audiovisual Experiences of Young Infants]]</div>|<div class="cpsessionviewpapertitle">A Computational Model of Early Language Acquisition from Audiovisual Experiences of Young Infants</div><div class="cpsessionviewpaperauthor">[[Okko Räsänen|AUTHOR Okko Räsänen]], [[Khazar Khorrami|AUTHOR Khazar Khorrami]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191638.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-D-13|PAPER Wed-P-8-D-13 — The Production of Chinese Affricates /ts/ and /ts^^h^^/ by Native Urdu Speakers]]</div>|<div class="cpsessionviewpapertitle">The Production of Chinese Affricates /ts/ and /ts^^h^^/ by Native Urdu Speakers</div><div class="cpsessionviewpaperauthor">[[Dan Du|AUTHOR Dan Du]], [[Jinsong Zhang|AUTHOR Jinsong Zhang]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 10/E|<|
|^Chair:&nbsp;|^Johannes Gehrke|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193019.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-1|PAPER Wed-P-8-E-1 — Multi-Stream Network with Temporal Attention for Environmental Sound Classification]]</div>|<div class="cpsessionviewpapertitle">Multi-Stream Network with Temporal Attention for Environmental Sound Classification</div><div class="cpsessionviewpaperauthor">[[Xinyu Li|AUTHOR Xinyu Li]], [[Venkata Chebiyyam|AUTHOR Venkata Chebiyyam]], [[Katrin Kirchhoff|AUTHOR Katrin Kirchhoff]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192394.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-2|PAPER Wed-P-8-E-2 — Neural Network Distillation on IoT Platforms for Sound Event Detection]]</div>|<div class="cpsessionviewpapertitle">Neural Network Distillation on IoT Platforms for Sound Event Detection</div><div class="cpsessionviewpaperauthor">[[Gianmarco Cerutti|AUTHOR Gianmarco Cerutti]], [[Rahul Prasad|AUTHOR Rahul Prasad]], [[Alessio Brutti|AUTHOR Alessio Brutti]], [[Elisabetta Farella|AUTHOR Elisabetta Farella]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192271.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-3|PAPER Wed-P-8-E-3 — Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection]]</div>|<div class="cpsessionviewpapertitle">Class-Wise Centroid Distance Metric Learning for Acoustic Event Detection</div><div class="cpsessionviewpaperauthor">[[Xugang Lu|AUTHOR Xugang Lu]], [[Peng Shen|AUTHOR Peng Shen]], [[Sheng Li|AUTHOR Sheng Li]], [[Yu Tsao|AUTHOR Yu Tsao]], [[Hisashi Kawai|AUTHOR Hisashi Kawai]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192171.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-4|PAPER Wed-P-8-E-4 — A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models]]</div>|<div class="cpsessionviewpapertitle">A Hybrid Approach to Acoustic Scene Classification Based on Universal Acoustic Models</div><div class="cpsessionviewpaperauthor">[[Xue Bai|AUTHOR Xue Bai]], [[Jun Du|AUTHOR Jun Du]], [[Zi-Rui Wang|AUTHOR Zi-Rui Wang]], [[Chin-Hui Lee|AUTHOR Chin-Hui Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192049.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-5|PAPER Wed-P-8-E-5 — Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection]]</div>|<div class="cpsessionviewpapertitle">Hierarchical Pooling Structure for Weakly Labeled Sound Event Detection</div><div class="cpsessionviewpaperauthor">[[Ke-Xin He|AUTHOR Ke-Xin He]], [[Yu-Han Shen|AUTHOR Yu-Han Shen]], [[Wei-Qiang Zhang|AUTHOR Wei-Qiang Zhang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191860.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-6|PAPER Wed-P-8-E-6 — Sound Event Detection in Multichannel Audio Using Convolutional Time-Frequency-Channel Squeeze and Excitation]]</div>|<div class="cpsessionviewpapertitle">Sound Event Detection in Multichannel Audio Using Convolutional Time-Frequency-Channel Squeeze and Excitation</div><div class="cpsessionviewpaperauthor">[[Wei Xia|AUTHOR Wei Xia]], [[Kazuhito Koishida|AUTHOR Kazuhito Koishida]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191841.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-7|PAPER Wed-P-8-E-7 — A Robust Framework for Acoustic Scene Classification]]</div>|<div class="cpsessionviewpapertitle">A Robust Framework for Acoustic Scene Classification</div><div class="cpsessionviewpaperauthor">[[Lam Pham|AUTHOR Lam Pham]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]], [[Huy Phan|AUTHOR Huy Phan]], [[Ramaswamy Palaniappan|AUTHOR Ramaswamy Palaniappan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191747.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-8|PAPER Wed-P-8-E-8 — Compression of Acoustic Event Detection Models with Quantized Distillation]]</div>|<div class="cpsessionviewpapertitle">Compression of Acoustic Event Detection Models with Quantized Distillation</div><div class="cpsessionviewpaperauthor">[[Bowen Shi|AUTHOR Bowen Shi]], [[Ming Sun|AUTHOR Ming Sun]], [[Chieh-Chi Kao|AUTHOR Chieh-Chi Kao]], [[Viktor Rozgic|AUTHOR Viktor Rozgic]], [[Spyros Matsoukas|AUTHOR Spyros Matsoukas]], [[Chao Wang|AUTHOR Chao Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191579.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-9|PAPER Wed-P-8-E-9 — An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy]]</div>|<div class="cpsessionviewpapertitle">An End-to-End Audio Classification System Based on Raw Waveforms and Mix-Training Strategy</div><div class="cpsessionviewpaperauthor">[[Jiaxu Chen|AUTHOR Jiaxu Chen]], [[Jing Hao|AUTHOR Jing Hao]], [[Kai Chen|AUTHOR Kai Chen]], [[Di Xie|AUTHOR Di Xie]], [[Shicai Yang|AUTHOR Shicai Yang]], [[Shiliang Pu|AUTHOR Shiliang Pu]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191532.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-10|PAPER Wed-P-8-E-10 — Few-Shot Audio Classification with Attentional Graph Neural Networks]]</div>|<div class="cpsessionviewpapertitle">Few-Shot Audio Classification with Attentional Graph Neural Networks</div><div class="cpsessionviewpaperauthor">[[Shilei Zhang|AUTHOR Shilei Zhang]], [[Yong Qin|AUTHOR Yong Qin]], [[Kewei Sun|AUTHOR Kewei Sun]], [[Yonghua Lin|AUTHOR Yonghua Lin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191231.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-P-8-E-11|PAPER Wed-P-8-E-11 — Semi-Supervised Audio Classification with Consistency-Based Regularization]]</div>|<div class="cpsessionviewpapertitle">Semi-Supervised Audio Classification with Consistency-Based Regularization</div><div class="cpsessionviewpaperauthor">[[Kangkang Lu|AUTHOR Kangkang Lu]], [[Chuan-Sheng Foo|AUTHOR Chuan-Sheng Foo]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]], [[Vijay Ramaseshan Chandrasekhar|AUTHOR Vijay Ramaseshan Chandrasekhar]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 4|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198002.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-3-1|PAPER Wed-S&T-3-1 — Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations]]</div>|<div class="cpsessionviewpapertitle">Avaya Conversational Intelligence: A Real-Time System for Spoken Language Understanding in Human-Human Call Center Conversations</div><div class="cpsessionviewpaperauthor">[[Jan Mizgajski|AUTHOR Jan Mizgajski]], [[Adrian Szymczak|AUTHOR Adrian Szymczak]], [[Robert Głowski|AUTHOR Robert Głowski]], [[Piotr Szymański|AUTHOR Piotr Szymański]], [[Piotr Żelasko|AUTHOR Piotr Żelasko]], [[Łukasz Augustyniak|AUTHOR Łukasz Augustyniak]], [[Mikołaj Morzy|AUTHOR Mikołaj Morzy]], [[Yishay Carmiel|AUTHOR Yishay Carmiel]], [[Jeff Hodson|AUTHOR Jeff Hodson]], [[Łukasz Wójciak|AUTHOR Łukasz Wójciak]], [[Daniel Smoczyk|AUTHOR Daniel Smoczyk]], [[Adam Wróbel|AUTHOR Adam Wróbel]], [[Bartosz Borowik|AUTHOR Bartosz Borowik]], [[Adam Artajew|AUTHOR Adam Artajew]], [[Marcin Baran|AUTHOR Marcin Baran]], [[Cezary Kwiatkowski|AUTHOR Cezary Kwiatkowski]], [[Marzena Żyła-Hoppe|AUTHOR Marzena Żyła-Hoppe]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-3-2|PAPER Wed-S&T-3-2 — Robust Keyword Spotting via Recycle-Pooling for Mobile Game]]</div>|<div class="cpsessionviewpapertitle">Robust Keyword Spotting via Recycle-Pooling for Mobile Game</div><div class="cpsessionviewpaperauthor">[[Shounan An|AUTHOR Shounan An]], [[Youngsoo Kim|AUTHOR Youngsoo Kim]], [[Hu Xu|AUTHOR Hu Xu]], [[Jinwoo Lee|AUTHOR Jinwoo Lee]], [[Myungwoo Lee|AUTHOR Myungwoo Lee]], [[Insoo Oh|AUTHOR Insoo Oh]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198011.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-3-3|PAPER Wed-S&T-3-3 — Multimodal Dialog with the MALACH Audiovisual Archive]]</div>|<div class="cpsessionviewpapertitle">Multimodal Dialog with the MALACH Audiovisual Archive</div><div class="cpsessionviewpaperauthor">[[Adam Chýlek|AUTHOR Adam Chýlek]], [[Luboš Šmídl|AUTHOR Luboš Šmídl]], [[Jan Švec|AUTHOR Jan Švec]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198014.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-3-4|PAPER Wed-S&T-3-4 — SpeechMarker: A Voice Based Multi-Level Attendance Application]]</div>|<div class="cpsessionviewpapertitle">SpeechMarker: A Voice Based Multi-Level Attendance Application</div><div class="cpsessionviewpaperauthor">[[Sarfaraz Jelil|AUTHOR Sarfaraz Jelil]], [[Abhishek Shrivastava|AUTHOR Abhishek Shrivastava]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[S.R. Mahadeva Prasanna|AUTHOR S.R. Mahadeva Prasanna]], [[Rohit Sinha|AUTHOR Rohit Sinha]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198032.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-3-5|PAPER Wed-S&T-3-5 — Robust Sound Recognition: A Neuromorphic Approach]]</div>|<div class="cpsessionviewpapertitle">Robust Sound Recognition: A Neuromorphic Approach</div><div class="cpsessionviewpaperauthor">[[Jibin Wu|AUTHOR Jibin Wu]], [[Zihan Pan|AUTHOR Zihan Pan]], [[Malu Zhang|AUTHOR Malu Zhang]], [[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Yansong Chua|AUTHOR Yansong Chua]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198047.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-3-6|PAPER Wed-S&T-3-6 — The CUHK Dysarthric Speech Recognition Systems for English and Cantonese]]</div>|<div class="cpsessionviewpapertitle">The CUHK Dysarthric Speech Recognition Systems for English and Cantonese</div><div class="cpsessionviewpaperauthor">[[Shoukang Hu|AUTHOR Shoukang Hu]], [[Shansong Liu|AUTHOR Shansong Liu]], [[Heng Fai Chang|AUTHOR Heng Fai Chang]], [[Mengzhe Geng|AUTHOR Mengzhe Geng]], [[Jiani Chen|AUTHOR Jiani Chen]], [[Lau Wing Chung|AUTHOR Lau Wing Chung]], [[To Ka Hei|AUTHOR To Ka Hei]], [[Jianwei Yu|AUTHOR Jianwei Yu]], [[Ka Ho Wong|AUTHOR Ka Ho Wong]], [[Xunying Liu|AUTHOR Xunying Liu]], [[Helen Meng|AUTHOR Helen Meng]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 4|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198001.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-4-1|PAPER Wed-S&T-4-1 — BAS Web Services for Automatic Subtitle Creation and Anonymization]]</div>|<div class="cpsessionviewpapertitle">BAS Web Services for Automatic Subtitle Creation and Anonymization</div><div class="cpsessionviewpaperauthor">[[Florian Schiel|AUTHOR Florian Schiel]], [[Thomas Kisler|AUTHOR Thomas Kisler]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198015.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-4-2|PAPER Wed-S&T-4-2 — A User-Friendly and Adaptable Re-Implementation of an Acoustic Prominence Detection and Annotation Tool]]</div>|<div class="cpsessionviewpapertitle">A User-Friendly and Adaptable Re-Implementation of an Acoustic Prominence Detection and Annotation Tool</div><div class="cpsessionviewpaperauthor">[[Jana Voße|AUTHOR Jana Voße]], [[Petra Wagner|AUTHOR Petra Wagner]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198021.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-4-3|PAPER Wed-S&T-4-3 — PyToBI: A Toolkit for ToBI Labeling Under Python]]</div>|<div class="cpsessionviewpapertitle">PyToBI: A Toolkit for ToBI Labeling Under Python</div><div class="cpsessionviewpaperauthor">[[Mónica Domínguez|AUTHOR Mónica Domínguez]], [[Patrick Louis Rohrer|AUTHOR Patrick Louis Rohrer]], [[Juan Soler-Company|AUTHOR Juan Soler-Company]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198025.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-4-4|PAPER Wed-S&T-4-4 — GECKO — A Tool for Effective Annotation of Human Conversations]]</div>|<div class="cpsessionviewpapertitle">GECKO — A Tool for Effective Annotation of Human Conversations</div><div class="cpsessionviewpaperauthor">[[Golan Levy|AUTHOR Golan Levy]], [[Raquel Sitman|AUTHOR Raquel Sitman]], [[Ido Amir|AUTHOR Ido Amir]], [[Eduard Golshtein|AUTHOR Eduard Golshtein]], [[Ran Mochary|AUTHOR Ran Mochary]], [[Eilon Reshef|AUTHOR Eilon Reshef]], [[Roi Reichart|AUTHOR Roi Reichart]], [[Omri Allouche|AUTHOR Omri Allouche]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198028.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-4-5|PAPER Wed-S&T-4-5 — SLP-AA: Tools for Sign Language Phonetic and Phonological Research]]</div>|<div class="cpsessionviewpapertitle">SLP-AA: Tools for Sign Language Phonetic and Phonological Research</div><div class="cpsessionviewpaperauthor">[[Roger Yu-Hsiang Lo|AUTHOR Roger Yu-Hsiang Lo]], [[Kathleen Currie Hall|AUTHOR Kathleen Currie Hall]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198040.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-4-6|PAPER Wed-S&T-4-6 — SANTLR: Speech Annotation Toolkit for Low Resource Languages]]</div>|<div class="cpsessionviewpapertitle">SANTLR: Speech Annotation Toolkit for Low Resource Languages</div><div class="cpsessionviewpaperauthor">[[Xinjian Li|AUTHOR Xinjian Li]], [[Zhong Zhou|AUTHOR Zhong Zhou]], [[Siddharth Dalmia|AUTHOR Siddharth Dalmia]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Florian Metze|AUTHOR Florian Metze]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 4|<|
|^Chair:&nbsp;|^To be confirmed|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198013.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-5-1|PAPER Wed-S&T-5-1 — Web-Based Speech Synthesis Editor]]</div>|<div class="cpsessionviewpapertitle">Web-Based Speech Synthesis Editor</div><div class="cpsessionviewpaperauthor">[[Martin Grůber|AUTHOR Martin Grůber]], [[Jakub Vít|AUTHOR Jakub Vít]], [[Jindřich Matoušek|AUTHOR Jindřich Matoušek]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198018.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-5-2|PAPER Wed-S&T-5-2 —  GFM-Voc: A Real-Time Voice Quality Modification System]]</div>|<div class="cpsessionviewpapertitle"> GFM-Voc: A Real-Time Voice Quality Modification System</div><div class="cpsessionviewpaperauthor">[[Olivier Perrotin|AUTHOR Olivier Perrotin]], [[Ian McLoughlin|AUTHOR Ian McLoughlin]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198026.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-5-3|PAPER Wed-S&T-5-3 — Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS]]</div>|<div class="cpsessionviewpapertitle">Off the Cuff: Exploring Extemporaneous Speech Delivery with TTS</div><div class="cpsessionviewpaperauthor">[[Éva Székely|AUTHOR Éva Székely]], [[Gustav Eje Henter|AUTHOR Gustav Eje Henter]], [[Jonas Beskow|AUTHOR Jonas Beskow]], [[Joakim Gustafson|AUTHOR Joakim Gustafson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198031.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-5-4|PAPER Wed-S&T-5-4 — Synthesized Spoken Names: Biases Impacting Perception]]</div>|<div class="cpsessionviewpapertitle">Synthesized Spoken Names: Biases Impacting Perception</div><div class="cpsessionviewpaperauthor">[[Lucas Kessler|AUTHOR Lucas Kessler]], [[Cecilia Ovesdotter Alm|AUTHOR Cecilia Ovesdotter Alm]], [[Reynold Bailey|AUTHOR Reynold Bailey]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198034.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-5-5|PAPER Wed-S&T-5-5 — Unbabel Talk — Human Verified Translations for Voice Instant Messaging]]</div>|<div class="cpsessionviewpapertitle">Unbabel Talk — Human Verified Translations for Voice Instant Messaging</div><div class="cpsessionviewpaperauthor">[[Luís Bernardo|AUTHOR Luís Bernardo]], [[Mathieu Giquel|AUTHOR Mathieu Giquel]], [[Sebastião Quintas|AUTHOR Sebastião Quintas]], [[Paulo Dimas|AUTHOR Paulo Dimas]], [[Helena Moniz|AUTHOR Helena Moniz]], [[Isabel Trancoso|AUTHOR Isabel Trancoso]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS198045.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-S&T-5-6|PAPER Wed-S&T-5-6 — Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer]]</div>|<div class="cpsessionviewpapertitle">Adjusting Pleasure-Arousal-Dominance for Continuous Emotional Text-to-Speech Synthesizer</div><div class="cpsessionviewpaperauthor">[[Azam Rabiee|AUTHOR Azam Rabiee]], [[Tae-Ho Kim|AUTHOR Tae-Ho Kim]], [[Soo-Young Lee|AUTHOR Soo-Young Lee]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|10:00–12:00, Wednesday 18 Sept 2019, Hall 11|<|
|^Chair:&nbsp;|^Björn Schuller|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191122.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-1|PAPER Wed-SS-6-4-1 — The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity]]</div>|<div class="cpsessionviewpapertitle">The INTERSPEECH 2019 Computational Paralinguistics Challenge: Styrian Dialects, Continuous Sleepiness, Baby Sounds & Orca Activity</div><div class="cpsessionviewpaperauthor">[[Björn W. Schuller|AUTHOR Björn W. Schuller]], [[Anton Batliner|AUTHOR Anton Batliner]], [[Christian Bergler|AUTHOR Christian Bergler]], [[Florian B. Pokorny|AUTHOR Florian B. Pokorny]], [[Jarek Krajewski|AUTHOR Jarek Krajewski]], [[Margaret Cychosz|AUTHOR Margaret Cychosz]], [[Ralf Vollmann|AUTHOR Ralf Vollmann]], [[Sonja-Dana Roelen|AUTHOR Sonja-Dana Roelen]], [[Sebastian Schnieder|AUTHOR Sebastian Schnieder]], [[Elika Bergelson|AUTHOR Elika Bergelson]], [[Alejandrina Cristia|AUTHOR Alejandrina Cristia]], [[Amanda Seidl|AUTHOR Amanda Seidl]], [[Anne S. Warlaumont|AUTHOR Anne S. Warlaumont]], [[Lisa Yankowitz|AUTHOR Lisa Yankowitz]], [[Elmar Nöth|AUTHOR Elmar Nöth]], [[Shahin Amiriparian|AUTHOR Shahin Amiriparian]], [[Simone Hantke|AUTHOR Simone Hantke]], [[Maximilian Schmitt|AUTHOR Maximilian Schmitt]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192398.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-2|PAPER Wed-SS-6-4-2 — Using Speech Production Knowledge for Raw Waveform Modelling Based Styrian Dialect Identification]]</div>|<div class="cpsessionviewpapertitle">Using Speech Production Knowledge for Raw Waveform Modelling Based Styrian Dialect Identification</div><div class="cpsessionviewpaperauthor">[[S. Pavankumar Dubagunta|AUTHOR S. Pavankumar Dubagunta]], [[Mathew Magimai-Doss|AUTHOR Mathew Magimai-Doss]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192478.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-3|PAPER Wed-SS-6-4-3 — Deep Neural Baselines for Computational Paralinguistics]]</div>|<div class="cpsessionviewpapertitle">Deep Neural Baselines for Computational Paralinguistics</div><div class="cpsessionviewpaperauthor">[[Daniel Elsner|AUTHOR Daniel Elsner]], [[Stefan Langer|AUTHOR Stefan Langer]], [[Fabian Ritz|AUTHOR Fabian Ritz]], [[Robert Mueller|AUTHOR Robert Mueller]], [[Steffen Illium|AUTHOR Steffen Illium]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192540.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-4|PAPER Wed-SS-6-4-4 — Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm]]</div>|<div class="cpsessionviewpapertitle">Styrian Dialect Classification: Comparing and Fusing Classifiers Based on a Feature Selection Using a Genetic Algorithm</div><div class="cpsessionviewpaperauthor">[[Thomas Kisler|AUTHOR Thomas Kisler]], [[Raphael Winkelmann|AUTHOR Raphael Winkelmann]], [[Florian Schiel|AUTHOR Florian Schiel]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192110.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-5|PAPER Wed-SS-6-4-5 — Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition]]</div>|<div class="cpsessionviewpapertitle">Using Attention Networks and Adversarial Augmentation for Styrian Dialect Continuous Sleepiness and Baby Sound Recognition</div><div class="cpsessionviewpaperauthor">[[Sung-Lin Yeh|AUTHOR Sung-Lin Yeh]], [[Gao-Yi Chao|AUTHOR Gao-Yi Chao]], [[Bo-Hao Su|AUTHOR Bo-Hao Su]], [[Yu-Lin Huang|AUTHOR Yu-Lin Huang]], [[Meng-Han Lin|AUTHOR Meng-Han Lin]], [[Yin-Chun Tsai|AUTHOR Yin-Chun Tsai]], [[Yu-Wen Tai|AUTHOR Yu-Wen Tai]], [[Zheng-Chi Lu|AUTHOR Zheng-Chi Lu]], [[Chieh-Yu Chen|AUTHOR Chieh-Yu Chen]], [[Tsung-Ming Tai|AUTHOR Tsung-Ming Tai]], [[Chiu-Wang Tseng|AUTHOR Chiu-Wang Tseng]], [[Cheng-Kuang Lee|AUTHOR Cheng-Kuang Lee]], [[Chi-Chun Lee|AUTHOR Chi-Chun Lee]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192278.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-6|PAPER Wed-SS-6-4-6 — Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech]]</div>|<div class="cpsessionviewpapertitle">Ordinal Triplet Loss: Investigating Sleepiness Detection from Speech</div><div class="cpsessionviewpaperauthor">[[Peter Wu|AUTHOR Peter Wu]], [[SaiKrishna Rallabandi|AUTHOR SaiKrishna Rallabandi]], [[Alan W. Black|AUTHOR Alan W. Black]], [[Eric Nyberg|AUTHOR Eric Nyberg]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192988.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-7|PAPER Wed-SS-6-4-7 — Voice Quality and Between-Frame Entropy for Sleepiness Estimation]]</div>|<div class="cpsessionviewpapertitle">Voice Quality and Between-Frame Entropy for Sleepiness Estimation</div><div class="cpsessionviewpaperauthor">[[Vijay Ravi|AUTHOR Vijay Ravi]], [[Soo Jin Park|AUTHOR Soo Jin Park]], [[Amber Afshan|AUTHOR Amber Afshan]], [[Abeer Alwan|AUTHOR Abeer Alwan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191726.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-8|PAPER Wed-SS-6-4-8 — Using Fisher Vector and Bag-of-Audio-Words Representations to Identify Styrian Dialects, Sleepiness, Baby & Orca Sounds]]</div>|<div class="cpsessionviewpapertitle">Using Fisher Vector and Bag-of-Audio-Words Representations to Identify Styrian Dialects, Sleepiness, Baby & Orca Sounds</div><div class="cpsessionviewpaperauthor">[[Gábor Gosztolya|AUTHOR Gábor Gosztolya]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191894.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-9|PAPER Wed-SS-6-4-9 — Instantaneous Phase and Long-Term Acoustic Cues for Orca Activity Detection]]</div>|<div class="cpsessionviewpapertitle">Instantaneous Phase and Long-Term Acoustic Cues for Orca Activity Detection</div><div class="cpsessionviewpaperauthor">[[Rohan Kumar Das|AUTHOR Rohan Kumar Das]], [[Haizhou Li|AUTHOR Haizhou Li]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192707.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-10|PAPER Wed-SS-6-4-10 — Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence]]</div>|<div class="cpsessionviewpapertitle">Relevance-Based Feature Masking: Improving Neural Network Based Whale Classification Through Explainable Artificial Intelligence</div><div class="cpsessionviewpaperauthor">[[Dominik Schiller|AUTHOR Dominik Schiller]], [[Tobias Huber|AUTHOR Tobias Huber]], [[Florian Lingenfelser|AUTHOR Florian Lingenfelser]], [[Michael Dietz|AUTHOR Michael Dietz]], [[Andreas Seiderer|AUTHOR Andreas Seiderer]], [[Elisabeth André|AUTHOR Elisabeth André]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191693.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-11|PAPER Wed-SS-6-4-11 — Spatial, Temporal and Spectral Multiresolution Analysis for the INTERSPEECH 2019 ComParE Challenge]]</div>|<div class="cpsessionviewpapertitle">Spatial, Temporal and Spectral Multiresolution Analysis for the INTERSPEECH 2019 ComParE Challenge</div><div class="cpsessionviewpaperauthor">[[Marie-José Caraty|AUTHOR Marie-José Caraty]], [[Claude Montacié|AUTHOR Claude Montacié]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191386.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-12|PAPER Wed-SS-6-4-12 — The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge]]</div>|<div class="cpsessionviewpapertitle">The DKU-LENOVO Systems for the INTERSPEECH 2019 Computational Paralinguistic Challenge</div><div class="cpsessionviewpaperauthor">[[Haiwei Wu|AUTHOR Haiwei Wu]], [[Weiqing Wang|AUTHOR Weiqing Wang]], [[Ming Li|AUTHOR Ming Li]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-6-4-13|PAPER Wed-SS-6-4-13 — Overview on Approaches and Results]]</div>|<div class="cpsessionviewpapertitle">Overview on Approaches and Results</div><div class="cpsessionviewpaperauthor"></div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Hall 2|<|
|^Chair:&nbsp;|^Mahesh Kumar Nandwana, Mitchell McLaren|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191837.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-3-1|PAPER Wed-O-7-3-1 — The VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]], [[Julien van Hout|AUTHOR Julien van Hout]], [[Colleen Richey|AUTHOR Colleen Richey]], [[Mitchell McLaren|AUTHOR Mitchell McLaren]], [[Maria A. Barrios|AUTHOR Maria A. Barrios]], [[Aaron Lawson|AUTHOR Aaron Lawson]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192783.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-3-2|PAPER Wed-O-7-3-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|<div class="cpsessionviewpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div><div class="cpsessionviewpaperauthor">[[Sergey Novoselov|AUTHOR Sergey Novoselov]], [[Aleksei Gusev|AUTHOR Aleksei Gusev]], [[Artem Ivanov|AUTHOR Artem Ivanov]], [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]], [[Andrey Shulipa|AUTHOR Andrey Shulipa]], [[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]], [[Vladimir Volokhov|AUTHOR Vladimir Volokhov]], [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192471.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-3-3|PAPER Wed-O-7-3-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Pavel Matějka|AUTHOR Pavel Matějka]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Hossein Zeinali|AUTHOR Hossein Zeinali]], [[Ladislav Mošner|AUTHOR Ladislav Mošner]], [[Anna Silnova|AUTHOR Anna Silnova]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Ondřej Novotný|AUTHOR Ondřej Novotný]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191574.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-3-4|PAPER Wed-O-7-3-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Ivan Medennikov|AUTHOR Ivan Medennikov]], [[Yuri Khokhlov|AUTHOR Yuri Khokhlov]], [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]], [[Ivan Sorokin|AUTHOR Ivan Sorokin]], [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]], [[Vladimir Bataev|AUTHOR Vladimir Bataev]], [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]], [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]], [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]], [[Oleg Petrov|AUTHOR Oleg Petrov]], [[Alexander Zatvornitskiy|AUTHOR Alexander Zatvornitskiy]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192130.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-O-7-3-5|PAPER Wed-O-7-3-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Tze Yuang Chong|AUTHOR Tze Yuang Chong]], [[Kye Min Tan|AUTHOR Kye Min Tan]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Chang Huai You|AUTHOR Chang Huai You]], [[Hanwu Sun|AUTHOR Hanwu Sun]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|13:30–15:30, Wednesday 18 Sept 2019, Gallery A|<|
|^Chair:&nbsp;|^Mahesh Kumar Nandwana, Mitchell McLaren|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-1|PAPER Wed-SS-7-A-1 — The VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Mahesh Kumar Nandwana|AUTHOR Mahesh Kumar Nandwana]], [[Julien van Hout|AUTHOR Julien van Hout]], [[Colleen Richey|AUTHOR Colleen Richey]], [[Mitchell McLaren|AUTHOR Mitchell McLaren]], [[Maria A. Barrios|AUTHOR Maria A. Barrios]], [[Aaron Lawson|AUTHOR Aaron Lawson]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-2|PAPER Wed-SS-7-A-2 — STC Speaker Recognition Systems for the VOiCES from a Distance Challenge]]</div>|<div class="cpsessionviewpapertitle">STC Speaker Recognition Systems for the VOiCES from a Distance Challenge</div><div class="cpsessionviewpaperauthor">[[Sergey Novoselov|AUTHOR Sergey Novoselov]], [[Aleksei Gusev|AUTHOR Aleksei Gusev]], [[Artem Ivanov|AUTHOR Artem Ivanov]], [[Timur Pekhovsky|AUTHOR Timur Pekhovsky]], [[Andrey Shulipa|AUTHOR Andrey Shulipa]], [[Galina Lavrentyeva|AUTHOR Galina Lavrentyeva]], [[Vladimir Volokhov|AUTHOR Vladimir Volokhov]], [[Alexandr Kozlov|AUTHOR Alexandr Kozlov]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-3|PAPER Wed-SS-7-A-3 — Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">Analysis of BUT Submission in Far-Field Scenarios of VOiCES 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[Pavel Matějka|AUTHOR Pavel Matějka]], [[Oldřich Plchot|AUTHOR Oldřich Plchot]], [[Hossein Zeinali|AUTHOR Hossein Zeinali]], [[Ladislav Mošner|AUTHOR Ladislav Mošner]], [[Anna Silnova|AUTHOR Anna Silnova]], [[Lukáš Burget|AUTHOR Lukáš Burget]], [[Ondřej Novotný|AUTHOR Ondřej Novotný]], [[Ondřej Glembek|AUTHOR Ondřej Glembek]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-4|PAPER Wed-SS-7-A-4 — The STC ASR System for the VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The STC ASR System for the VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Ivan Medennikov|AUTHOR Ivan Medennikov]], [[Yuri Khokhlov|AUTHOR Yuri Khokhlov]], [[Aleksei Romanenko|AUTHOR Aleksei Romanenko]], [[Ivan Sorokin|AUTHOR Ivan Sorokin]], [[Anton Mitrofanov|AUTHOR Anton Mitrofanov]], [[Vladimir Bataev|AUTHOR Vladimir Bataev]], [[Andrei Andrusenko|AUTHOR Andrei Andrusenko]], [[Tatiana Prisyach|AUTHOR Tatiana Prisyach]], [[Mariya Korenevskaya|AUTHOR Mariya Korenevskaya]], [[Oleg Petrov|AUTHOR Oleg Petrov]], [[Alexander Zatvornitskiy|AUTHOR Alexander Zatvornitskiy]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-5|PAPER Wed-SS-7-A-5 — The I2R’s ASR System for the VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The I2R’s ASR System for the VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Tze Yuang Chong|AUTHOR Tze Yuang Chong]], [[Kye Min Tan|AUTHOR Kye Min Tan]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Chang Huai You|AUTHOR Chang Huai You]], [[Hanwu Sun|AUTHOR Hanwu Sun]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193010.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-6|PAPER Wed-SS-7-A-6 — Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech]]</div>|<div class="cpsessionviewpapertitle">Multi-Task Discriminative Training of Hybrid DNN-TVM Model for Speaker Verification with Noisy and Far-Field Speech</div><div class="cpsessionviewpaperauthor">[[Arindam Jati|AUTHOR Arindam Jati]], [[Raghuveer Peri|AUTHOR Raghuveer Peri]], [[Monisankha Pal|AUTHOR Monisankha Pal]], [[Tae Jin Park|AUTHOR Tae Jin Park]], [[Naveen Kumar|AUTHOR Naveen Kumar]], [[Ruchir Travadi|AUTHOR Ruchir Travadi]], [[Panayiotis Georgiou|AUTHOR Panayiotis Georgiou]], [[Shrikanth Narayanan|AUTHOR Shrikanth Narayanan]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192979.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-7|PAPER Wed-SS-7-A-7 — The JHU Speaker Recognition System for the VOiCES 2019 Challenge]]</div>|<div class="cpsessionviewpapertitle">The JHU Speaker Recognition System for the VOiCES 2019 Challenge</div><div class="cpsessionviewpaperauthor">[[David Snyder|AUTHOR David Snyder]], [[Jesús Villalba|AUTHOR Jesús Villalba]], [[Nanxin Chen|AUTHOR Nanxin Chen]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Gregory Sell|AUTHOR Gregory Sell]], [[Najim Dehak|AUTHOR Najim Dehak]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192894.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-8|PAPER Wed-SS-7-A-8 — Intel Far-Field Speaker Recognition System for VOiCES Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">Intel Far-Field Speaker Recognition System for VOiCES Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Jonathan Huang|AUTHOR Jonathan Huang]], [[Tobias Bocklet|AUTHOR Tobias Bocklet]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191997.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-9|PAPER Wed-SS-7-A-9 — The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The I2R’s Submission to VOiCES Distance Speaker Recognition Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Hanwu Sun|AUTHOR Hanwu Sun]], [[Kah Kuan Teh|AUTHOR Kah Kuan Teh]], [[Ivan Kukanov|AUTHOR Ivan Kukanov]], [[Huy Dat Tran|AUTHOR Huy Dat Tran]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191944.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-10|PAPER Wed-SS-7-A-10 — The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The LeVoice Far-Field Speech Recognition System for VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Yulong Liang|AUTHOR Yulong Liang]], [[Lin Yang|AUTHOR Lin Yang]], [[Xuyang Wang|AUTHOR Xuyang Wang]], [[Yingjie Li|AUTHOR Yingjie Li]], [[Chen Jia|AUTHOR Chen Jia]], [[Junjie Wang|AUTHOR Junjie Wang]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191948.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-11|PAPER Wed-SS-7-A-11 — The JHU ASR System for VOiCES from a Distance Challenge 2019]]</div>|<div class="cpsessionviewpapertitle">The JHU ASR System for VOiCES from a Distance Challenge 2019</div><div class="cpsessionviewpaperauthor">[[Yiming Wang|AUTHOR Yiming Wang]], [[David Snyder|AUTHOR David Snyder]], [[Hainan Xu|AUTHOR Hainan Xu]], [[Vimal Manohar|AUTHOR Vimal Manohar]], [[Phani Sankar Nidadavolu|AUTHOR Phani Sankar Nidadavolu]], [[Daniel Povey|AUTHOR Daniel Povey]], [[Sanjeev Khudanpur|AUTHOR Sanjeev Khudanpur]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191435.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-7-A-12|PAPER Wed-SS-7-A-12 — The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge]]</div>|<div class="cpsessionviewpapertitle">The DKU System for the Speaker Recognition Task of the 2019 VOiCES from a Distance Challenge</div><div class="cpsessionviewpaperauthor">[[Danwei Cai|AUTHOR Danwei Cai]], [[Xiaoyi Qin|AUTHOR Xiaoyi Qin]], [[Weicheng Cai|AUTHOR Weicheng Cai]], [[Ming Li|AUTHOR Ming Li]]</div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}} {{$:/causal/Session List Button}} {{$:/causal/Author Index Button}}
</p>
<p>
{{||$:/causal/Preceding Session Button}}
&nbsp;<span class="cpprevnextanchortext">SESSION</span>
&nbsp;{{||$:/causal/Next Session Button}}
</p></div>

<div class="cpsessionviewmetadata">

|cpborderless|k
|16:00–18:00, Wednesday 18 Sept 2019, Hall 3|<|
|^Chair:&nbsp;|^Philipp Aichinger, Carlo Drioli|

</div>

|cptablecelltopbottomspace2|k
|cpsessionviewtable|k
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192465.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-1|PAPER Wed-SS-8-6-1 — Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease]]</div>|<div class="cpsessionviewpapertitle">Identifying Distinctive Acoustic and Spectral Features in Parkinson’s Disease</div><div class="cpsessionviewpaperauthor">[[Yermiyahu Hauptman|AUTHOR Yermiyahu Hauptman]], [[Ruth Aloni-Lavi|AUTHOR Ruth Aloni-Lavi]], [[Itshak Lapidot|AUTHOR Itshak Lapidot]], [[Tanya Gurevich|AUTHOR Tanya Gurevich]], [[Yael Manor|AUTHOR Yael Manor]], [[Stav Naor|AUTHOR Stav Naor]], [[Noa Diamant|AUTHOR Noa Diamant]], [[Irit Opher|AUTHOR Irit Opher]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192338.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-2|PAPER Wed-SS-8-6-2 — Aerodynamics and Lumped-Masses Combined with Delay Lines for Modeling Vertical and Anterior-Posterior Phase Differences in Pathological Vocal Fold Vibration]]</div>|<div class="cpsessionviewpapertitle">Aerodynamics and Lumped-Masses Combined with Delay Lines for Modeling Vertical and Anterior-Posterior Phase Differences in Pathological Vocal Fold Vibration</div><div class="cpsessionviewpaperauthor">[[Carlo Drioli|AUTHOR Carlo Drioli]], [[Philipp Aichinger|AUTHOR Philipp Aichinger]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192863.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-3|PAPER Wed-SS-8-6-3 — Mel-Frequency Cepstral Coefficients of Voice Source Waveforms for Classification of Phonation Types in Speech]]</div>|<div class="cpsessionviewpapertitle">Mel-Frequency Cepstral Coefficients of Voice Source Waveforms for Classification of Phonation Types in Speech</div><div class="cpsessionviewpaperauthor">[[Sudarsana Reddy Kadiri|AUTHOR Sudarsana Reddy Kadiri]], [[Paavo Alku|AUTHOR Paavo Alku]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191452.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-4|PAPER Wed-SS-8-6-4 — Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations]]</div>|<div class="cpsessionviewpapertitle">Automatic Detection of Autism Spectrum Disorder in Children Using Acoustic and Text Features from Brief Natural Conversations</div><div class="cpsessionviewpaperauthor">[[Sunghye Cho|AUTHOR Sunghye Cho]], [[Mark Liberman|AUTHOR Mark Liberman]], [[Neville Ryant|AUTHOR Neville Ryant]], [[Meredith Cola|AUTHOR Meredith Cola]], [[Robert T. Schultz|AUTHOR Robert T. Schultz]], [[Julia Parish-Morris|AUTHOR Julia Parish-Morris]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS191998.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-5|PAPER Wed-SS-8-6-5 — Analysis and Synthesis of Vocal Flutter and Vocal Jitter]]</div>|<div class="cpsessionviewpapertitle">Analysis and Synthesis of Vocal Flutter and Vocal Jitter</div><div class="cpsessionviewpaperauthor">[[Jean Schoentgen|AUTHOR Jean Schoentgen]], [[Philipp Aichinger|AUTHOR Philipp Aichinger]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS192910.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-6|PAPER Wed-SS-8-6-6 — Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt]]</div>|<div class="cpsessionviewpapertitle">Reliability of Clinical Voice Parameters Captured with Smartphones — Measurements of Added Noise and Spectral Tilt</div><div class="cpsessionviewpaperauthor">[[Felix Schaeffler|AUTHOR Felix Schaeffler]], [[Stephen Jannetts|AUTHOR Stephen Jannetts]], [[Janet Beck|AUTHOR Janet Beck]]</div>|
|^ @@.pdficonintable @@<a href="./IS2019/HTML/AUTHOR/IS193096.PDF" class="externallinkbutton" style="outline:0;" target="_blank">{{$:/causal/pdf icon in session view}}</a> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-7|PAPER Wed-SS-8-6-7 — Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make]]</div>|<div class="cpsessionviewpapertitle">Say What? A Dataset for Exploring the Error Patterns That Two ASR Engines Make</div><div class="cpsessionviewpaperauthor">[[Meredith Moore|AUTHOR Meredith Moore]], [[Michael Saxon|AUTHOR Michael Saxon]], [[Hemanth Venkateswara|AUTHOR Hemanth Venkateswara]], [[Visar Berisha|AUTHOR Visar Berisha]], [[Sethuraman Panchanathan|AUTHOR Sethuraman Panchanathan]]</div>|
|^<div class="cpauthorindexpersoncardpapercode">{{$:/causal/NO-PDF Marker}}</div> |^<div class="cpsessionviewpapercode">[[Wed-SS-8-6-8|PAPER Wed-SS-8-6-8 — Discussion]]</div>|<div class="cpsessionviewpapertitle">Discussion</div><div class="cpsessionviewpaperauthor"></div>|
\rules except wikilink
<div class="cpbuttonrow"><p>
{{$:/causal/Welcome Page Button}}
</p></div>

<div class="cpsupportpage">
This HTML index file is based on the [ext[TiddlyWiki|http://www.tiddlywiki.com]] web application.
You can browse the table of contents, the author index, and individual paper details, and open each paper's PDF file in a separate window.
</div>

|cpsupportpagetable|k
|cptightlineheight|k
|cptablecelltopbottomspace2|k
|PDF&nbsp;Reader |The PDF files in this publication are designed for viewing with Adobe Reader 8 or later.|
|^Support |If you have problems with this publication, please contact Causal Productions at:<div class="cpmailingaddress">Causal Productions Pty Ltd<br>PO Box<$link to="$:/causal/Causal Productions Configurator Control Panel"> </$link>100<br>Rundle Mall<br>SA 5000<br>Australia</div>|
|Phone |+61 8 8295 8200|
|Fax |+61 8 8295 8299|
|E-mail |[ext[info@causalproductions.com|mailto:info@causalproductions.com]]|
|Web |[ext[http://www.causalproductions.com|http://www.causalproductions.com]]|
\rules except wikilink
<div class="cppublicationname">INTERSPEECH 2019</div><div class="cppublicationdatevenue">September 15–19th 2019, Graz, Austria<span><a href="http://www.interspeech2019.org" target="_blank"><$button><small>Conference Website</small></$button></a></span></div>

|cpborderless|k
|cpwelcomepageconferencetable|k
|cph3|k
|<hr>|<|<|
| <div class="cpwelcomepagespaceaboveiconwithoutconferencename icon_size_on_welcome_page">{{$:/causal/image/INTERSPEECH 2019 WELCOME.SVG}}</div> |<div class="cpwelcomepageconferencelinks">[[Conference Information]]<br>[[Session List]]<br>[[Author Index]] </div> |
|<hr>|<|<|
|[[Copyright Statement]] |[[Support]] |
<div class="cpwelcomepagecopyright">
{{$:/causal/publication/Copyright Statement}}
</div>