How to fix a bug that prevents Windows.Media.SpeechRecognition from continuing.
The release of LLM Doghouse 0.0.1.4 has been delayed while I worked on the bug in the title: Windows.Media.SpeechRecognition stops partway through a session. After studying other people's code I have finally found a way to resolve it, so the new version will be published shortly.
Debugging what happens after Windows.Media.SpeechRecognition stops partway shows that audio capture still appears to be running, yet no events fire at all. It seems that once the recognizer has stopped, the event subscriptions have to be discarded as well (and the recognizer recreated) before recognition will work again.
Below is the code that starts and stops Windows.Media.SpeechRecognition with a toggle button named chkMic. I hope it helps anyone struggling with the same symptom.
// Start/stop the voice conversation function by turning the toggle button named "chkMic" on/off.
using System;
using System.Threading.Tasks;
using System.Windows;
using Windows.Media.SpeechRecognition;

public partial class MainForm : Window
{
    private Windows.Media.SpeechRecognition.SpeechRecognizer recognizer;
    // BCP-47 language tag used for recognition; set elsewhere in the app (declared here so the snippet compiles).
    private string strLng = "en-US";

    private void chkMic_Click(object sender, RoutedEventArgs e)
    {
        chkMic_Change();
    }
    private async void chkMic_Change()
    {
        try
        {
            if (chkMic.IsChecked.Value)
            {
                await StartRecognizer();
            }
            else
            {
                await StopRecognizer();
            }
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, this.Title, MessageBoxButton.OK, MessageBoxImage.Error);
        }
    }
    private async Task StopRecognizer()
    {
        if (recognizer != null)
        {
            // Stop the continuous recognition session.
            await recognizer.ContinuousRecognitionSession.CancelAsync();
            // Unhook every event handler and dispose the recognizer;
            // a stopped recognizer will not raise events again until it is recreated.
            recognizer.HypothesisGenerated -= ContinuousRecognitionSession_HypothesisGenerated;
            recognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
            recognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
            recognizer.StateChanged -= Recognizer_StateChanged;
            recognizer.Dispose();
            recognizer = null;
        }
    }
    private async Task StartRecognizer()
    {
        // Dispose any previous recognizer and unhook its events before creating a new one.
        if (recognizer != null)
        {
            recognizer.HypothesisGenerated -= ContinuousRecognitionSession_HypothesisGenerated;
            recognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
            recognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
            recognizer.StateChanged -= Recognizer_StateChanged;
            recognizer.Dispose();
            recognizer = null;
        }
        // SpeechRecognition settings
        recognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer(new Windows.Globalization.Language(strLng));
        // Compile the default constraints (freeform dictation) before starting.
        await recognizer.CompileConstraintsAsync();
        recognizer.HypothesisGenerated += ContinuousRecognitionSession_HypothesisGenerated;
        recognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        // Completed fires when the session ends (timeout, error, or cancellation).
        recognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
        // Recognizer state changes
        recognizer.StateChanged += Recognizer_StateChanged;
        // Start continuous recognition.
        await recognizer.ContinuousRecognitionSession.StartAsync();
    }
    private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionResultGeneratedEventArgs e)
    {
        // Add voice conversation logic here.
    }
    private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs e)
    {
        // Restart only when the session ended on its own (e.g. timeout);
        // UserCanceled means StopRecognizer cancelled it deliberately, so don't restart then.
        if (e.Status != SpeechRecognitionResultStatus.UserCanceled)
        {
            await StartRecognizer();
        }
    }
    private void Recognizer_StateChanged(Windows.Media.SpeechRecognition.SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
    {
        // Add logic for state changes here if needed (not strictly required).
    }

    private void ContinuousRecognitionSession_HypothesisGenerated(Windows.Media.SpeechRecognition.SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
    {
        // Add logic for interim (hypothesis) results here if needed (not strictly required).
    }
}
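As a side note, strLng in the listing is a BCP-47 language tag (for example "ja-JP" or "en-US") that the app keeps elsewhere; the post does not show how it is set. Below is a minimal sketch, not part of LLM Doghouse, of one way to choose a tag via the static SpeechRecognizer.SystemSpeechLanguage property. The helper name GetSpeechLanguageTag and the "en-US" fallback are illustrative assumptions, and the project is assumed to already reference the WinRT APIs (e.g. through a Windows-specific target framework).

    // Illustrative helper (assumption, not from the original code): pick a recognition language tag for strLng.
    private static string GetSpeechLanguageTag()
    {
        // Prefer the language the OS speech recognizer is configured for, if one is set.
        var lang = Windows.Media.SpeechRecognition.SpeechRecognizer.SystemSpeechLanguage;
        return lang != null ? lang.LanguageTag : "en-US";
    }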