[voice] Allow speech-to-text services to emit empty error events (#16922)
* [voice] Align speech-to-text services error events to core

Signed-off-by: Miguel Álvarez <miguelwork92@gmail.com>
Signed-off-by: Ciprian Pascu <contact@ciprianpascu.ro>
parent 3555e45df2
commit ba79e5ba68
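
Every hunk below applies the same change: instead of checking whether the configured message is blank and substituting a hard-coded fallback such as "No results" or "Error", each service now forwards the configured message unchanged, so an intentionally empty message reaches core as an empty SpeechRecognitionErrorEvent. The sketch below illustrates the before/after pattern; the listener, event, and configuration types are stripped-down stand-ins for the openHAB core interfaces, not the real classes.

```java
// Minimal stand-ins for the core types referenced in the diff (illustrative only).
class SpeechRecognitionErrorEvent {
    private final String message;

    SpeechRecognitionErrorEvent(String message) {
        this.message = message;
    }

    String getMessage() {
        return message;
    }
}

interface STTListener {
    void sttEventReceived(SpeechRecognitionErrorEvent event);
}

class Config {
    String noResultsMessage = ""; // may be intentionally left blank by the user
}

class EmptyErrorEventSketch {

    // Old pattern (removed by this commit): a blank configured message was replaced
    // with a hard-coded fallback, so an empty error event could never be emitted.
    static void emitOld(STTListener listener, Config config) {
        if (!config.noResultsMessage.isBlank()) {
            listener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
        } else {
            listener.sttEventReceived(new SpeechRecognitionErrorEvent("No results"));
        }
    }

    // New pattern (introduced by this commit): forward the configured message as-is,
    // even when blank, and let core decide what to do with an empty error event.
    static void emitNew(STTListener listener, Config config) {
        listener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
    }

    public static void main(String[] args) {
        STTListener listener = event -> System.out.println("error event: \"" + event.getMessage() + "\"");
        Config config = new Config();
        emitOld(listener, config); // prints the hard-coded fallback "No results"
        emitNew(listener, config); // prints an empty message, which is now allowed
    }
}
```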
@@ -405,10 +405,8 @@ public class GoogleSTTService implements STTService {
                         String transcript = transcriptBuilder.toString();
                         if (!transcript.isBlank()) {
                             sttListener.sttEventReceived(new SpeechRecognitionEvent(transcript, averageConfidence));
-                        } else if (!config.noResultsMessage.isBlank()) {
-                            sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
                         } else {
-                            sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("No results"));
+                            sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
                         }
                     }
                 }
@@ -418,13 +416,7 @@ public class GoogleSTTService implements STTService {
             logger.warn("Recognition error: ", t);
             if (!aborted.getAndSet(true)) {
                 sttListener.sttEventReceived(new RecognitionStopEvent());
-                if (!config.errorMessage.isBlank()) {
-                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
-                } else {
-                    String errorMessage = t.getMessage();
-                    sttListener.sttEventReceived(
-                            new SpeechRecognitionErrorEvent(errorMessage != null ? errorMessage : "Unknown error"));
-                }
+                sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
             }
         }

@@ -271,27 +271,15 @@ public class VoskSTTService implements STTService {
                     if (!transcript.isBlank()) {
                         sttListener.sttEventReceived(new SpeechRecognitionEvent(transcript, 1F));
                     } else {
-                        if (!config.noResultsMessage.isBlank()) {
-                            sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
-                        } else {
-                            sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("No results"));
-                        }
+                        sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
                     }
                 }
             } catch (IOException e) {
                 logger.warn("Error running speech to text: {}", e.getMessage());
-                if (config.errorMessage.isBlank()) {
-                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("Error"));
-                } else {
-                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
-                }
+                sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
             } catch (UnsatisfiedLinkError e) {
                 logger.warn("Missing native dependency: {}", e.getMessage());
-                if (config.errorMessage.isBlank()) {
-                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("Error"));
-                } else {
-                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
-                }
+                sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
             } finally {
                 if (recognizer != null) {
                     recognizer.close();
@@ -51,6 +51,7 @@ org.openhab.voice.watsonstt:optOutLogging=false
 org.openhab.voice.watsonstt:smartFormatting=false
 org.openhab.voice.watsonstt:redaction=false
 org.openhab.voice.watsonstt:noResultsMessage="Sorry, I didn't understand you"
+org.openhab.voice.watsonstt:errorMessage="Sorry, something went wrong"
 ```
 
 ### Default Speech-to-Text Configuration
@@ -63,7 +63,13 @@ public class WatsonSTTConfiguration {
     /**
      * Message to be told when no results
      */
-    public String noResultsMessage = "No results";
+    public String noResultsMessage = "Sorry, I didn't understand you";
+
+    /**
+     * Message to be told when an error has happened
+     */
+    public String errorMessage = "Sorry, something went wrong";
+
     /**
      * By default, all IBM Watson™ services log requests and their results. Logging is done only to improve the services
      * for future users. The logged data is not shared or made public.
@@ -311,8 +311,7 @@ public class WatsonSTTService implements STTService {
                 }
                 logger.warn("TranscriptionError: {}", errorMessage);
                 if (!aborted.getAndSet(true)) {
-                    sttListener.sttEventReceived(
-                            new SpeechRecognitionErrorEvent(errorMessage != null ? errorMessage : "Unknown error"));
+                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.errorMessage));
                 }
             }

@@ -327,11 +326,7 @@ public class WatsonSTTService implements STTService {
                 if (!transcript.isBlank()) {
                     sttListener.sttEventReceived(new SpeechRecognitionEvent(transcript, averageConfidence));
                 } else {
-                    if (!config.noResultsMessage.isBlank()) {
-                        sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
-                    } else {
-                        sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("No results"));
-                    }
+                    sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
                 }
             }
         }
@@ -47,7 +47,12 @@
 		<parameter name="noResultsMessage" type="text" groupName="stt">
 			<label>No Results Message</label>
 			<description>Message to be told when no transcription is done.</description>
-			<default>No results</default>
+			<default>Sorry, I didn't understand you</default>
 		</parameter>
+		<parameter name="errorMessage" type="text" groupName="stt">
+			<label>Error Message</label>
+			<description>Message to be told when an error has happened.</description>
+			<default>Sorry, something went wrong</default>
+		</parameter>
 		<parameter name="singleUtteranceMode" type="boolean" groupName="stt">
 			<label>Single Utterance Mode</label>
@@ -12,6 +12,8 @@ voice.config.watsonstt.maxSilenceSeconds.label = Max Silence Seconds
 voice.config.watsonstt.maxSilenceSeconds.description = The time in seconds after which, if only silence (no speech) is detected in the audio, the connection is closed.
 voice.config.watsonstt.noResultsMessage.label = No Results Message
 voice.config.watsonstt.noResultsMessage.description = Message to be told when no transcription is done.
+voice.config.watsonstt.errorMessage.label = Error Message
+voice.config.watsonstt.errorMessage.description = Message to be told when an error has happened.
 voice.config.watsonstt.optOutLogging.label = Opt Out Logging
 voice.config.watsonstt.optOutLogging.description = By default, all IBM Watson™ services log requests and their results. Logging is done only to improve the services for future users. The logged data is not shared or made public.
 voice.config.watsonstt.preferMultimediaModel.label = Prefer Multimedia Model
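
On the receiving side, the benefit of forwarding a blank message is that the consumer of the event can choose to stay silent instead of announcing a generic error. The snippet below is only a rough illustration of that idea with a hypothetical listener; it does not reflect how openHAB core actually implements dialog handling.

```java
// Hypothetical consumer that suppresses spoken feedback for blank error messages
// (illustrative only; not the openHAB core implementation).
class QuietOnBlankListener {

    // Stand-in for the core event type; only the message accessor is modelled here.
    static class SpeechRecognitionErrorEvent {
        private final String message;

        SpeechRecognitionErrorEvent(String message) {
            this.message = message;
        }

        String getMessage() {
            return message;
        }
    }

    void onError(SpeechRecognitionErrorEvent event) {
        String message = event.getMessage();
        if (message == null || message.isBlank()) {
            // An intentionally empty message: stay silent instead of speaking a fallback text.
            return;
        }
        speak(message);
    }

    void speak(String text) {
        // Placeholder for handing the text to a text-to-speech service.
        System.out.println("TTS: " + text);
    }

    public static void main(String[] args) {
        QuietOnBlankListener listener = new QuietOnBlankListener();
        listener.onError(new SpeechRecognitionErrorEvent(""));                            // nothing spoken
        listener.onError(new SpeechRecognitionErrorEvent("Sorry, something went wrong")); // spoken
    }
}
```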