Merge pull request #2 from Paoda/refinements

support for multiple languages
Rekai Nyangadzayi Musuka, 2018-09-07 10:55:55 +01:00 (committed by GitHub)
commit f478409a32
2 changed files with 8 additions and 26 deletions
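Taken together, the changes let a client choose both the speech-recognition language and the translation target per session. For context, the new event signature could be exercised from a connected client roughly as follows (a sketch: the socket.io client and server URL are assumptions; the event names and arguments come from the diff below):

    // Hypothetical client-side usage of the new (lang, target) signature.
    const io = require("socket.io-client");
    const socket = io("http://localhost:3000"); // server URL is an assumption

    // Transcribe Canadian English speech and translate it into French.
    socket.emit("startRecognition", "en-CA", "fr");

    // ...when the session is over:
    socket.emit("stopRecognition");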

app.js (4 changes)

@@ -45,7 +45,7 @@ class WebSocketServer {
           }
         });
-      client.on('startRecognition', (lang) => {
+      client.on('startRecognition', (lang, target) => {
         if (this.google.speech) {
           this.google.speech.stopRecognition();
           this.google.speech = null;
@@ -53,7 +53,7 @@ class WebSocketServer {
         console.log("Speech Recognition Started");
         this.google.speech = new Speech()
-        this.google.speech.startRecognition(lang);
+        this.google.speech.startRecognition(lang, target);
       })
       client.on('stopRecognition', () => {


@@ -34,10 +34,10 @@ class Speech {
   /**
    * Starts the Google API Stream
-   * @param {string} lang - Language Code e.g en-CA
+   * @param {string} lang - Language Code e.g en-CA for Canadian English
+   * @param {string} target - Language Code e.g. "en" for English
    */
-  startRecognition(lang) {
-    this.lang = lang;
+  startRecognition(lang, target) {
     this.enabled = true;
     const request = {
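The body of request lies outside this hunk; for the Google Cloud Speech streaming API a request of this shape is typical (a sketch only: the field values are assumptions, not the project's actual config):

    // Sketch of a typical streamingRecognize request (assumed values).
    const request = {
      config: {
        encoding: "LINEAR16",    // raw 16-bit PCM audio
        sampleRateHertz: 16000,
        languageCode: lang,      // e.g. "en-CA", passed in by the caller
      },
      interimResults: true,      // emit partial transcripts while speaking
    };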
@@ -55,40 +55,22 @@ class Speech {
       .streamingRecognize(request)
       .on("error", console.error)
       .on("data", data => {
-        process.stdout.write(
-          data.results[0] && data.results[0].alternatives[0]
-            ? `Transcription: ${data.results[0].alternatives[0].transcript}\n`
-            : `\n\nReached transcription time limit, press Ctrl+C\n`
-        );
-        //client.emit("speechData", data);
         if (data.results[0].alternatives[0] !== undefined) {
           let text = data.results[0].alternatives[0].transcript;
-          translate.speech(text, "fr").then(translation => {
-            console.log("Translation: " + translation);
+          translate.speech(text, target).then(translation => {
+            console.log(`${lang}: ${text} | ${target}: ${translation} (${data.results[0].alternatives[0].confidence})`);
           }).catch(err => console.error(err));
-          // translate
-          //   .translate(text, target)
-          //   .then(results => {
-          //     const translation = results[0];
-          //     //client.emit("translateData", translation);
-          //     console.log(`Text: ${text}`);
-          //     console.log(`Translation: ${translation}`);
-          //   })
-          //   .catch(err => {
-          //     console.error("ERROR:", err);
-          //   });
         }
         // if end of utterance, let's restart stream
         // this is a small hack. After 65 seconds of silence, the stream will still throw an error for speech length limit
         if (data.results[0] && data.results[0].isFinal) {
           this.stopRecognition();
-          this.startRecognition(this.lang);
+          this.startRecognition(lang, target);
           // console.log('restarted stream serverside');
         }
       });
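The translate.speech helper itself is not part of this commit, but the commented-out block it replaces suggests it wraps the Google Cloud Translation client roughly as below (a sketch inferred from that removed code, assuming the @google-cloud/translate v2-style API; not the project's actual source):

    // Hypothetical shape of translate.speech, inferred from the removed
    // commented-out block above (a sketch, not the project's actual code).
    const { Translate } = require("@google-cloud/translate").v2;
    const client = new Translate();

    function speech(text, target) {
      // translate() resolves to [translation, apiResponse]; keep the string.
      return client.translate(text, target).then(results => results[0]);
    }

    module.exports = { speech };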