// Requires the microphone recorder and the (older) Google Cloud Speech client library
const record = require('node-record-lpcm16');
const Speech = require('@google-cloud/speech');

function streamingMicRecognize () {
  // Instantiates a client
  const speech = Speech();

  const options = {
    config: {
      // Configure these settings based on the audio you're transcribing
      encoding: 'LINEAR16',
      sampleRate: 16000
    }
  };

  // Create a recognize stream
  const recognizeStream = speech.createRecognizeStream(options)
    .on('error', console.error)
    .on('data', (data) => process.stdout.write(data.results));

  // Start recording and send the microphone input to the Speech API
  record.start({
    sampleRate: 16000,
    threshold: 0
  }).pipe(recognizeStream);

  console.log('Listening, press Ctrl+C to stop.');
}
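With those requires in place, the sample is started by simply calling the function:

streamingMicRecognize();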
Example #2
File: main.js Project: causztic/eve
const startHotWordDetection = () => {
  mic = record.start({
    threshold: 0,
    verbose: true
  });
  detector = new Detector({
    resource: "resources/common.res",
    models: models,
    audioGain: 2.0
  });
  mic.pipe(detector);
}
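This excerpt assumes snowboy's Detector/Models and node-record-lpcm16 are wired up at module scope. A minimal sketch of that setup, following the snowboy README (the model file and hotword below are hypothetical placeholders):

const record = require('node-record-lpcm16');
const { Detector, Models } = require('snowboy');

// Hypothetical hotword model registration
const models = new Models();
models.add({
  file: 'resources/snowboy.umdl',
  sensitivity: '0.5',
  hotwords: 'snowboy'
});

// Reassigned inside startHotWordDetection()
let mic;
let detector;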
Example #3
var captureSpeechIntentFromMic = function (access_token, options, callback) {
    if (!callback) {
        callback = options;
        options = undefined;
    }

    // Setup audio stream
    var content_type = 'audio/wav';
    var stream = record.start();

    // Get the speech intent for the microphone stream
    captureSpeechIntent(access_token, stream, content_type, options, callback);

    // Return record so it can be manually/forcefully stopped
    return record;
};
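A hypothetical call to this helper; ACCESS_TOKEN is a placeholder, the result shape depends on the speech-intent service wrapped by captureSpeechIntent, and record.stop() force-stops the microphone capture:

var recorder = captureSpeechIntentFromMic(ACCESS_TOKEN, function (err, intent) {
    if (err) return console.error(err);
    console.log('Recognized intent:', intent);
});

// Force-stop the capture after 5 seconds
setTimeout(function () {
    recorder.stop();
}, 5000);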
Example #4
		return new Promise((resolve, reject) => {
			let responses = [];
			const call = speechService.streamingRecognize();

			call.on('error', reject);
			call.on('data', function (recognizeResponse) {
				if (recognizeResponse) {
					responses.push(recognizeResponse);
					if (onData && recognizeResponse.results && recognizeResponse.results.length) {
						onData(recognizeResponse.results);
					}
				}
			});

			call.on('end', function () {
				resolve(responses);
			});

			call.write({
				streamingConfig: {
					config: {
						encoding: 'LINEAR16',
						sampleRate: 16000
					},
					interimResults: false,
					singleUtterance: false
				}
			});

			const toRecognizeRequest = new Transform({ objectMode: true });
			toRecognizeRequest._transform = function (chunk, encoding, done) {
				done(null, {
					audioContent: chunk
				});
			};

			record.start({
				sampleRate: 44100,
				verbose: true
			})
				.pipe(toRecognizeRequest)
				.pipe(call);

		});
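The excerpt above sits inside a larger method; a sketch of what it assumes from its surroundings (the note about speechService is an assumption based on the low-level streamingRecognize() call):

// Assumed imports for the excerpt above
const { Transform } = require('stream');      // stream.Transform used for toRecognizeRequest
const record = require('node-record-lpcm16'); // microphone capture, as in the other examples

// `speechService` (exposing the streamingRecognize() duplex stream) and the
// optional `onData` callback are provided by the enclosing method.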
Example #5
var record = require('node-record-lpcm16'),
    fs     = require('fs');

var file = fs.createWriteStream('test.wav', { encoding: 'binary' });

record.start({
  sampleRate: 16000,
  threshold: 0.5,
  verbose: true
})
.pipe(file);
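With a non-zero threshold the recorder stops itself once silence is detected; to end the capture explicitly instead (and finish writing test.wav), record.stop() can be called, e.g.:

// Force the recording to stop after 10 seconds
setTimeout(function () {
  record.stop();
}, 10000);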