
Commit 4479cd4

STT: createRecognitionJob() now accepts .recognize()'s params; getRecognitionJobs() now uses standard signature; doc improvements
1 parent 3b961ec commit 4479cd4
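
For context, the call shapes this commit enables look roughly like the sketch below. It assumes a configured SpeechToTextV1 client and a local audio file; the credentials, URLs, and option values are illustrative, not part of the commit.

```js
const fs = require('fs');
const SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');

const speech_to_text = new SpeechToTextV1({
  username: 'YOUR_USERNAME', // illustrative credentials
  password: 'YOUR_PASSWORD'
});

// createRecognitionJob() now forwards .recognize()'s params (timestamps, keywords, ...)
// alongside the async-specific ones (callback_url, event, user_token, results_ttl).
speech_to_text.createRecognitionJob(
  {
    audio: fs.createReadStream('audio.ogg'),
    content_type: 'audio/ogg; codec=opus',
    callback_url: 'https://example.com/stt-results', // illustrative URL
    event: 'recognitions.completed',
    timestamps: true,     // a .recognize() param, now passed through
    word_confidence: true // likewise
  },
  function(err, job) {
    if (err) {
      return console.error(err);
    }
    console.log(job);
  }
);

// getRecognitionJobs() now follows the SDK's standard (params, callback) signature,
// while a bare callback still works.
speech_to_text.getRecognitionJobs(function(err, jobs) {});
speech_to_text.getRecognitionJobs({}, function(err, jobs) {});
```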

2 files changed: 48 additions & 13 deletions

speech-to-text/v1.js

Lines changed: 38 additions & 10 deletions
@@ -26,7 +26,7 @@ const http = require('http');
 const isStream = require('isstream');
 const requestFactory = require('../lib/requestwrapper');
 const RecognizeStream = require('./recognize_stream');
-const pkg = require('../package.json'); // todo: consider using env properties here instead (to enable webpack support without requiring a plugin)
+const pkg = require('../package.json');
 const util = require('util');
 const BaseService = require('../lib/base_service');
 const async = require('async');
@@ -38,7 +38,7 @@ const PARAMS_ALLOWED = [
   'word_confidence',
   'inactivity_timeout',
   'model',
-  'content-type',
+  'content-type', // this is accepted in querystring by the service, but methods here all accept content_type and then set a header
   'interim_results',
   'keywords',
   'keywords_threshold',
@@ -128,12 +128,13 @@ SpeechToTextV1.prototype.registerCallback = function(params, callback) {
  * How you learn the status and results of a job depends on the parameters you include with the job creation request.
  *
  * @param {object} params - The parameters
- * @param {Audio} params.audio - Audio to be recognized
+ * @param {Stream} params.audio - Audio to be recognized
  * @param {string} params.content_type - The Content-type e.g. audio/l16; rate=48000
  * @param {string} params.callback_url - A URL to which callback notifications are to be sent
  * @param {string} [params.event] - recognitions.started|recognitions.completed|recognitions.failed|recognitions.completed_with_results
  * @param {string} [params.user_token] - The token allows the user to maintain an internal mapping between jobs and notification events
  * @param {number} [params.results_ttl] - time to alive of the job result
+ * @param {*} [params.*] - all params that .recognize() accepts may also be passed to createRecognitionJob()
  * @param {Function} callback
  * @returns {ReadableStream|undefined}
  */
@@ -156,7 +157,7 @@ SpeechToTextV1.prototype.createRecognitionJob = function(params, callback) {
       headers: {
         'Content-Type': params.content_type
       },
-      qs: pick(params, ['callback_url', 'event', 'user_token', 'results_ttl']),
+      qs: pick(params, ['callback_url', 'event', 'user_token', 'results_ttl'].concat(PARAMS_ALLOWED)),
       json: true
     },
     defaultOptions: this._options
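
The effect of the `.concat(PARAMS_ALLOWED)` change above is that any whitelisted `.recognize()` option present in `params` now ends up in the job's querystring. A self-contained sketch with a stand-in `pick()` (the SDK's real `pick` comes from its own dependency; this one only illustrates the whitelist union):

```js
// Stand-in for the pick() used above: copy only whitelisted keys that are present.
function pick(obj, keys) {
  const out = {};
  keys.forEach(function(key) {
    if (obj[key] !== undefined) {
      out[key] = obj[key];
    }
  });
  return out;
}

// Abbreviated version of the PARAMS_ALLOWED whitelist defined earlier in v1.js.
const PARAMS_ALLOWED = ['continuous', 'timestamps', 'word_confidence'];

const params = {
  audio: '<stream>',                     // never a querystring param
  content_type: 'audio/ogg; codec=opus', // sent as the Content-Type header instead
  callback_url: 'https://example.com/notify',
  timestamps: true                       // .recognize() param, now kept in qs
};

const qs = pick(params, ['callback_url', 'event', 'user_token', 'results_ttl'].concat(PARAMS_ALLOWED));
console.log(qs); // { callback_url: 'https://example.com/notify', timestamps: true }
```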
@@ -175,10 +176,14 @@ SpeechToTextV1.prototype.createRecognitionJob = function(params, callback) {
  * The method also returns the creation and update times of each job, and, if a job was created with a callback URL
  * and a user token, the user token for the job.
  *
+ * @param {Object} [params]
  * @param {Function} callback
  * @returns {ReadableStream|undefined}
  */
-SpeechToTextV1.prototype.getRecognitionJobs = function(callback) {
+SpeechToTextV1.prototype.getRecognitionJobs = function(params, callback) {
+  if (!callback && typeof params === 'function') {
+    callback = params;
+  }
   const parameters = {
     options: {
       method: 'GET',
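
The argument shuffle added to getRecognitionJobs() is the SDK's usual pattern for making `params` optional. A condensed sketch of how the two call styles resolve (not the full method body):

```js
// Condensed sketch of the optional-params pattern used above.
function getRecognitionJobs(params, callback) {
  if (!callback && typeof params === 'function') {
    callback = params; // caller passed only a callback: getRecognitionJobs(cb)
  }
  // ...build the GET request and eventually invoke callback(err, jobs)...
}

getRecognitionJobs(function(err, jobs) {});     // old style still works
getRecognitionJobs({}, function(err, jobs) {}); // standard (params, callback) style
```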
@@ -251,8 +256,23 @@ SpeechToTextV1.prototype.deleteRecognitionJob = function(params, callback) {
  * Speech recognition for given audio using default model.
  *
  * @param {Object} params The parameters
- * @param {Audio} [params.audio] - Audio to be recognized
- * @param {String} [params.content_type] - Content-type
+ * @param {Stream} params.audio - Audio to be recognized
+ * @param {String} params.content_type - Content-type
+ * @param {Boolean} [params.continuous],
+ * @param {Number} [params.max_alternatives],
+ * @param {Boolean} [params.timestamps],
+ * @param {Boolean} [params.word_confidence],
+ * @param {Number} [params.inactivity_timeout],
+ * @param {String} [params.model],
+ * @param {Boolean} [params.interim_results],
+ * @param {Boolean} [params.keywords],
+ * @param {Number} [params.keywords_threshold],
+ * @param {Number} [params.word_alternatives_threshold],
+ * @param {Boolean} [params.profanity_filter],
+ * @param {Boolean} [params.smart_formatting],
+ * @param {String} [params.customization_id],
+ * @param {Boolean} [params.speaker_labels]
+ * @param {function} callback
  */
 SpeechToTextV1.prototype.recognize = function(params, callback) {
   const missingParams = helper.getMissingParams(params, ['audio', 'content_type']);
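
A typical `.recognize()` call using a few of the options documented above might look like this (the `speech_to_text` client and file path follow the earlier sketch; option values are illustrative):

```js
const fs = require('fs');

speech_to_text.recognize(
  {
    audio: fs.createReadStream('weather.ogg'), // required
    content_type: 'audio/ogg; codec=opus',     // required
    timestamps: true,                          // optional, per the JSDoc above
    max_alternatives: 3,
    word_confidence: true
  },
  function(err, result) {
    if (err) {
      return console.error(err);
    }
    console.log(JSON.stringify(result, null, 2));
  }
);
```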
@@ -299,10 +319,12 @@ SpeechToTextV1.prototype.recognize = function(params, callback) {
  * Sets 'Transfer-Encoding': 'chunked' and prepare the connection to send
  * chunk data.
  *
+ * @deprecated use createRecognizeStream instead
+ *
  * @param {Object} params The parameters
  * @param {String} [params.content_type] - The Content-type e.g. audio/l16; rate=48000
  * @param {String} [params.session_id] - The session id
- * @deprecated use createRecognizeStream instead
+ * @param {function} callback
  */
 SpeechToTextV1.prototype.recognizeLive = function(params, callback) {
   const missingParams = helper.getMissingParams(params, ['session_id', 'content_type', 'cookie_session']);
@@ -360,10 +382,12 @@ SpeechToTextV1.prototype.recognizeLive = function(params, callback) {
  * This request has to be started before POST on recognize finishes,
  * otherwise it waits for the next recognition.
  *
+ * @deprecated use createRecognizeStream instead
+ *
  * @param {Object} params The parameters
  * @param {String} [params.session_id] - Session used in the recognition
  * @param {boolean} [params.interim_results] - If true, interim results will be returned. Default: false
- * @deprecated use createRecognizeStream instead
+ * @param {Function} callback
  */
 SpeechToTextV1.prototype.observeResult = function(params, callback) {
   const missingParams = helper.getMissingParams(params, ['session_id', 'cookie_session']);
@@ -415,9 +439,11 @@ SpeechToTextV1.prototype.observeResult = function(params, callback) {
  * This is the way to check if the session is ready to accept a new recognition task.
  * The returned state has to be 'initialized' to be able to do recognize POST.
  *
+ * @deprecated use createRecognizeStream instead
+ *
  * @param {Object} params The parameters
  * @param {String} [params.session_id] - Session used in the recognition
- * @deprecated use createRecognizeStream instead
+ * @param {Function} callback
  */
 SpeechToTextV1.prototype.getRecognizeStatus = function(params, callback) {
   const parameters = {
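
The `@deprecated` tags added above all point to `createRecognizeStream()`. A minimal sketch of that replacement, assuming the same `speech_to_text` client as in the earlier examples:

```js
const fs = require('fs');

// Streaming recognition: pipe audio in, pipe transcript text out.
const recognizeStream = speech_to_text.createRecognizeStream({
  content_type: 'audio/ogg; codec=opus'
});

fs.createReadStream('weather.ogg').pipe(recognizeStream);
recognizeStream.setEncoding('utf8'); // emit text instead of Buffers on 'data'
recognizeStream.pipe(fs.createWriteStream('transcript.txt'));

recognizeStream.on('error', function(err) {
  console.error(err);
});
```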
@@ -482,6 +508,7 @@ SpeechToTextV1.prototype.getModel = function(params, callback) {
  *
  * @param {Object} params The parameters
  * @param {string} params.model - The model to use during the session
+ * @param {Function} callback
  */
 SpeechToTextV1.prototype.createSession = function(params, callback) {
   const parameters = {
@@ -520,6 +547,7 @@ SpeechToTextV1.prototype.createSession = function(params, callback) {
  *
  * @param {Object} params The parameters
  * @param {String} params.session_id - Session id.
+ * @param {Function} callback
  */
 SpeechToTextV1.prototype.deleteSession = function(params, callback) {
   const parameters = {

test/integration/test.speech_to_text.js

Lines changed: 10 additions & 3 deletions
@@ -399,15 +399,22 @@ describe('speech_to_text_integration', function() {
   };
 
   it('registerCallback()', function(done) {
-    const params = { callback_url: 'https://watson-test-resources.mybluemix.net/speech-to-text-async-callback-secure', user_secret: 'ThisIsMySecret' };
-    speech_to_text.registerCallback(params, done);
+    speech_to_text.registerCallback(
+      {
+        // if this fails, logs are available at https://watson-test-resources.mybluemix.net/speech-to-text-async/secure
+        callback_url: 'https://watson-test-resources.mybluemix.net/speech-to-text-async/secure/callback',
+        user_secret: 'ThisIsMySecret'
+      },
+      done
+    );
   });
 
   it('createRecognitionJob()', function(done) {
     const params = {
       audio: fs.createReadStream(__dirname + '/../resources/weather.ogg'),
       content_type: 'audio/ogg; codec=opus',
-      callback_url: 'https://watson-test-resources.mybluemix.net/speech-to-text-async-callback-secure',
+      // if this fails, logs are available at https://watson-test-resources.mybluemix.net/speech-to-text-async/secure
+      callback_url: 'https://watson-test-resources.mybluemix.net/speech-to-text-async/secure/callback',
       user_token: 'Node.js SDK Integration Test at ' + new Date(),
       event: 'recognitions.completed',
       results_ttl: 1
