
Commit 5f1fab0

Merge pull request watson-developer-cloud#806 from watson-developer-cloud/patch-stt-and-discovery
Patch stt and discovery
2 parents 663a70f + bf2cd68 commit 5f1fab0

6 files changed: +33 -17 lines changed

discovery/v1-generated.ts (12 additions, 4 deletions)

@@ -4784,6 +4784,12 @@ namespace DiscoveryV1 {
     [propName: string]: any;
   }

+  /** An object specifiying the concepts enrichment and related parameters. */
+  export interface NluEnrichmentConcepts {
+    /** The maximum number of concepts enrichments to extact from each instance of the specified field. */
+    limit?: number;
+  }
+
   /** An object specifying the emotion detection enrichment and related parameters. */
   export interface NluEnrichmentEmotion {
     /** When `true`, emotion detection is performed on the entire field. */
@@ -4805,7 +4811,7 @@ namespace DiscoveryV1 {
     /** When `true`, the types of mentions for each idetifieid entity is recorded. The default is `false`. */
     mention_types?: boolean;
     /** When `true`, a list of sentence locations for each instance of each identified entity is recorded. The default is `false`. */
-    sentence_location?: boolean;
+    sentence_locations?: boolean;
     /** The enrichement model to use with entity extraction. May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, or the default public model `alchemy`. */
     model?: string;
   }
@@ -4826,6 +4832,8 @@ namespace DiscoveryV1 {
     semantic_roles?: NluEnrichmentSemanticRoles;
     /** An object specifying the relations enrichment and related parameters. */
     relations?: NluEnrichmentRelations;
+    /** An object specifiying the concepts enrichment and related parameters. */
+    concepts?: NluEnrichmentConcepts;
   }

   /** An object specifying the Keyword enrichment and related parameters. */
@@ -5207,13 +5215,13 @@ namespace DiscoveryV1 {
   /** An object defining a single tokenizaion rule. */
   export interface TokenDictRule {
     /** The string to tokenize. */
-    text?: string;
+    text: string;
     /** Array of tokens that the `text` field is split into when found. */
-    tokens?: string[];
+    tokens: string[];
     /** Array of tokens that represent the content of the `text` field in an alternate character set. */
     readings?: string[];
     /** The part of speech that the `text` string belongs to. For example `noun`. Custom parts of speech can be specified. */
-    part_of_speech?: string;
+    part_of_speech: string;
   }

   /** Object describing the current status of the tokenization dictionary. */
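
For orientation, here is a minimal TypeScript sketch of the two types this file's hunks touch: the new `NluEnrichmentConcepts` and the now-stricter `TokenDictRule`. The import path and the values are illustrative assumptions, not part of the commit; the only facts taken from the diff are the interface names and which fields are required or optional.

```ts
// Import path assumed; applications normally require 'watson-developer-cloud/discovery/v1'.
import DiscoveryV1 = require('watson-developer-cloud/discovery/v1-generated');

// New in this commit: cap how many concepts are extracted from each
// instance of the enriched field.
const concepts: DiscoveryV1.NluEnrichmentConcepts = { limit: 8 };

// TokenDictRule now requires text, tokens, and part_of_speech;
// readings stays optional. Values below are placeholders.
const rule: DiscoveryV1.TokenDictRule = {
  text: 'すしネコ',
  tokens: ['すし', 'ネコ'],
  readings: ['寿司', 'ネコ'],
  part_of_speech: 'noun'
};

console.log(concepts, rule);
```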

lib/recognize-stream.ts (10 additions, 2 deletions)

@@ -43,6 +43,7 @@ const QUERY_PARAMS_ALLOWED = [
   'model',
   'X-Watson-Learning-Opt-Out',
   'watson-token',
+  'language_customization_id',
   'customization_id',
   'acoustic_customization_id'
 ];
@@ -111,7 +112,8 @@ class RecognizeStream extends Duplex {
  * @param {Boolean} [options.objectMode=false] - alias for options.readableObjectMode
  * @param {Number} [options.X-Watson-Learning-Opt-Out=false] - set to true to opt-out of allowing Watson to use this request to improve it's services
  * @param {Boolean} [options.smart_formatting=false] - formats numeric values such as dates, times, currency, etc.
- * @param {String} [options.customization_id] - Customization ID
+ * @param {String} [options.language_customization_id] - Language customization ID
+ * @param {String} [options.customization_id] - Customization ID (DEPRECATED)
  * @param {String} [options.acoustic_customization_id] - Acoustic customization ID
  * @param {IamTokenManagerV1} [options.token_manager] - Token manager for authenticating with IAM
  * @param {string} [options.base_model_version] - The version of the specified base model that is to be used with recognition request or, for the **Create a session** method, with the new session.
@@ -201,8 +203,14 @@
       options['X-Watson-Learning-Opt-Out'] = options['X-WDC-PL-OPT-OUT'];
     }

+    // compatibility code for the deprecated param, customization_id
+    if (options.customization_id && !options.language_customization_id) {
+      options.language_customization_id = options.customization_id;
+      delete options.customization_id;
+    }
+
     const queryParams = extend(
-      'customization_id' in options
+      'language_customization_id' in options
         ? pick(options, QUERY_PARAMS_ALLOWED)
         : { model: 'en-US_BroadbandModel' },
       pick(options, QUERY_PARAMS_ALLOWED)
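
A sketch of how the new option reaches this code, constructing the stream directly from the file shown above. The import path, the placeholder customization GUID, and the omission of authentication are assumptions; only options documented in this excerpt are used.

```ts
import fs = require('fs');
// Path assumed from this repo's layout (lib/recognize-stream.ts); applications
// normally obtain a RecognizeStream through the Speech to Text helper instead.
import RecognizeStream = require('watson-developer-cloud/lib/recognize-stream');

const stream = new RecognizeStream({
  model: 'en-US_BroadbandModel',
  language_customization_id: '{customization-guid}', // now an allowed query param
  smart_formatting: true,
  objectMode: true
  // auth options (e.g. token_manager) omitted in this sketch
});

fs.createReadStream('audio.wav').pipe(stream);
stream.on('data', event => console.log(event));

// Legacy callers that still pass customization_id are remapped: the new
// compatibility block copies it into language_customization_id before the
// query string is built.
```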

speech-to-text/v1-generated.ts (6 additions, 6 deletions)

@@ -206,7 +206,7 @@ class SpeechToTextV1 extends BaseService {
  *
  * @param {Object} params - The parameters to send to the service.
  * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe.
- * @param {string} params.content_type - The type of the input.
+ * @param {string} [params.content_type] - The type of the input.
  * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request.
  * @param {string} [params.language_customization_id] - The customization ID (GUID) of a custom language model that is
  * to be used with the recognition request. The base model of the specified custom language model must match the model
@@ -292,7 +292,7 @@
   public recognize(params: SpeechToTextV1.RecognizeParams, callback?: SpeechToTextV1.Callback<SpeechToTextV1.SpeechRecognitionResults>): NodeJS.ReadableStream | void {
     const _params = extend({}, params);
     const _callback = (callback) ? callback : () => { /* noop */ };
-    const requiredParams = ['audio', 'content_type'];
+    const requiredParams = ['audio'];

     const missingParams = getMissingParams(_params, requiredParams);
     if (missingParams) {
@@ -509,7 +509,7 @@
  *
  * @param {Object} params - The parameters to send to the service.
  * @param {NodeJS.ReadableStream|FileObject|Buffer} params.audio - The audio to transcribe.
- * @param {string} params.content_type - The type of the input.
+ * @param {string} [params.content_type] - The type of the input.
  * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request.
  * @param {string} [params.callback_url] - A URL to which callback notifications are to be sent. The URL must already
  * be successfully white-listed by using the **Register a callback** method. You can include the same callback URL
@@ -623,7 +623,7 @@
   public createJob(params: SpeechToTextV1.CreateJobParams, callback?: SpeechToTextV1.Callback<SpeechToTextV1.RecognitionJob>): NodeJS.ReadableStream | void {
     const _params = extend({}, params);
     const _callback = (callback) ? callback : () => { /* noop */ };
-    const requiredParams = ['audio', 'content_type'];
+    const requiredParams = ['audio'];

     const missingParams = getMissingParams(_params, requiredParams);
     if (missingParams) {
@@ -2748,7 +2748,7 @@ namespace SpeechToTextV1 {
     /** The audio to transcribe. */
     audio: NodeJS.ReadableStream|FileObject|Buffer;
     /** The type of the input. */
-    content_type: RecognizeConstants.ContentType | string;
+    content_type?: RecognizeConstants.ContentType | string;
     /** The identifier of the model that is to be used for the recognition request. */
     model?: RecognizeConstants.Model | string;
     /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. */
@@ -2842,7 +2842,7 @@ namespace SpeechToTextV1 {
     /** The audio to transcribe. */
     audio: NodeJS.ReadableStream|FileObject|Buffer;
     /** The type of the input. */
-    content_type: CreateJobConstants.ContentType | string;
+    content_type?: CreateJobConstants.ContentType | string;
     /** The identifier of the model that is to be used for the recognition request. */
     model?: CreateJobConstants.Model | string;
     /** A URL to which callback notifications are to be sent. The URL must already be successfully white-listed by using the **Register a callback** method. You can include the same callback URL with any number of job creation requests. Omit the parameter to poll the service for job completion and results. Use the `user_token` parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. */
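
Since `content_type` is no longer listed in `requiredParams`, a recognize call can now pass parameter validation with only `audio`. A minimal sketch follows; the credentials and audio file are placeholders, and it assumes the service can work out the format of the submitted audio when the content type is not supplied.

```ts
import fs = require('fs');
import SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');

// Placeholder credentials -- substitute your own service instance values.
const speechToText = new SpeechToTextV1({
  username: '{username}',
  password: '{password}'
});

// content_type omitted: validation now only requires `audio`.
speechToText.recognize(
  { audio: fs.createReadStream('audio.flac') },
  (err, results) => {
    if (err) {
      console.error(err);
      return;
    }
    console.log(JSON.stringify(results, null, 2));
  }
);
```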

speech-to-text/v1.ts (3 additions, 2 deletions)

@@ -488,7 +488,7 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 {
  *
  * @param {Object} params The parameters
  * @param {Stream} params.audio - Audio to be recognized
- * @param {String} params.content_type - Content-type
+ * @param {String} [params.content_type] - Content-type
  * @param {String} [params.base_model_version]
  * @param {Number} [params.max_alternatives]
  * @param {Boolean} [params.timestamps]
@@ -501,14 +501,15 @@ class SpeechToTextV1 extends GeneratedSpeechToTextV1 {
  * @param {Number} [params.word_alternatives_threshold]
  * @param {Boolean} [params.profanity_filter]
  * @param {Boolean} [params.smart_formatting]
+ * @param {String} [params.language_customization_id]
  * @param {String} [params.customization_id]
  * @param {String} [params.acoustic_customization_id]
  * @param {Number} [params.customization_weight]
  * @param {Boolean} [params.speaker_labels]
  * @param {function} callback
  */
  recognize(params, callback) {
-   const missingParams = getMissingParams(params, ['audio', 'content_type']);
+   const missingParams = getMissingParams(params, ['audio']);
    if (missingParams) {
      callback(missingParams);
      return;

test/unit/speech-helpers.test.js (0 additions, 1 deletion)

@@ -34,7 +34,6 @@ describe('speech_to_text', function() {
   it('should check no parameters provided', function() {
     speech_to_text.recognize({}, missingParameter);
     speech_to_text.recognize(null, missingParameter);
-    speech_to_text.recognize({ audio: 'foo' }, missingParameter);
     speech_to_text.recognize({ content_type: 'bar' }, missingParameter);
     speech_to_text.recognize({ continuous: 'false' }, missingParameter);
   });

test/unit/speech-to-text.v1.test.js (2 additions, 2 deletions)

@@ -261,7 +261,7 @@ describe('recognize', () => {
   test('should enforce required parameters', done => {
     // required parameters for this method
-    const requiredParams = ['audio', 'content_type'];
+    const requiredParams = ['audio'];

     speech_to_text.recognize({}, err => {
       checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock);
@@ -511,7 +511,7 @@ describe('createJob', () => {
   test('should enforce required parameters', done => {
     // required parameters for this method
-    const requiredParams = ['audio', 'content_type'];
+    const requiredParams = ['audio'];

     speech_to_text.createJob({}, err => {
       checkRequiredParamsHandling(requiredParams, err, missingParamsMock, createRequestMock);
