@@ -84,16 +84,16 @@ class Gen {
84
84
*
85
85
* @returns {Promise<string|Buffer> } - The generated image, either as base64 string or Buffer.
86
86
*/
87
- static async generate_image_from_desc ( promptString , openaiKey , imageApiKey , is_base64 = true ,
88
- provider = SupportedImageModels . STABILITY , customProxyHelper = null ) {
87
+ static async generate_image_from_desc ( promptString , openaiKey , imageApiKey , is_base64 = true , width = 1024 ,
88
+ height = 1024 , provider = SupportedImageModels . STABILITY , customProxyHelper = null ) {
89
89
90
90
const imageDescription = await Gen . getImageDescription ( promptString , openaiKey , customProxyHelper ) ;
91
91
const imgModel = new RemoteImageModel ( imageApiKey , provider ) ;
92
92
const images = await imgModel . generateImages (
93
93
new ImageModelInput ( { prompt : imageDescription ,
94
94
numberOfImages : 1 ,
95
- width : 512 ,
96
- height : 512 ,
95
+ width : width ,
96
+ height : height ,
97
97
responseFormat : 'b64_json' } ) ) ;
98
98
if ( is_base64 ) {
99
99
return images [ 0 ] ;
@@ -121,6 +121,8 @@ class Gen {
121
121
tokeSize = 8000 ;
122
+ } else if ( model_name . includes ( 'gpt-4o' ) ) {
123
+ tokeSize = 20000 ;
122
124
} else if ( model_name . includes ( 'gpt-4' ) ) {
123
125
tokeSize = 4000 ;
124
126
}
125
127
126
128
// prepare the bot
@@ -130,7 +132,7 @@ class Gen {
130
132
// set the user message with the template
131
133
input . addUserMessage ( promptTemp . format ( { 'text' : text } ) ) ;
132
134
const responses = await chatbot . chat ( input ) ;
133
- return JSON . parse ( responses [ 0 ] . trim ( ) ) ;
135
+ return JSON . parse ( responses [ 0 ] . trim ( ) . replace ( '```json' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) ) ;
134
136
}
135
137
136
138
static async save_html_page ( text , folder , file_name , openaiKey , model_name = 'gpt-4' , customProxyHelper = null ) {
@@ -158,15 +160,18 @@ class Gen {
158
160
tokeSize = 8000 ;
161
+ } else if ( model_name . includes ( 'gpt-4o' ) ) {
162
+ tokeSize = 20000 ;
159
163
} else if ( model_name . includes ( 'gpt-4' ) ) {
160
164
tokeSize = 3900 ;
161
165
}
166
+
162
167
const chatbot = new Chatbot ( openaiKey , SupportedChatModels . OPENAI , customProxyHelper ) ;
163
168
const input = new ChatGPTInput ( 'Generate HTML graphs from the CSV data and ensure the response is a valid JSON to parse with full HTML code.' ,
164
169
{ maxTokens : tokeSize , model : model_name , temperature :0.3 } ) ;
165
170
// set the user message with the template
166
171
input . addUserMessage ( promptTemp . format ( { 'count' : num_graphs , 'topic' : topic , 'text' : csvStrData } ) ) ;
167
172
const responses = await chatbot . chat ( input ) ;
168
173
169
- return JSON . parse ( responses [ 0 ] . trim ( ) ) [ 0 ] ;
174
+ return JSON . parse ( responses [ 0 ] . trim ( ) . replace ( '```json' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) ) [ 0 ] ;
170
175
}
171
176
172
177
0 commit comments