@@ -101,7 +101,10 @@ require "openai"
 For a quick test you can pass your token directly to a new client:
 
 ```ruby
-client = OpenAI::Client.new(access_token: "access_token_goes_here")
+client = OpenAI::Client.new(
+  access_token: "access_token_goes_here",
+  log_errors: true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production.
+)
 ```
 
 ### With Config
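
A minimal sketch of exercising the new `log_errors` option (the model and prompt here are illustrative, not part of the change):

```ruby
require "openai"

# With log_errors: true, error bodies returned by OpenAI are logged,
# which makes misconfigured requests much easier to debug locally.
client = OpenAI::Client.new(
  access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"),
  log_errors: true # development only; avoid leaking error details in production logs
)

response = client.chat(
  parameters: {
    model: "gpt-3.5-turbo", # illustrative model choice
    messages: [{ role: "user", content: "Hello!" }]
  }
)
puts response.dig("choices", 0, "message", "content")
```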
@@ -110,8 +113,9 @@ For a more robust setup, you can configure the gem with your API keys, for examp
 
 ```ruby
 OpenAI.configure do |config|
-    config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
-    config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional.
+  config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
+  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional.
+  config.log_errors = true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production.
 end
 ```
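
Once configured this way, a bare constructor picks up the global settings, so later examples can build clients without repeating credentials:

```ruby
# Uses access_token, organization_id, and log_errors from OpenAI.configure.
client = OpenAI::Client.new
```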
@@ -342,12 +346,12 @@ puts response.dig("choices", 0, "message", "content")
 
 #### JSON Mode
 
-You can set the response_format to ask for responses in JSON (at least for `gpt-3.5-turbo-1106`):
+You can set the response_format to ask for responses in JSON:
 
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-3.5-turbo-1106",
+    model: "gpt-3.5-turbo",
     response_format: { type: "json_object" },
     messages: [{ role: "user", content: "Hello! Give me some JSON please."}],
     temperature: 0.7,
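
Because JSON mode returns the JSON as a string in the message content, a natural follow-up (a sketch, not part of the diff) is to parse it:

```ruby
require "json"

content = response.dig("choices", 0, "message", "content")
data = JSON.parse(content) # raises JSON::ParserError if the model emits invalid JSON
puts data.inspect
```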
@@ -367,7 +371,7 @@ You can stream it as well!
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-3.5-turbo-1106",
+    model: "gpt-3.5-turbo",
     messages: [{ role: "user", content: "Can I have some JSON please?"}],
     response_format: { type: "json_object" },
     stream: proc do |chunk, _bytesize|
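
The `stream:` proc above receives chunks as they arrive; a sketch of a typical consumer that reassembles the full JSON, assuming the standard chat-completions chunk shape:

```ruby
# Accumulate streamed deltas so the complete JSON document can be parsed at the end.
json_buffer = +""
stream_handler = proc do |chunk, _bytesize|
  delta = chunk.dig("choices", 0, "delta", "content")
  json_buffer << delta if delta
end
```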
@@ -564,7 +568,7 @@ These files are in JSONL format, with each line representing the output or error
   "id": "chatcmpl-abc123",
   "object": "chat.completion",
   "created": 1677858242,
-  "model": "gpt-3.5-turbo-0301",
+  "model": "gpt-3.5-turbo",
   "choices": [
     {
       "index": 0,
@@ -660,16 +664,19 @@ To create a new assistant:
 ```ruby
 response = client.assistants.create(
   parameters: {
-    model: "gpt-3.5-turbo-1106", # Retrieve via client.models.list. Assistants need 'gpt-3.5-turbo-1106' or later.
+    model: "gpt-3.5-turbo",
     name: "OpenAI-Ruby test assistant",
     description: nil,
-    instructions: "You are a helpful assistant for coding a OpenAI API client using the OpenAI-Ruby gem.",
+    instructions: "You are a Ruby dev bot. When asked a question, write and run Ruby code to answer the question",
     tools: [
-      { type: 'retrieval' }, # Allow access to files attached using file_ids
-      { type: 'code_interpreter' }, # Allow access to Python code interpreter
+      { type: "code_interpreter" },
     ],
-    "file_ids": ["file-123"], # See Files section above for how to upload files
-    "metadata": { my_internal_version_id: '1.0.0' }
+    tool_resources: {
+      "code_interpreter": {
+        "file_ids": [] # See Files section above for how to upload files
+      }
+    },
+    "metadata": { my_internal_version_id: "1.0.0" }
   })
 assistant_id = response["id"]
 ```
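
To exercise the new assistant end to end, the usual next steps are a thread, a message, and a run (condensed here as a sketch; these calls appear in full elsewhere in the README):

```ruby
# Create a thread, post a user message, then run the assistant on the thread.
thread_id = client.threads.create["id"]
client.messages.create(
  thread_id: thread_id,
  parameters: { role: "user", content: "What is 2 + 2? Compute it in Ruby." }
)
run_id = client.runs.create(
  thread_id: thread_id,
  parameters: { assistant_id: assistant_id }
)["id"]
```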
@@ -851,11 +858,7 @@ client.runs.list(thread_id: thread_id, parameters: { order: "asc", limit: 3 })
 You can also create a thread and run in one call like this:
 
 ```ruby
-response = client.threads.create_and_run(
-  parameters: {
-    model: 'gpt-3.5-turbo',
-    messages: [{ role: 'user', content: "What's deep learning?"}]
-  })
+response = client.runs.create_thread_and_run(parameters: { assistant_id: assistant_id })
 run_id = response['id']
 thread_id = response['thread_id']
 ```
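
A run created this way executes asynchronously, so callers typically poll it; a minimal sketch (the sleep interval is arbitrary):

```ruby
# Poll until the run reaches a terminal state (or needs tool output).
loop do
  run = client.runs.retrieve(id: run_id, thread_id: thread_id)
  break if %w[completed failed cancelled expired requires_action].include?(run["status"])
  sleep 1 # arbitrary backoff for illustration
end
```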