Commit 21007f6

Merge pull request #35 from Duan-JM/main

Optimize window split & change code to adapt to latest openai package

2 parents 7e801af + 5b15f48

File tree: 2 files changed (+72 -33 lines)

README.md

Lines changed: 11 additions & 0 deletions
````diff
@@ -36,6 +36,15 @@ pip install openai
 ```
 [Detailed Direction For Installation](https://github.com/CoderCookE/vim-chatgpt/issues/4#issuecomment-1704607737)
 
+Additionally, for Azure GPT users:
+```
+let g:api_type = 'azure'
+let g:chat_gpt_key = 'your_azure_chatgpt_api'
+let g:azure_endpoint = 'your_azure_endpoint'
+let g:azure_deployment = 'your_azure_deployment'
+let g:azure_api_version = '2023-03-15-preview'
+```
+
 ## Customization
 In your `.vimrc` file you set the following options
````
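
For concreteness, a filled-in version of that Azure block might look like the sketch below. The endpoint, deployment name, and environment variable here are placeholders for illustration, not values from this commit:

```vim
" Placeholder values for illustration only
let g:api_type = 'azure'
let g:chat_gpt_key = $AZURE_OPENAI_API_KEY
let g:azure_endpoint = 'https://my-resource.openai.azure.com'
let g:azure_deployment = 'gpt-35-turbo'
let g:azure_api_version = '2023-03-15-preview'
```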

````diff
@@ -46,6 +55,7 @@ let g:chat_gpt_session_mode=0
 let g:chat_gpt_temperature = 0.7
 let g:chat_gpt_lang = 'Chinese'
 let g:chat_gpt_split_direction = 'vertical'
+let g:split_ratio=4
 ```
 
 - g:chat_gpt_max_tokens: This option allows you to set the maximum number of tokens (words or characters) that the ChatGPT API will return in its response. By default, it is set to 2000 tokens. You can adjust this value based on your needs and preferences.
````
```diff
@@ -55,6 +65,7 @@ let g:chat_gpt_split_direction = 'vertical'
 - g:chat_gpt_lang: Answer in a certain language, such as Chinese.
 - g:chat_gpt_split_direction: Controls how to open splits, 'vertical' or 'horizontal'. Plugin opens horizontal splits by default.
 By customizing these options, you can tailor the ChatGPT Vim Plugin to better suit your specific needs and preferences.
+- g:split_ratio: Controls the split window size. If set to 4, the window size will be 1/4 of the current window.
 
 ## Usage
```
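
The effect of `g:split_ratio` is a simple division: in a 120-column Vim window with `g:split_ratio=4`, `winwidth(0)/g:split_ratio` evaluates to 30, so the plugin issues the equivalent of `:30vnew`. A minimal sketch of that pattern, where `scratch` is a placeholder buffer name:

```vim
" Illustration only; 'scratch' is a placeholder buffer name
let g:split_ratio = 4
" In a 120-column window this runs ':30vnew scratch'
silent execute winwidth(0) / g:split_ratio . 'vnew scratch'
```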

plugin/chatgpt.vim

Lines changed: 61 additions & 33 deletions
```diff
@@ -6,28 +6,6 @@ if !has('python3')
   finish
 endif
 
-" Add ChatGPT dependencies
-python3 << EOF
-import sys
-import vim
-import os
-
-try:
-    import openai
-except ImportError:
-    print("Error: openai module not found. Please install with Pip and ensure equality of the versions given by :!python3 -V, and :python3 import sys; print(sys.version)")
-    raise
-
-def safe_vim_eval(expression):
-    try:
-        return vim.eval(expression)
-    except vim.error:
-        return None
-
-openai.api_key = os.getenv('OPENAI_API_KEY') or safe_vim_eval('g:chat_gpt_key') or safe_vim_eval('g:openai_api_key')
-openai.proxy = os.getenv("OPENAI_PROXY")
-EOF
-
 " Set default values for Vim variables if they don't exist
 if !exists("g:chat_gpt_max_tokens")
   let g:chat_gpt_max_tokens = 2000
```
```diff
@@ -48,6 +26,56 @@ endif
 if !exists("g:chat_gpt_split_direction")
   let g:chat_gpt_split_direction = 'horizontal'
 endif
+if !exists("g:split_ratio")
+  let g:split_ratio = 3
+endif
+
+" Add ChatGPT dependencies
+python3 << EOF
+import sys
+import vim
+import os
+
+try:
+    from openai import AzureOpenAI, OpenAI
+except ImportError:
+    print("Error: openai module not found. Please install with Pip and ensure equality of the versions given by :!python3 -V, and :python3 import sys; print(sys.version)")
+    raise
+
+def safe_vim_eval(expression):
+    try:
+        return vim.eval(expression)
+    except vim.error:
+        return None
+
+def create_client():
+    api_type = safe_vim_eval('g:api_type')
+    api_key = os.getenv('OPENAI_API_KEY') or safe_vim_eval('g:chat_gpt_key') or safe_vim_eval('g:openai_api_key')
+    openai_base_url = safe_vim_eval('g:openai_base_url')
+
+    if api_type == 'azure':
+        azure_endpoint = safe_vim_eval('g:azure_endpoint')
+        azure_api_version = safe_vim_eval('g:azure_api_version')
+        azure_deployment = safe_vim_eval('g:azure_deployment')
+        assert azure_endpoint and azure_api_version and azure_deployment, "azure_endpoint, azure_api_version and azure_deployment not set properly, please check your settings in `vimrc` or `environment`."
+        assert api_key, "api_key not set, please configure your `openai_api_key` in your `vimrc` or `environment`"
+        client = AzureOpenAI(
+            azure_endpoint=azure_endpoint,
+            azure_deployment=azure_deployment,
+            api_key=api_key,
+            api_version=azure_api_version,
+        )
+    else:
+        client = OpenAI(
+            base_url=openai_base_url,
+            api_key=api_key,
+        )
+    return client
+
+client = create_client()
+
+EOF
+
 
 let g:prompt_templates = {
   \ 'ask': '',
```
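
One behavioral note on this hunk: the rebuilt block constructs an explicit client object, as openai>=1.0 requires, and the old `openai.proxy = os.getenv("OPENAI_PROXY")` line has no counterpart here, so the proxy environment variable is no longer read. Since Vim's `python3` heredocs share one interpreter, a hypothetical sanity check (not part of the commit) can inspect which client was built:

```vim
" Hypothetical sanity check, not part of the commit: Vim's python3 heredocs
" share one interpreter, so `client` from the plugin block is visible here
python3 << EOF
# Prints 'AzureOpenAI' when g:api_type is 'azure', otherwise 'OpenAI'
print(type(client).__name__)
EOF
```
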
```diff
@@ -74,9 +102,9 @@ function! DisplayChatGPTResponse(response, finish_reason, chat_gpt_session_id)
 
   if !bufexists(chat_gpt_session_id)
     if g:chat_gpt_split_direction ==# 'vertical'
-      silent execute 'vnew '. chat_gpt_session_id
+      silent execute winwidth(0)/g:split_ratio.'vnew '. chat_gpt_session_id
     else
-      silent execute 'new '. chat_gpt_session_id
+      silent execute winheight(0)/g:split_ratio.'new '. chat_gpt_session_id
     endif
     call setbufvar(chat_gpt_session_id, '&buftype', 'nofile')
     call setbufvar(chat_gpt_session_id, '&bufhidden', 'hide')
```
```diff
@@ -89,9 +117,9 @@ function! DisplayChatGPTResponse(response, finish_reason, chat_gpt_session_id)
 
   if bufwinnr(chat_gpt_session_id) == -1
     if g:chat_gpt_split_direction ==# 'vertical'
-      execute 'vsplit ' . chat_gpt_session_id
+      execute winwidth(0)/g:split_ratio.'vsplit ' . chat_gpt_session_id
     else
-      execute 'split ' . chat_gpt_session_id
+      execute winheight(0)/g:split_ratio.'split ' . chat_gpt_session_id
     endif
   endif
```
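
Taken together, the two hunks above cover both window paths in DisplayChatGPTResponse: `:vnew` when the session buffer does not exist yet, and `:vsplit` when it exists but is hidden, with both now sized by `g:split_ratio`. Schematically, with `gpt-session` as a placeholder buffer name:

```vim
" Schematic of the two paths; 'gpt-session' is a placeholder name
if !bufexists('gpt-session')
  " first response: create the session buffer in a sized split
  silent execute winwidth(0) / g:split_ratio . 'vnew gpt-session'
elseif bufwinnr('gpt-session') == -1
  " buffer exists but is not displayed: reopen it in a sized split
  execute winwidth(0) / g:split_ratio . 'vsplit gpt-session'
endif
```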

```diff
@@ -180,13 +208,13 @@ def chat_gpt(prompt):
     messages.insert(0, systemCtx)
 
     try:
-        response = openai.chat.completions.create(
-            model=model,
-            messages=messages,
-            max_tokens=max_tokens,
-            stop='',
-            temperature=temperature,
-            stream=True
+        response = client.chat.completions.create(
+            model=model,
+            messages=messages,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            stop='',
+            stream=True
       )
 
       # Iterate through the response chunks
```
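
Under openai>=1.0 the streamed response yields chunk objects whose text lives in `choices[0].delta.content`, and the plugin's iteration loop below this hunk consumes it in that shape. A minimal sketch of the consumption pattern, assuming the `client` built earlier; the model name and message are placeholders:

```vim
" Minimal streaming sketch, assuming the `client` object created earlier;
" the model name and message are placeholders, not taken from this commit
python3 << EOF
stream = client.chat.completions.create(
    model='gpt-3.5-turbo',  # placeholder model
    messages=[{'role': 'user', 'content': 'hello'}],
    stream=True,
)
for chunk in stream:
    # each chunk carries an incremental delta, not the full message
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end='')
EOF
```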
