
Commit de8199f

Update
1 parent cb64bb1 commit de8199f

File tree

2 files changed: +66 -42 lines

.vscode/settings.json

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+{}

gradio-app.py

Lines changed: 65 additions & 42 deletions
@@ -1,7 +1,20 @@
 import os
 import gradio as gr
 import openai
+
 import google.generativeai as palm
+from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
+
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+from langchain.schema import HumanMessage, SystemMessage, BaseOutputParser
+
+from dotenv import load_dotenv
+load_dotenv()
+
 
 llm_api_options = ["OpenAI API","Azure OpenAI API","Google PaLM API", "Llama 2"]
 TEST_MESSAGE = "Write an introductory paragraph to explain Generative AI to the reader of this content."
@@ -13,54 +26,64 @@
 
 temperature = 0.7
 
-def openai_text_completion(openai_api_key: str, prompt: str, model: str):
-    try:
-        system_prompt: str = "Explain in detail to help student understand the concept.",
-        assistant_prompt: str = None,
-        messages = [
-            {"role": "user", "content": f"{prompt}"},
-            {"role": "system", "content": f"{system_prompt}"},
-            {"role": "assistant", "content": f"{assistant_prompt}"}
-        ]
-        openai.api_key = openai_api_key
-        openai.api_version = '2020-11-07'
-        completion = openai.ChatCompletion.create(
-            model = model,
-            messages = messages,
-            temperature = temperature
-        )
-        response = completion["choices"][0]["message"].content
-        return "", response
+def compose_prompt():
+    template = ("You are a helpful assistant that answers this question.")
+    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
+    human_template = "{text}"
+    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+    chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
+    return chat_prompt
+
+
+def azure_openai_text_completion(prompt: str,
+                                 model: str,
+                                 api_key: str,
+                                 azure_endpoint: str = None,
+                                 deployment_name: str = None
+                                 ):
+    try:
+        openai_api_base = f"https://{azure_endpoint}.openai.azure.com"
+        chat_prompt = compose_prompt()
+        chat = AzureChatOpenAI(openai_api_type = "azure",
+                               openai_api_key = api_key,
+                               openai_api_base = openai_api_base,
+                               deployment_name = deployment_name,
+                               model = model,
+                               temperature = temperature,
+                               openai_api_version="2023-05-15")
+        llm_response = chat(
+            chat_prompt.format_prompt(
+                text=prompt
+            ).to_messages()
+        )
+        return "", llm_response.content
     except Exception as exception:
         print(f"Exception Name: {type(exception).__name__}")
         print(exception)
         return f" openai_text_completion Error - {exception}", ""
 
-def azure_openai_text_completion(azure_openai_api_key: str, azure_endpoint: str, azure_deployment_name: str, prompt: str, model: str):
-    try:
-        system_prompt: str = "Explain in detail to help student understand the concept.",
-        assistant_prompt: str = None,
-        messages = [
-            {"role": "user", "content": f"{prompt}"},
-            {"role": "system", "content": f"{system_prompt}"},
-            {"role": "assistant", "content": f"{assistant_prompt}"}
-        ]
-        openai.api_key = azure_openai_api_key
-        openai.api_type = "azure"
-        openai.api_version = "2023-05-15"
-        openai.api_base = f"https://{azure_endpoint}.openai.azure.com"
-        completion = openai.ChatCompletion.create(
-            model = model,
-            engine = azure_deployment_name,
-            messages = messages,
-            temperature = temperature
-        )
-        response = completion["choices"][0]["message"].content
-        return "", response
+def openai_text_completion(prompt: str,
+                           model: str,
+                           api_key: str
+                           ):
+    try:
+        chat = ChatOpenAI(openai_api_key=api_key,
+                          model=model,
+                          temperature=temperature)
+
+        chat_prompt = compose_prompt()
+
+        llm_response = chat(
+            chat_prompt.format_prompt(
+                text=prompt
+            ).to_messages()
+        )
+        return "", llm_response.content
     except Exception as exception:
         print(f"Exception Name: {type(exception).__name__}")
         print(exception)
-        return f" azure_openai_text_completion Error - {exception}", ""
+        return f" openai_text_completion Error - {exception}", ""
+
 
 
 def palm_text_completion(google_palm_key: str, prompt: str, model: str):
@@ -102,10 +125,10 @@ def test_handler(optionSelection,
                  google_model_name: str ="models/text-bison-001"):
     match optionSelection:
         case "OpenAI API":
-            message, response = openai_text_completion(openai_key, prompt,openai_model_name)
+            message, response = openai_text_completion(prompt,openai_model_name, openai_key)
             return message, response
         case "Azure OpenAI API":
-            message, response = azure_openai_text_completion(azure_openai_key, azure_openai_api_base, azure_openai_deployment_name, prompt,openai_model_name)
+            message, response = azure_openai_text_completion(prompt,openai_model_name, azure_openai_key, azure_openai_api_base, azure_openai_deployment_name)
             return message, response
         case "Google PaLM API":
            message, response = palm_text_completion(google_generative_api_key, prompt,google_model_name)
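
Note on the new call pattern: each provider branch above now builds a shared LangChain ChatPromptTemplate (via compose_prompt) and passes the formatted messages to a chat model. Below is a minimal standalone sketch of that pattern, not part of the commit; the model name "gpt-3.5-turbo", the OPENAI_API_KEY variable, and the prompt text are illustrative assumptions, and it relies on the same legacy langchain.chat_models API that gradio-app.py imports.

# Minimal sketch (illustrative, not from the commit) of the prompt-composition
# pattern used above: a fixed system message plus the user's text, formatted
# and sent to a LangChain chat model.
import os

from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

load_dotenv()  # expects an OPENAI_API_KEY entry in a local .env file (assumption)

# Compose the two-part prompt, mirroring compose_prompt() in gradio-app.py.
system_prompt = SystemMessagePromptTemplate.from_template(
    "You are a helpful assistant that answers this question."
)
human_prompt = HumanMessagePromptTemplate.from_template("{text}")
chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])

# Instantiate the chat model and send the formatted messages.
chat = ChatOpenAI(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    model="gpt-3.5-turbo",  # assumed model name for illustration
    temperature=0.7,
)
response = chat(
    chat_prompt.format_prompt(text="Explain Generative AI briefly.").to_messages()
)
print(response.content)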
