|
1 | 1 | import os
|
2 | 2 | import gradio as gr
|
3 | 3 | import openai
|
| 4 | + |
4 | 5 | import google.generativeai as palm
|
| 6 | +from langchain.chat_models import ChatOpenAI, AzureChatOpenAI |
| 7 | + |
| 8 | +from langchain.prompts.chat import ( |
| 9 | + ChatPromptTemplate, |
| 10 | + SystemMessagePromptTemplate, |
| 11 | + HumanMessagePromptTemplate, |
| 12 | +) |
| 13 | +from langchain.schema import HumanMessage, SystemMessage, BaseOutputParser |
| 14 | + |
| 15 | +from dotenv import load_dotenv |
| 16 | +load_dotenv() |
| 17 | + |
5 | 18 |
|
# Backend choices offered in the UI selector; test_handler matches on these exact strings.
llm_api_options = ["OpenAI API","Azure OpenAI API","Google PaLM API", "Llama 2"]

# Canned prompt used to exercise whichever backend the user selects.
TEST_MESSAGE = "Write an introductory paragraph to explain Generative AI to the reader of this content."
|
|
13 | 26 |
|
# Sampling temperature shared by every completion call in this module.
temperature = 0.7
|
15 | 28 |
|
16 |
| -def openai_text_completion(openai_api_key: str, prompt: str, model: str): |
17 |
| - try: |
18 |
| - system_prompt: str = "Explain in detail to help student understand the concept.", |
19 |
| - assistant_prompt: str = None, |
20 |
| - messages = [ |
21 |
| - {"role": "user", "content": f"{prompt}"}, |
22 |
| - {"role": "system", "content": f"{system_prompt}"}, |
23 |
| - {"role": "assistant", "content": f"{assistant_prompt}"} |
24 |
| - ] |
25 |
| - openai.api_key = openai_api_key |
26 |
| - openai.api_version = '2020-11-07' |
27 |
| - completion = openai.ChatCompletion.create( |
28 |
| - model = model, |
29 |
| - messages = messages, |
30 |
| - temperature = temperature |
31 |
| - ) |
32 |
| - response = completion["choices"][0]["message"].content |
33 |
| - return "", response |
def compose_prompt():
    """Build the shared two-message chat prompt.

    Returns a ChatPromptTemplate with a fixed system instruction followed by
    a human message exposing a single ``{text}`` placeholder, filled in later
    via ``format_prompt(text=...)`` by the completion helpers.
    """
    system_part = SystemMessagePromptTemplate.from_template(
        "You are a helpful assistant that answers this question."
    )
    human_part = HumanMessagePromptTemplate.from_template("{text}")
    return ChatPromptTemplate.from_messages([system_part, human_part])
| 36 | + |
| 37 | + |
def azure_openai_text_completion(prompt: str,
                                 model: str,
                                 api_key: str,
                                 azure_endpoint: str = None,
                                 deployment_name: str = None
                                 ):
    """Run a chat completion against an Azure OpenAI deployment.

    Args:
        prompt: User text substituted into the shared chat prompt.
        model: Model name passed through to AzureChatOpenAI.
        api_key: Azure OpenAI API key.
        azure_endpoint: Resource name; expanded to
            ``https://<azure_endpoint>.openai.azure.com``.
        deployment_name: Azure deployment to invoke.

    Returns:
        Tuple ``(error_message, response_text)`` — on success the error
        message is ``""``; on failure the response text is ``""``.
    """
    try:
        # Azure addresses deployments by resource URL, not the public API host.
        openai_api_base = f"https://{azure_endpoint}.openai.azure.com"
        chat_prompt = compose_prompt()
        chat = AzureChatOpenAI(openai_api_type = "azure",
                               openai_api_key = api_key,
                               openai_api_base = openai_api_base,
                               deployment_name = deployment_name,
                               model = model,
                               temperature = temperature,
                               openai_api_version="2023-05-15")
        llm_response = chat(
            chat_prompt.format_prompt(
                text=prompt
            ).to_messages()
        )
        return "", llm_response.content
    except Exception as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        # Fix: previously reported " openai_text_completion Error", which
        # misattributed Azure failures to the plain-OpenAI code path.
        return f" azure_openai_text_completion Error - {exception}", ""
|
38 | 64 |
|
39 |
| -def azure_openai_text_completion(azure_openai_api_key: str, azure_endpoint: str, azure_deployment_name: str, prompt: str, model: str): |
40 |
| - try: |
41 |
| - system_prompt: str = "Explain in detail to help student understand the concept.", |
42 |
| - assistant_prompt: str = None, |
43 |
| - messages = [ |
44 |
| - {"role": "user", "content": f"{prompt}"}, |
45 |
| - {"role": "system", "content": f"{system_prompt}"}, |
46 |
| - {"role": "assistant", "content": f"{assistant_prompt}"} |
47 |
| - ] |
48 |
| - openai.api_key = azure_openai_api_key |
49 |
| - openai.api_type = "azure" |
50 |
| - openai.api_version = "2023-05-15" |
51 |
| - openai.api_base = f"https://{azure_endpoint}.openai.azure.com" |
52 |
| - completion = openai.ChatCompletion.create( |
53 |
| - model = model, |
54 |
| - engine = azure_deployment_name, |
55 |
| - messages = messages, |
56 |
| - temperature = temperature |
57 |
| - ) |
58 |
| - response = completion["choices"][0]["message"].content |
59 |
| - return "", response |
def openai_text_completion(prompt: str,
                           model: str,
                           api_key: str
                           ):
    """Run a chat completion against the public OpenAI API.

    Args:
        prompt: User text substituted into the shared chat prompt.
        model: OpenAI model name (e.g. a gpt-3.5/gpt-4 variant).
        api_key: OpenAI API key.

    Returns:
        Tuple ``(error_message, response_text)`` — ``""`` for the error
        message on success, ``""`` for the response text on failure.
    """
    try:
        # Render the shared prompt into concrete chat messages first,
        # then hand them to a freshly configured client.
        messages = compose_prompt().format_prompt(text=prompt).to_messages()
        client = ChatOpenAI(openai_api_key=api_key,
                            model=model,
                            temperature=temperature)
        llm_response = client(messages)
        return "", llm_response.content
    except Exception as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f" openai_text_completion Error - {exception}", ""
| 86 | + |
64 | 87 |
|
65 | 88 |
|
66 | 89 | def palm_text_completion(google_palm_key: str, prompt: str, model: str):
|
@@ -102,10 +125,10 @@ def test_handler(optionSelection,
|
102 | 125 | google_model_name: str ="models/text-bison-001"):
|
103 | 126 | match optionSelection:
|
104 | 127 | case "OpenAI API":
|
105 |
| - message, response = openai_text_completion(openai_key, prompt,openai_model_name) |
| 128 | + message, response = openai_text_completion(prompt,openai_model_name, openai_key) |
106 | 129 | return message, response
|
107 | 130 | case "Azure OpenAI API":
|
108 |
| - message, response = azure_openai_text_completion(azure_openai_key, azure_openai_api_base, azure_openai_deployment_name, prompt,openai_model_name) |
| 131 | + message, response = azure_openai_text_completion(prompt,openai_model_name, azure_openai_key, azure_openai_api_base, azure_openai_deployment_name) |
109 | 132 | return message, response
|
110 | 133 | case "Google PaLM API":
|
111 | 134 | message, response = palm_text_completion(google_generative_api_key, prompt,google_model_name)
|
|
0 commit comments