import os
import gradio as gr
import openai
import google.generativeai as palm

llm_api_options = ["OpenAI API", "Azure OpenAI API", "Google PaLM API", "Llama 2"]
TEST_MESSAGE = "Write an introductory paragraph to explain Generative AI to the reader of this content."
openai_models = ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo",
                 "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "text-davinci-003",
                 "text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]
google_palm_models = ["models/text-bison-001", "models/chat-bison-001", "models/embedding-gecko-001"]

azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
azure_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")
google_palm_key = os.getenv("GOOGLE_PALM_AI_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
azure_openai_api_key = os.getenv("AZURE_OPENAI_KEY")

temperature = 0.7
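
# OpenAI API: build a chat message list from the user prompt plus fixed system/assistant
# prompts, call ChatCompletion.create, and return (error_message, response_text).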
def openai_text_completion(prompt: str, model: str):
    try:
        system_prompt: str = "Explain in detail to help student understand the concept."
        assistant_prompt = None
        messages = [
            {"role": "user", "content": f"{prompt}"},
            {"role": "system", "content": f"{system_prompt}"},
            {"role": "assistant", "content": f"{assistant_prompt}"}
        ]
        openai.api_key = openai_api_key
        openai.api_version = '2020-11-07'
        completion = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=temperature
        )
        response = completion["choices"][0]["message"].content
        return "", response
    except Exception as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f"openai_text_completion Error - {exception}", ""
def azure_openai_text_completion(prompt: str, model: str):
    try:
        system_prompt: str = "Explain in detail to help student understand the concept."
        assistant_prompt = None
        messages = [
            {"role": "user", "content": f"{prompt}"},
            {"role": "system", "content": f"{system_prompt}"},
            {"role": "assistant", "content": f"{assistant_prompt}"}
        ]
        openai.api_key = azure_openai_api_key
        openai.api_type = "azure"
        openai.api_version = "2023-05-15"
        openai.api_base = f"https://{azure_endpoint}.openai.azure.com"
        completion = openai.ChatCompletion.create(
            model=model,
            engine=azure_deployment_name,
            messages=messages,
            temperature=temperature
        )
        response = completion["choices"][0]["message"].content
        return "", response
    except Exception as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f"azure_openai_text_completion Error - {exception}", ""
def palm_text_completion(prompt: str, model: str):
    try:
        candidate_count = 1
        top_k = 40
        top_p = 0.95
        max_output_tokens = 1024
        palm.configure(api_key=google_palm_key)
        defaults = {
            'model': model,
            'temperature': temperature,
            'candidate_count': candidate_count,
            'top_k': top_k,
            'top_p': top_p,
            'max_output_tokens': max_output_tokens,
            'stop_sequences': [],
            'safety_settings': [
                {"category": "HARM_CATEGORY_DEROGATORY", "threshold": 1},
                {"category": "HARM_CATEGORY_TOXICITY", "threshold": 1},
                {"category": "HARM_CATEGORY_VIOLENCE", "threshold": 2},
                {"category": "HARM_CATEGORY_SEXUAL", "threshold": 2},
                {"category": "HARM_CATEGORY_MEDICAL", "threshold": 2},
                {"category": "HARM_CATEGORY_DANGEROUS", "threshold": 2}
            ],
        }
        response = palm.generate_text(
            **defaults,
            prompt=prompt
        )
        return "", response.result
    except Exception as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f"palm_text_completion Error - {exception}", ""
def test_handler(optionSelection, prompt: str = TEST_MESSAGE, openai_model_name: str = "gpt-4", google_model_name: str = "models/text-bison-001"):
    match optionSelection:
        case "OpenAI API":
            message, response = openai_text_completion(prompt, openai_model_name)
            return message, response
        case "Azure OpenAI API":
            message, response = azure_openai_text_completion(prompt, openai_model_name)
            return message, response
        case "Google PaLM API":
            message, response = palm_text_completion(prompt, google_model_name)
            return message, response
        case "Llama 2":
            return f"{optionSelection} is not yet implemented!", ""
        case _:
            return f"Invalid choice: {optionSelection}!", ""
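
# Gradio UI: one tab for running the prompt, one tab for per-provider model settings.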
with gr.Blocks() as LLMDemoTabbedScreen:
    with gr.Tab("Text-to-Text (Text Completion)"):
        llm_options = gr.Radio(llm_api_options, label="Select one", info="Which service do you want to use?", value="OpenAI API")
        with gr.Row():
            with gr.Column():
                test_string = gr.Textbox(label="Try String", value=TEST_MESSAGE, lines=2)
                test_string_response = gr.Textbox(label="Response")
                test_string_output_info = gr.Label(value="Output Info", label="Info")
                test_button = gr.Button("Try it")
    with gr.Tab("API Settings"):
        with gr.Tab("Open AI"):
            openai_model = gr.Dropdown(openai_models, value="gpt-4", label="Model", info="Select one, for Natural language")
        with gr.Tab("Google PaLM API"):
            google_model_name = gr.Dropdown(google_palm_models,
                                            value="models/text-bison-001", label="Model", info="Select one, for Natural language")

    test_button.click(
        fn=test_handler,
        inputs=[llm_options, test_string, openai_model, google_model_name],
        outputs=[test_string_output_info, test_string_response]
    )


if __name__ == "__main__":
    LLMDemoTabbedScreen.launch()