taesiri committed
Commit ed2d211
Parent: b135597

switching to gpt-3.5-turbo

Files changed (2)
  1. app.py +11 -11
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,5 +1,5 @@
 import os
-from langchain.llms import OpenAI
+from langchain.llms import OpenAI, OpenAIChat
 
 os.system("pip install -U gradio")
 
@@ -80,8 +80,8 @@ session_token = os.environ.get("SessionToken")
 
 def generate_caption(object_list_str, api_key, temperature):
     query = f"You are an intelligent image captioner. I will hand you the objects and their position, and you should give me a detailed description for the photo. In this photo we have the following objects\n{object_list_str}"
-    llm = OpenAI(
-        model_name="text-davinci-003", openai_api_key=api_key, temperature=temperature
+    llm = OpenAIChat(
+        model_name="gpt-3.5-turbo", openai_api_key=api_key, temperature=temperature
     )
 
     try:
@@ -142,13 +142,13 @@ def inference(img, vocabulary, api_key, temperature):
 
 with gr.Blocks() as demo:
     with gr.Column():
-        gr.Markdown("# Image Captioning using LangChain (GPT3.5) 🦜️🔗")
+        gr.Markdown("# Image Captioning using Detic and ChatGPT with LangChain 🦜️🔗")
         gr.Markdown(
-            "Use Detic to detect objects in an image and then use GPT to describe the image."
+            "Use Detic to detect objects in an image and then use `gpt-3.5-turbo` to describe the image."
         )
 
-    with gr.Column():
-        with gr.Row():
+    with gr.Row():
+        with gr.Column():
             inp = gr.Image(label="Input Image", type="filepath")
         with gr.Column():
             openai_api_key_textbox = gr.Textbox(
@@ -164,10 +164,10 @@ with gr.Blocks() as demo:
                 value="lvis",
             )
 
-            btn_detic = gr.Button("Run Detic+GPT3.5")
-    with gr.Row():
-        outviz = gr.Image(label="Visualization", type="pil")
-        output_desc = gr.Textbox(label="Description Description", lines=5)
+            btn_detic = gr.Button("Run Detic and ChatGPT")
+    with gr.Column():
+        output_desc = gr.Textbox(label="Description Description", lines=5)
+        outviz = gr.Image(label="Visualization", type="pil")
 
     btn_detic.click(
         fn=inference,
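
For reference, a minimal sketch of how the updated caption path might call the chat model. It assumes a langchain 0.0.x release in which OpenAIChat is still exported from langchain.llms and can be called directly on a prompt string; the body of the try block and the error handling are not visible in the diff, so those parts are illustrative only.

# Minimal sketch, not the actual app.py: assumes langchain 0.0.x, where
# OpenAIChat is importable from langchain.llms and callable on a prompt.
from langchain.llms import OpenAIChat

def generate_caption(object_list_str, api_key, temperature):
    query = (
        "You are an intelligent image captioner. I will hand you the objects "
        "and their position, and you should give me a detailed description for "
        f"the photo. In this photo we have the following objects\n{object_list_str}"
    )
    # gpt-3.5-turbo replaces text-davinci-003 as the model behind the caption.
    llm = OpenAIChat(
        model_name="gpt-3.5-turbo", openai_api_key=api_key, temperature=temperature
    )
    try:
        # Calling the wrapper on the prompt returns the generated caption text.
        return llm(query)
    except Exception:
        # Hypothetical fallback; the real handling sits outside the shown hunks.
        return "Failed to generate a caption."

Switching the wrapper from OpenAI to OpenAIChat is what routes requests to the chat-completions endpoint that gpt-3.5-turbo requires; changing the model name alone would not be enough.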
requirements.txt CHANGED
@@ -35,5 +35,6 @@ nltk
 
 git+https://github.com/openai/CLIP.git
 
+
 openai
 langchain