fl399 committed on
Commit
d38645c
1 Parent(s): e9ecd38

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -6
app.py CHANGED
@@ -119,14 +119,15 @@ if torch.__version__ >= "2":
119
 
120
 
121
  ## FLAN-UL2
122
- TOKEN = os.environ.get("API_TOKEN", None)
123
  API_URL = "https://api-inference.huggingface.co/models/google/flan-ul2"
124
- headers = {"Authorization": f"Bearer {TOKEN}"}
125
  def query(payload):
126
  response = requests.post(API_URL, headers=headers, json=payload)
127
  return response.json()
128
 
129
  ## OpenAI models
 
130
  def set_openai_api_key(api_key):
131
  if api_key and api_key.startswith("sk-") and len(api_key) > 50:
132
  openai.api_key = api_key
@@ -186,7 +187,10 @@ def evaluate(
186
  elif llm == "flan-ul2":
187
  output = query({"inputs": prompt_0shot})[0]["generated_text"]
188
  elif llm == "gpt-3.5-turbo":
189
- output = get_response_from_openai(prompt_0shot)
 
 
 
190
  else:
191
  RuntimeError(f"No such LLM: {llm}")
192
 
@@ -242,14 +246,17 @@ with gr.Blocks(theme=theme) as demo:
242
 
243
  gr.Examples(
244
  examples=[
 
 
 
245
  ["deplot_case_study_m1.png", "What is the sum of numbers of Indonesia and Ireland? Remember to think step by step.", "alpaca-lora"],
246
- ["deplot_case_study_m1.png", "Summarise the chart for me please.", "alpaca-lora"],
247
  ["deplot_case_study_3.png", "By how much did China's growth rate drop? Think step by step.", "alpaca-lora"],
248
  ["deplot_case_study_4.png", "How many papers are submitted in 2020?", "alpaca-lora"],
249
  ["deplot_case_study_x2.png", "Summarise the chart for me please.", "alpaca-lora"],
250
  ["deplot_case_study_4.png", "How many papers are submitted in 2020?", "flan-ul2"],
251
- ["deplot_case_study_4.png", "acceptance rate = # accepted / #submitted . What is the acceptance rate of 2010?", "flan-ul2"],
252
- ["deplot_case_study_m1.png", "Summarise the chart for me please.", "flan-ul2"],
 
253
  ],
254
  cache_examples=True,
255
  inputs=[input_image, instruction, llm],
 
119
 
120
 
121
  ## FLAN-UL2
122
+ HF_TOKEN = os.environ.get("API_TOKEN", None)
123
  API_URL = "https://api-inference.huggingface.co/models/google/flan-ul2"
124
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
125
  def query(payload):
126
  response = requests.post(API_URL, headers=headers, json=payload)
127
  return response.json()
128
 
129
  ## OpenAI models
130
+ openai.api_key = os.environ.get("OPENAI_TOKEN", None)
131
  def set_openai_api_key(api_key):
132
  if api_key and api_key.startswith("sk-") and len(api_key) > 50:
133
  openai.api_key = api_key
 
187
  elif llm == "flan-ul2":
188
  output = query({"inputs": prompt_0shot})[0]["generated_text"]
189
  elif llm == "gpt-3.5-turbo":
190
+ try:
191
+ output = get_response_from_openai(prompt_0shot)
192
+ except:
193
+ output = "<Remember to input your OpenAI API key :)>"
194
  else:
195
  RuntimeError(f"No such LLM: {llm}")
196
 
 
246
 
247
  gr.Examples(
248
  examples=[
249
+ ["deplot_case_study_4.png", "What are the acceptance rates?", "gpt-3.5-turbo"],
250
+ ["deplot_case_study_4.png", "How does the acceptance rate change over the years?", "gpt-3.5-turbo"],
251
+ ["deplot_case_study_m1.png", "Summarise the chart for me please.", "gpt-3.5-turbo"],
252
  ["deplot_case_study_m1.png", "What is the sum of numbers of Indonesia and Ireland? Remember to think step by step.", "alpaca-lora"],
 
253
  ["deplot_case_study_3.png", "By how much did China's growth rate drop? Think step by step.", "alpaca-lora"],
254
  ["deplot_case_study_4.png", "How many papers are submitted in 2020?", "alpaca-lora"],
255
  ["deplot_case_study_x2.png", "Summarise the chart for me please.", "alpaca-lora"],
256
  ["deplot_case_study_4.png", "How many papers are submitted in 2020?", "flan-ul2"],
257
+ #["deplot_case_study_m1.png", "Summarise the chart for me please.", "alpaca-lora"],
258
+ #["deplot_case_study_4.png", "acceptance rate = # accepted / #submitted . What is the acceptance rate of 2010?", "flan-ul2"],
259
+ #["deplot_case_study_m1.png", "Summarise the chart for me please.", "flan-ul2"],
260
  ],
261
  cache_examples=True,
262
  inputs=[input_image, instruction, llm],