Zeel committed (verified)
Commit 8faaeb8 · Parent(s): 4f527ae

Update app.py

Files changed (1)
  1. app.py +10 -3
app.py CHANGED
@@ -17,6 +17,7 @@ st.set_page_config(layout="wide")
 load_dotenv()
 Groq_Token = os.environ["GROQ_API_KEY"]
 hf_token = os.environ["HF_TOKEN"]
+gemini_token = os.environ["GEMINI_TOKEN"]
 models = {"llama3":"llama3-70b-8192","mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it", "gemini-pro": "gemini-pro"}

 self_path = os.path.dirname(os.path.abspath(__file__))
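
The first hunk only wires in the new Gemini credential next to the existing keys. A minimal sketch of the pattern (assuming python-dotenv), with a caveat visible in the later hunks: the Groq key is loaded here as GROQ_API_KEY but fetched inside the retry loop as os.getenv("GROQ_API"), so both variables must be set for the Groq path to actually receive a key.

```python
import os

from dotenv import load_dotenv  # python-dotenv

# Read keys from a local .env file into the process environment.
load_dotenv()

# os.environ[...] raises KeyError as soon as a key is missing, surfacing
# misconfiguration at startup rather than at request time.
Groq_Token = os.environ["GROQ_API_KEY"]
hf_token = os.environ["HF_TOKEN"]
gemini_token = os.environ["GEMINI_TOKEN"]  # added by this commit
```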
@@ -205,7 +206,10 @@ if prompt:
     ran = False
     for i in range(1):
         print(f"Attempt {i+1}")
-        llm = ChatGroq(model=models[model_name], api_key=os.getenv("GROQ_API"), temperature=0)
+        if model_name == "gemini-pro":
+            llm = GoogleGenerativeAI(model=model, google_api_key=os.getenv("GEMINI_TOKEN"), temperature=0)
+        else:
+            llm = ChatGroq(model=models[model_name], api_key=os.getenv("GROQ_API"), temperature=0)

         df_check = pd.read_csv("Data.csv")
         df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
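
The second hunk branches LLM construction on the selected model. Below is a self-contained sketch of that branch, assuming the langchain_groq and langchain_google_genai packages the app appears to use; the make_llm helper is hypothetical (the commit inlines the branch in its retry loop). Note the commit passes model=model in the Gemini branch, and whether a variable named model is in scope there is not visible in this diff; models[model_name], which also evaluates to "gemini-pro", would mirror the Groq branch, so the sketch uses that.

```python
import os

from langchain_groq import ChatGroq
from langchain_google_genai import GoogleGenerativeAI

models = {"llama3": "llama3-70b-8192", "gemini-pro": "gemini-pro"}

def make_llm(model_name: str):
    # Hypothetical helper wrapping the commit's if/else on model_name.
    if model_name == "gemini-pro":
        return GoogleGenerativeAI(
            model=models[model_name],  # the commit writes model=model here
            google_api_key=os.getenv("GEMINI_TOKEN"),
            temperature=0,
        )
    return ChatGroq(
        model=models[model_name],
        api_key=os.getenv("GROQ_API"),
        temperature=0,
    )
```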
@@ -263,10 +267,13 @@ import uuid
         code = None
         error = None
         try:
-            answer = llm.invoke(query)
+            if model_name == "gemini-pro":
+                answer = llm.invoke(query)
+            else:
+                answer = llm.invoke(query).content
             code = f"""
 {template.split("```python")[1].split("```")[0]}
-{answer.content.split("```python")[1].split("```")[0]}
+{answer.split("```python")[1].split("```")[0]}
             """
             # update variable `answer` when code is executed
             exec(code)
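
The third hunk exists because the two wrappers return different types: a chat model like ChatGroq returns a message object whose .content holds the text, while an LLM wrapper like GoogleGenerativeAI returns the string itself. Once answer is a plain string, the shared fence-extraction line works for both providers. A small runnable sketch of the idea, under that assumption (the helper names are illustrative, not from the app):

```python
def extract_python_block(text: str) -> str:
    # Return the body of the first ```python ... ``` fence in the reply.
    # Raises IndexError when no fence is present, which the surrounding
    # try/except turns into another pass of the retry loop.
    return text.split("```python")[1].split("```")[0]

def to_text(response) -> str:
    # Normalize chat-model replies (message objects with .content) and
    # plain-string LLM replies to one type; this is what the commit's
    # if/else on model_name accomplishes.
    return response if isinstance(response, str) else response.content

reply = "Sure, here is the code:\n```python\nanswer = 42\n```"
print(extract_python_block(to_text(reply)))  # -> "\nanswer = 42\n"
```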