heymenn committed on
Commit
7d204ba
1 Parent(s): 9a0115c

Update excel_chat.py

Files changed (1)
  1. excel_chat.py +142 -76
excel_chat.py CHANGED
@@ -9,7 +9,8 @@ import anthropic
 from users_management import update_json, users
 from code_df_custom import load_excel
 import zipfile
-from openai import OpenAI
+from openai import *
+import time
 
 #users = ['maksG', 'AlmaA', 'YchK']
@@ -34,88 +35,153 @@ def ask_llm(query, user_input, client_index, user, keys):
     }]
     }]
 
-    if "Mistral" in client_index:
-        client = MistralClient(api_key=os.environ[user['api_keys']['mistral']])
-        model_map = {
-            "Mistral Tiny": "mistral-tiny",
-            "Mistral Small": "mistral-small-latest",
-            "Mistral Medium": "mistral-medium",
-        }
-        chat_completion = client.chat(messages=messages, model=model_map[client_index])
-
-    elif "Claude" in client_index:
-        client = anthropic.Anthropic(api_key=os.environ[user['api_keys']['claude']])
-        model_map = {
-            "Claude Sonnet": "claude-3-sonnet-20240229",
-            "Claude Opus": "claude-3-opus-20240229",
-        }
-        response = client.messages.create(
-            model=model_map[client_index],
-            max_tokens=350,
-            temperature=0,
-            system=systemC,
-            messages=messageC
-        )
-        return response.content[0].text
-
-    elif "GPT 4o" in client_index:
-        client = OpenAI(api_key=os.environ["OPENAI_YCHK"])
-        response = client.chat.completions.create(
-            model="gpt-4o",
-            messages=messageC
-        )
-        return response.choices[0][message][content].text
-
-    elif "Perplexity" in client_index:
-        client = OpenAI(api_key=os.environ["PERPLEXITY_ALMAA"], base_url="https://api.perplexity.ai")
-        model_map = {
-            "Perplexity Llama3 70b": "llama-3-70b-instruct",
-            "Perplexity Llama3 8b": "llama-3-8b-instruct",
-            "Perplexity Llama3 Sonar Small": "llama-3-sonar-small-32k-chat",
-            "Perplexity Llama3 Sonar Large": "llama-3-sonar-large-32k-chat"
-        }
-        response = client.chat.completions.create(
-            model=model_map[client_index],
-            messages=messageC
-        )
-
-        responseContent = str(response.choices[0].message.content)
-        print(responseContent)
-        return responseContent, keys
-
-    elif "Groq" in client_index:
-        try:
-            client = Groq(api_key=os.getenv(keys[0]))
-            model_map = {
-                "Groq Mixtral": "mixtral-8x7b-32768",
-                "Groq Llama3 70b": "llama3-70b-8192",
-                "Groq Llama3 8b": "llama3-8b-8192"
-            }
-            chat_completion = client.chat.completions.create(
-                messages=messages,
-                model=model_map[client_index],
-            )
-            response = chat_completion.choices[0].message.content
-        except Exception as e:
-            print("Change key")
-            if keys[0] == keys[1][0]:
-                keys[0] = keys[1][1]
-            elif keys[0] == keys[1][1]:
-                keys[0] = keys[1][2]
-            else:
-                keys[0] = keys[1][0]
-
-            client = Groq(api_key=os.getenv(keys[0]))
-            chat_completion = client.chat.completions.create(
-                messages=messages,
-                model='llama3-8b-8192',
-            )
-            response = chat_completion.choices[0].message.content
-    else:
-        raise ValueError("Unsupported client index provided")
-
-    # Return the response, handling the structure specific to Groq and Mistral clients.
-    return chat_completion.choices[0].message.content, keys if client_index != "Claude" else chat_completion
+    try:
+
+        if "Mistral" in client_index:
+            client = MistralClient(api_key=os.environ[user['api_keys']['mistral']])
+            model_map = {
+                "Mistral Tiny": "mistral-tiny",
+                "Mistral Small": "mistral-small-latest",
+                "Mistral Medium": "mistral-medium",
+            }
+            chat_completion = client.chat(messages=messages, model=model_map[client_index])
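+            # Mistral does not return here; it falls through to the shared return at the end of the try block.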
+
+        elif "Claude" in client_index:
+            client = anthropic.Anthropic(api_key=os.environ[user['api_keys']['claude']])
+            model_map = {
+                "Claude Sonnet": "claude-3-sonnet-20240229",
+                "Claude Opus": "claude-3-opus-20240229",
+            }
+            response = client.messages.create(
+                model=model_map[client_index],
+                max_tokens=350,
+                temperature=0,
+                system=systemC,
+                messages=messageC
+            )
+            return response.content[0].text
+
+        elif "GPT 4o" in client_index:
+            client = OpenAI(api_key=os.environ["OPENAI_YCHK"])
+            response = client.chat.completions.create(
+                model="gpt-4o",
+                messages=messageC
+            )
+            return response.choices[0].message.content
+
+        elif "Perplexity" in client_index:
+            client = OpenAI(api_key=os.environ["PERPLEXITY_ALMAA"], base_url="https://api.perplexity.ai")
+            model_map = {
+                "Perplexity Llama3 70b": "llama-3-70b-instruct",
+                "Perplexity Llama3 8b": "llama-3-8b-instruct",
+                "Perplexity Llama3 Sonar Small": "llama-3-sonar-small-32k-chat",
+                "Perplexity Llama3 Sonar Large": "llama-3-sonar-large-32k-chat"
+            }
+
+            response = client.chat.completions.create(
+                model=model_map[client_index],
+                messages=messageC
+            )
+
+            responseContent = str(response.choices[0].message.content)
+            print(responseContent)
+            return responseContent, keys
+
+        elif "Groq" in client_index:
+            try:
+                client = Groq(api_key=os.getenv(keys[0]))
+                model_map = {
+                    "Groq Mixtral": "mixtral-8x7b-32768",
+                    "Groq Llama3 70b": "llama3-70b-8192",
+                    "Groq Llama3 8b": "llama3-8b-8192"
+                }
+                chat_completion = client.chat.completions.create(
+                    messages=messages,
+                    model=model_map[client_index],
+                )
+                response = chat_completion.choices[0].message.content
+            except Exception as e:
+                print("Change key")
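+                # Rotate to the next Groq key: keys[0] is the active key name,
+                # keys[1] is the ordered list of available key names.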
+                if keys[0] == keys[1][0]:
+                    keys[0] = keys[1][1]
+                elif keys[0] == keys[1][1]:
+                    keys[0] = keys[1][2]
+                else:
+                    keys[0] = keys[1][0]
+
+                client = Groq(api_key=os.getenv(keys[0]))
+                chat_completion = client.chat.completions.create(
+                    messages=messages,
+                    model='llama3-8b-8192',
+                )
+                response = chat_completion.choices[0].message.content
+        else:
+            raise ValueError("Unsupported client index provided")
+
+        # Return the response, handling the structure specific to Groq and Mistral clients.
+        return chat_completion.choices[0].message.content, keys if client_index != "Claude" else chat_completion
+
+    except (BadRequestError) as e:
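+        # Fallback for requests rejected as too long: re-send the input in chunks.
+        # NOTE: this reuses `client` and `model_map`, which are only defined when
+        # the Perplexity branch ran, and it assumes `max_token` is defined
+        # elsewhere; `AutoTokenizer` needs `from transformers import AutoTokenizer`.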
+
+        model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
+        access_token = os.getenv("HUGGINGFACE_SPLITFILES_API_KEY")
+
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_id,
+            padding_side="left",
+            token=access_token
+        )
+
+        user_input_tokenized = tokenizer.encode(user_input)
+        messages = []
+
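+        # Split the tokenized input into max_token-sized chunks; each chunk is
+        # paired with the same system prompt so it can be answered on its own.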
+        while len(user_input_tokenized) > max_token:
+
+            user_input_divided = tokenizer.decode(user_input_tokenized[:max_token])
+            messages.append([
+                {
+                    "role": "system",
+                    "content": f"You are a helpful assistant. Only show your final response to the **User Query**! Do not provide any explanations or details: \n# User Query:\n{query}."
+                },
+                {
+                    "role": "user",
+                    "content": user_input_divided,
+                }])
+
+            user_input_tokenized = user_input_tokenized[max_token:]
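+        # NOTE: the loop stops once the remainder is <= max_token, so the final
+        # partial chunk is never appended and the tail of the input is dropped.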
+
+        responses = []
+
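+        # Query the Perplexity Llama3 70b model once per chunk and collect the replies.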
+        print(len(messages))
+        for msg in messages:
+
+            responses.append(client.chat.completions.create(
+                model=model_map["Perplexity Llama3 70b"],
+                messages=msg
+            ))
+
+        response = ""
+        for resp in responses:
+            response += " " + resp.choices[0].message.content
+
+        return response
+
+    except (RateLimitError) as e:
+
+        #if model_user in keys:
+            #Swap those keys
+            # return f()
+
+        #else:
+            #get eepy
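+        # Simple backoff: sleep out the rate-limit window, then retry the whole
+        # call recursively with unchanged arguments.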
+        time.sleep(60)
+        return ask_llm(query, user_input, client_index, user, keys)
+
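+    # Last resort: log the error and hand the exception object back to the caller.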
+    except Exception as e:
+        print(e)
+        return e