import gradio as gr
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
import os
import pandas as pd
import numpy as np
from groq import Groq
import anthropic
from users_management import update_json, users
from code_df_custom import load_excel
import zipfile
from openai import OpenAI, BadRequestError, RateLimitError
from transformers import AutoTokenizer  # used by the oversized-input fallback in ask_llm
import time

#users = ['maksG', 'AlmaA', 'YchK']

def ask_llm(query, user_input, client_index, user, keys):
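    """Route a (query, user_input) pair to the provider named by client_index.

    client_index selects the provider/model (e.g. "Mistral Small",
    "Claude Opus", "Groq Llama3 70b"); user maps providers to per-user API
    key env-var names; keys is the mutable [current_groq_key, all_groq_keys]
    pair used for key rotation. Returns (answer, keys) so callers can keep
    the rotated key state.
    """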
    messages = [
        {
            "role": "system",
            "content": f"You are a helpful assistant. Only show your final response to the **User Query**! Do not provide any explanations or details: \n# User Query:\n{query}."
        },
        {
            "role": "user",
            "content": user_input,
        }
    ]
    
    systemC = messages[0]["content"]
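    # messageC repackages the same user input in Anthropic's content-block
    # format (a list of typed blocks); Claude takes the system prompt as a
    # separate parameter, split out above as systemC.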
    messageC = [{
        "role": "user",
        "content": [{
            "type": "text",
            "text": user_input
        }]
    }]

    try:
        if "Mistral" in client_index:
            client = MistralClient(api_key=os.environ[user['api_keys']['mistral']])
            model_map = {
                "Mistral Tiny": "mistral-tiny",
                "Mistral Small": "mistral-small-latest",
                "Mistral Medium": "mistral-medium",
            }
            chat_completion = client.chat(messages=messages, model=model_map[client_index])
    
        elif "Claude" in client_index:
            client = anthropic.Anthropic(api_key=os.environ[user['api_keys']['claude']])
            model_map = {
                "Claude Sonnet": "claude-3-sonnet-20240229",
                "Claude Opus": "claude-3-opus-20240229",
            }
            response = client.messages.create(
                model=model_map[client_index],
                max_tokens=350,
                temperature=0,
                system=systemC,
                messages=messageC
            )
            return response.content[0].text, keys
    
        elif "GPT 4o" in client_index:
            client = OpenAI(api_key=os.environ["OPENAI_YCHK"])
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=messages  # full role list, so the system prompt carrying the query is included
            )
            return response.choices[0].message.content, keys
    
        elif "Perplexity" in client_index:
            client = OpenAI(api_key=os.environ["PERPLEXITY_ALMAA"], base_url="https://api.perplexity.ai")
            model_map = {
                "Perplexity Llama3 70b": "llama-3-70b-instruct",
                "Perplexity Llama3 8b": "llama-3-8b-instruct",
                "Perplexity Llama3 Sonar Small": "llama-3-sonar-small-32k-chat",
                "Perplexity Llama3 Sonar Large": "llama-3-sonar-large-32k-chat"
            }
            
            response = client.chat.completions.create(
                model=model_map[client_index],
                messages=messages  # full role list, so the system prompt carrying the query is included
            )

            responseContent = str(response.choices[0].message.content)
            print(responseContent)
            return responseContent, keys
            
        elif "Groq" in client_index:
            try:
                client = Groq(api_key= os.getenv(keys[0]))
                model_map = {
                    "Groq Mixtral": "mixtral-8x7b-32768",
                    "Groq Llama3 70b": "llama3-70b-8192",
                    "Groq Llama3 8b": "llama3-8b-8192"
                }
                chat_completion = client.chat.completions.create(
                    messages=messages,
                    model=model_map[client_index],
                )
                response = chat_completion.choices[0].message.content
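            # A failure here is typically a Groq rate limit: rotate to the
            # next key and retry once with the smaller Llama3 8b model.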
            except Exception as e:
                print("Switching Groq API key")
                # Advance to the next key in the rotation, wrapping around.
                idx = keys[1].index(keys[0])
                keys[0] = keys[1][(idx + 1) % len(keys[1])]
                    
                client = Groq(api_key= os.getenv(keys[0]))
                chat_completion = client.chat.completions.create(
                    messages=messages,
                    model='llama3-8b-8192',
                )
                response = chat_completion.choices[0].message.content
        else:
            raise ValueError("Unsupported client index provided")

    
        # Only the Mistral and Groq branches reach this point; the others
        # return inside their own blocks above.
        return chat_completion.choices[0].message.content, keys
        
    except BadRequestError as e:
        # The input was too large for the model: tokenize it, split it into
        # chunks, and query the model chunk by chunk. This path assumes the
        # error came from the Perplexity branch, so `client` and `model_map`
        # are still in scope.
        model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
        access_token = os.getenv("HUGGINGFACE_SPLITFILES_API_KEY")
        max_token = 30000  # per-chunk token budget (assumed value; tune to the model's context window)

        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            padding_side="left",
            token=access_token
        )

        user_input_tokenized = tokenizer.encode(user_input)
        messages = []

        # Chunk the tokenized input, keeping the final shorter chunk as well.
        while user_input_tokenized:
            user_input_divided = tokenizer.decode(user_input_tokenized[:max_token])
            messages.append([
                {
                    "role": "system",
                    "content": f"You are a helpful assistant. Only show your final response to the **User Query**! Do not provide any explanations or details: \n# User Query:\n{query}."
                },
                {
                    "role": "user",
                    "content": user_input_divided,
                }
            ])
            user_input_tokenized = user_input_tokenized[max_token:]

        responses = []

        print(f"Split input into {len(messages)} chunks")
        for msg in messages:
            responses.append(client.chat.completions.create(
                model=model_map["Perplexity Llama3 70b"],
                messages=msg
            ))

        # Stitch the per-chunk answers back together.
        response = " ".join(resp.choices[0].message.content for resp in responses)
        return response, keys

    except RateLimitError as e:
        # TODO: swap to an alternative API key when one is available instead
        # of sleeping out the whole rate-limit window.
        time.sleep(60)
        return ask_llm(query, user_input, client_index, user, keys)

    except Exception as e:
        print(e)
        return "unhandled error", keys


def filter_df(df, column_name, keywords):
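    """Return the rows of df matching any of the keywords in column_name.

    If column_name is not an actual column (e.g. "[ALL]"), every cell of
    each row is searched instead. With no keywords, df is returned unchanged.
    """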
    if len(keywords) > 0:
        if column_name in df.columns:
            contains_keyword = lambda x: any(keyword.lower() in (x.lower() if isinstance(x, str) else '') for keyword in keywords)
            filtered_df = df[df[column_name].apply(contains_keyword)]
        else:
            contains_keyword = lambda row: any(keyword.lower() in (str(cell).lower() if isinstance(cell, str) else '') for keyword in keywords for cell in row)
            filtered_df = df[df.apply(contains_keyword, axis=1)]
    else:
        filtered_df = df
    return filtered_df
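
# A minimal illustration of filter_df on made-up data (not part of the app):
#   demo = pd.DataFrame({"Topic": ["Budget review", "Hiring plan"]})
#   filter_df(demo, "Topic", ["budget"])  # keeps only the "Budget review" row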

def chat_with_mistral(source_cols, dest_col, prompt, excel_file, url, search_col, keywords, client, user):
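    """Apply `prompt` to each (keyword-filtered) row of the spreadsheet.

    The selected source_cols are concatenated per row, sent to the chosen
    LLM via ask_llm, and the answers written into dest_col. The result is
    saved to a new .xlsx with a periodic checkpoint file, and the updated
    user config is zipped for download.
    """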
    # Groq API key env-var names; ask_llm rotates through them on failures.
    KEYS = ['GROQ_API_KEY1', 'GROQ_API_KEY2', 'GROQ_API_KEY3']
    global_keys = [KEYS[0], KEYS]
    
    new_prompts, new_keywords, new_user, conf_file_path = update_json(user, prompt, keywords)
    print(f'xlsxfile = {excel_file}')
    df = pd.read_excel(excel_file)
    df[dest_col] = ""
    if excel_file:
        file_name = excel_file.split('.xlsx')[0] + "_with_" + dest_col.replace(' ', '_') + ".xlsx"
    elif url.endswith(('Docs/', 'Docs')):
        file_name = url.split("/Docs")[0].split("/")[-1] + ".xlsx"
    else:
        file_name = "meeting_recap_grid.xlsx"

    print(f"Keywords: {keywords}")

    filtered_df = filter_df(df, search_col, keywords)

    cpt = 1
    checkpoint = 50
    for index, row in filtered_df.iterrows():
        concatenated_content = "\n\n".join(f"{column_name}: {str(row[column_name])}" for column_name in source_cols)
        if not concatenated_content == "\n\n".join(f"{column_name}: nan" for column_name in source_cols):

            try:
                llm_answer, global_keys = ask_llm(prompt[0], concatenated_content, client, user, global_keys)
            except Exception:
                print("Caught an error: global exception from ask_llm")
                llm_answer = "unhandled global error"
            
            print(f"{cpt}/{len(filtered_df)}\nQUERY:\n{prompt[0]}\nCONTENT:\n{concatenated_content[:200]}...\n\nANSWER:\n{llm_answer}")
            df.at[index, dest_col] = llm_answer

            try:
                # Persist an intermediate checkpoint every 50 processed rows.
                if cpt == checkpoint:
                    df.to_excel("checkpointfile.xlsx", index=False)
                    checkpoint += 50
                    
            except Exception as e:
                print(f"no checkpoint : {e}")
            
            cpt += 1
            # progress((index+1)/len(df),desc=f'Request {index+1}/{len(df)}!')

    df.to_excel(file_name, index=False)
    
    # Package the updated user config alongside the results for download.
    zip_file_path = 'config_file.zip'

    with zipfile.ZipFile(zip_file_path, 'w') as zipf:
        zipf.write(conf_file_path, os.path.basename(conf_file_path))
        
    return file_name, df.head(5), new_prompts, new_keywords, new_user, zip_file_path, "checkpointfile.xlsx"


def get_columns(file, progress=gr.Progress()):
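    """Read the uploaded file and refresh the column-selection dropdowns.

    Returns Gradio updates for the source/destination/search selectors, a
    five-row preview, the resolved filename, and the full DataFrame.
    """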
    if file is not None:
        #df = pd.read_excel(file)
        filename, df = load_excel(file)
        columns = list(df.columns)
        return gr.update(choices=columns), gr.update(choices=columns), gr.update(choices=columns), gr.update(choices=columns + [""]), gr.update(choices=columns + ['[ALL]']), df.head(5), filename, df
    else:
        return gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]), pd.DataFrame(), '', pd.DataFrame()