import os
import time

import gradio as gr
import pandas as pd
from model import Model
from tqdm import tqdm
from gradio_pdf import PDF


tqdm.pandas()
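# Registers tqdm's progress_apply/progress_map helpers on pandas objects.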

OUTPUT_FILE = "./results_qa.csv"
TOC_FILE = "./TOC.pdf"

def new_vote(data: gr.LikeData, question, model_name, **kwargs):
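    """Record thumbs-up/thumbs-down feedback for the matching model/question rows in OUTPUT_FILE."""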
    feedback = "Good" if data.liked else "Bad"
    df = pd.read_csv(OUTPUT_FILE)
    df['Feedback'] = df.apply(lambda x: feedback if (x.Model == model_name and x.Question == question) else None, axis = 1)
    df.to_csv(OUTPUT_FILE, index=False)
    
# def answer_question(question: str, model_name: str, system_prompt: str):
#     start_time = time.time()
#     qa_model = Model(model_name=model_name)
#     response, sources = qa_model.run(system_prompt=system_prompt, query=question)
#     time_taken = time.time() - start_time
#     words = len(question) + len(response)
#     efficiency = words / time_taken
#     final_response = f"{response} \n\nTime Taken: {time_taken}"
#     new_row = {'Model': model_name, 'Question': question, 'Answer': response, "Sources": sources, "Time": time_taken, "Words": words, "Efficiency": efficiency, "Feedback": None, "final_response": final_response}
#     if os.path.isfile(OUTPUT_FILE):
#         df = pd.read_csv(OUTPUT_FILE)
#         rows = df.values.tolist()
#         # print("df.values.tolist(): ", df.values.tolist())
#         # df = df.append(new_row, ignore_index=True)
#         rows.append(new_row)
#     else:
#         rows = [new_row]
#     df = pd.DataFrame(rows)
#     df.to_csv(OUTPUT_FILE, index=False)
#     yield [(question, final_response)]

def answer_question(question: str, model_name: str, system_prompt: str):
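    """Stream an answer to `question` from the selected model and log it to OUTPUT_FILE.

    Model.run is assumed (from its use below) to be a generator yielding response
    text chunks and, as its final item, a list of source strings. Each yield is a
    (chat history, csv path) pair so the UI updates while the answer streams in.
    """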
    start_time = time.time()
    qa_model = Model(model_name=model_name)
    gen_response = qa_model.run(system_prompt=system_prompt, query=question)
    response = ""
    for resp in gen_response:
        if isinstance(resp, list):
            sources = resp
            break
        resp = resp.replace("$", "₹") 
        response += resp
        yield [(question, response)], OUTPUT_FILE
        
    time_taken = time.time() - start_time
    words = len(question.split()) + len(response.split())
    efficiency = words / time_taken  # words processed per second
    temp_sources = "\n".join([f"{i + 1}. {d}" for i, d in enumerate(sources)])
    final_response = f"{response} \n\nSources: \n{temp_sources} \n\nTime Taken: {time_taken:.2f}s"
    new_row = {"Model": model_name, "Question": question, "Answer": response, "Sources": sources, "Time": time_taken, "Words": words, "Efficiency": efficiency, "Feedback": None, "final_response": final_response}
    if os.path.isfile(OUTPUT_FILE):
        try:
            df = pd.read_csv(OUTPUT_FILE)
            rows = df.to_dict(orient="records")
            rows.append(new_row)
        except Exception:
            rows = [new_row]
    else:
        rows = [new_row]
    
    df = pd.DataFrame(rows)
    df.to_csv(OUTPUT_FILE, index=False)
    final_response = final_response.strip("Question").strip("\n")
    final_response = final_response.strip("\n").strip(" ").strip("Answer:").strip("Question").strip("\n").replace("Answer:", "")
    yield [(question, final_response)], OUTPUT_FILE
    
    

if __name__ == "__main__":
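    # Gradio UI: chatbot output, editable system prompt, query box, model dropdown,
    # an embedded table-of-contents PDF, and a download widget for the results CSV.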
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        
        # system_prompt = """Answer the question using the context. Provide examples only from the context and use only Rupees (₹) in examples. If you don't know the answer, just say 'Please rephrase the question I am unable to answer'"""
        system_prompt = """"Answer the question using the context. Provide examples only from the context and use only Rupees (₹) in examples. If you don't know the answer, just say 'Please rephrase the question I am unable to answer'"""
        system_prompt = "Use the following pieces of book to answer the question at the end. \nIf you don't know the answer, please think rationally and answer from the book"
        system_prompt = """Answer the question using the context. Provide examples only from the context and use only Rupees (₹) in examples. If you don't know the answer, just say 'Please rephrase the question I am unable to answer'"""
        system_prompt = """Answer the question from the book. Provide examples only from the book. If you don't know the answer, just say 'Please rephrase the question'"""
        
        choices=["gpt4", "gpt-3.5-turbo"]
        
        system_prompt = gr.Textbox(value=system_prompt, label="System Prompt")
        textbox = gr.Textbox(label="Query")
        model_name = gr.Dropdown(choices=choices, value="gpt-3.5-turbo", label="Model")
        pdf = PDF(interactive=True, value=TOC_FILE, label="Table of Contents")

        file = gr.File(value=OUTPUT_FILE, file_types=["csv"], label="Output")
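        # textbox.submit streams chat updates (and the csv path) from answer_question;
        # chatbot.like sends thumbs-up/down feedback to new_vote.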
        textbox.submit(answer_question, [textbox, model_name, system_prompt], [chatbot, file])
        chatbot.like(new_vote, [textbox, model_name], None)    
    
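    # queue() enables Gradio's request queue, which is needed for the
    # generator-based streaming in answer_question.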
    demo.queue()
    demo.launch()