import gradio as gr
import openai
import requests
import csv
import os
import langchain
import chromadb
import glob


from PyPDF2 import PdfReader
from PyPDF2 import PdfWriter

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter, TokenTextSplitter
from langchain.vectorstores import Chroma, ElasticVectorSearch, Pinecone, Weaviate, FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import ChatVectorDBChain
from langchain.chains import RetrievalQA
from langchain.callbacks import get_openai_callback
from langchain.document_loaders import PyPDFLoader

# Chroma was used in Colab to create the vector embeddings, which were then
# saved to HuggingFace, so it has to be set to use them from here.
#from chromadb.config import Settings
#client = chromadb.Client(Settings(
#    chroma_db_impl="duckdb+parquet",
#    persist_directory="./embeddings" # Optional, defaults to .chromadb/ in the current directory
#))
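# A minimal sketch (assumption: the store was persisted to ./embeddings as
# above) of loading the saved embeddings back through LangChain instead of
# re-embedding on startup:
#embeddings = OpenAIEmbeddings()
#vectordb = Chroma(persist_directory="./embeddings", embedding_function=embeddings)
#retriever = vectordb.as_retriever()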






# Per-session state: a running token total plus the alternating
# user/assistant message history.
def get_empty_state():
    return {"total_tokens": 0, "messages": []}

    
# Initial prompt template; the others are added below from a TXT file
prompt_templates = {"All Needs Experts": "I want you to act as a needs assessment expert."}

# Fetch the expert prompts from gurus.txt on the Space (a CSV with a header
# row: expert name in column 0, prompt in column 1).
def download_prompt_templates():
    url = "https://huggingface.co/spaces/ryanrwatkins/needs/raw/main/gurus.txt"
    try:
        response = requests.get(url)
        reader = csv.reader(response.text.splitlines())
        next(reader)  # skip the header row
        for row in reader:
            if len(row) >= 2:
                act = row[0].strip('"')
                prompt = row[1].strip('"')
               # description = row[2].strip('"')
                prompt_templates[act] = prompt


    except requests.exceptions.RequestException as e:
        print(f"An error occurred while downloading prompt templates: {e}")

    # Keep the default entry on top and sort the rest; on a download error
    # this still returns the initial template.
    choices = list(prompt_templates.keys())
    choices = choices[:1] + sorted(choices[1:])
    return gr.update(value=choices[0], choices=choices)

def on_prompt_template_change(prompt_template):
    if not isinstance(prompt_template, str):
        return
    return prompt_templates[prompt_template]



# Main handler: merge the PDFs in ./files, split the text into chunks, embed
# them, retrieve the chunks most relevant to the question, and answer with a
# "stuff" question-answering chain.
def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):

    openai.api_key = os.environ['openai_key']
    os.environ["OPENAI_API_KEY"] = os.environ['openai_key']
    
    # One-time alternative: load the PDFs with PyPDFLoader and persist a
    # Chroma index, instead of rebuilding embeddings per request.
    #path = './files'
    #pdf_files = glob.glob(os.path.join(path, "*.pdf"))

    #for file in pdf_files:
    #  loader = PyPDFLoader(file)
    #  pages = loader.load_and_split()
    #  text_splitter = TokenTextSplitter(chunk_size=1000, chunk_overlap=0)
    #  split_pages = text_splitter.split_documents(pages)

    #persist_directory = "./embeddings"
    #embeddings = OpenAIEmbeddings()
    #vectordb = Chroma.from_documents(split_pages, embeddings, persist_directory=persist_directory)
    #vectordb.persist()

    path = './files'
    pdf_files = glob.glob(os.path.join(path, "*.pdf"))
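    # Merge every PDF into one file so a single PdfReader pass can extract all
    # the text. (Assumption: a PyPDF2/pypdf release where PdfWriter supports
    # append(); older PyPDF2 versions used a separate PdfMerger class.)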
    
    merger = PdfWriter()

    # append all files in the list to the merger object
    for pdf in pdf_files:
        merger.append(pdf)
    merger.write("merged-pdf.pdf")
    merger.close()
    
    reader = PdfReader("merged-pdf.pdf")
    raw_text = ''
    for i, page in enumerate(reader.pages):
        text = page.extract_text()
        if text:
            raw_text += text

    # Split on newlines into roughly 1,000-character chunks that overlap by
    # 200 characters, so answers spanning a chunk boundary are not lost.
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    texts = text_splitter.split_text(raw_text)
    embeddings = OpenAIEmbeddings()
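    # NOTE: the merge and split above, and the FAISS embedding below, run on
    # every submitted message; persisting the index once (e.g. the
    # commented-out Chroma setup above) and loading it here would avoid
    # re-embedding each time.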
        
    
    history = state['messages']

    if not prompt:
        return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state
    
    prompt_template = prompt_templates[prompt_template]

    system_prompt = []
    if prompt_template:
        system_prompt = [{ "role": "system", "content": prompt_template }]

    prompt_msg = { "role": "user", "content": prompt }


    try:
        # Earlier alternatives, kept for reference: a plain ChatCompletion call,
        # or a RetrievalQA chain over the persisted Chroma store.
        #completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
        #completion_chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff")
        #completion = RetrievalQA.from_chain_type(llm=ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff", retriever=vectordb.as_retriever(), return_source_documents=True)
        #completion = completion({"query": str(system_prompt + history[-context_length*2:] + [prompt_msg])})
        # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8

        # Build an in-memory FAISS index over the chunks and retrieve those
        # most similar to the system prompt, recent history, and question.
        docsearch = FAISS.from_texts(texts, embeddings)
        query = str(system_prompt + history[-int(context_length)*2:] + [prompt_msg])
        docs = docsearch.similarity_search(query)

        # "stuff" chain: the retrieved chunks are placed directly into a single
        # prompt for the chat model.
        chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff")
        with get_openai_callback() as cb:
            completion = chain.run(input_documents=docs, question=query)
        completion = { "role": "assistant", "content": completion }
        
        # cf. https://colab.research.google.com/drive/1dzdNDZyofRB0f2KIB4gHXmIza7ehMX30?usp=sharing#scrollTo=b-ejDn_JfpWW

        history.append(prompt_msg.copy())
        history.append(completion.copy())

        # chain.run returns plain text with no usage metadata, so token counts
        # come from the OpenAI callback above.
        state['total_tokens'] += cb.total_tokens
    
    except Exception as e:
        history.append(prompt_msg.copy())
        error = {
            "role": "system",
            "content": f"Error: {e}"
        }
        history.append(error.copy())

    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"

    # Rebuild the chat pairs from history so this works whether the last turn
    # was a completion or an error message.
    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]

    return '', chat_messages, total_tokens_used_msg, state

# Reset the input box, chat window, token counter, and session state.
def clear_conversation():
    return gr.update(value=None, visible=True), None, "", get_empty_state()


css = """
      #col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
      #chatbox {min-height: 400px;}
      #header {text-align: center;}
      #prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
      #total_tokens_str {text-align: right; font-size: 0.8em; color: #666;}
      #label {font-size: 0.8em; padding: 0.5em; margin: 0;}
      .message { font-size: 1.2em; }
      """

with gr.Blocks(css=css) as demo:
    
    state = gr.State(get_empty_state())


    with gr.Column(elem_id="col-container"):

        gr.Markdown("""# Chat with Needs Assessment Experts (Past and Present)
                    ## Ask questions of experts on needs assessment and get responses from a *needs assessment* version of ChatGPT.
                    Ask questions of all of them, or pick your expert.""",
                    elem_id="header")
        
 
        
        
        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox")
                input_message = gr.Textbox(show_label=False, placeholder="Enter your needs assessment question and press enter", visible=True).style(container=False)
                btn_submit = gr.Button("Submit")
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                btn_clear_conversation = gr.Button("Start New Conversation")
            with gr.Column():
                prompt_template = gr.Dropdown(label="Choose an expert:", choices=list(prompt_templates.keys()))
                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Flexibility", info="Higher = more creative/chaotic, Lower = just the guru")
                    max_tokens = gr.Slider(minimum=100, maximum=400, value=200, step=1, label="Max tokens per response")
                    context_length = gr.Slider(minimum=1, maximum=5, value=2, step=1, label="Context length", info="Number of previous question/answer turns sent back to the model. Be careful with high values; they can blow up the token budget quickly.")

   
    btn_submit.click(submit_message, [ input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
    input_message.submit(submit_message, [ input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
   

    
    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template], queue=False)


# Process up to 10 queued requests concurrently.
demo.queue(concurrency_count=10)
demo.launch(height='800px')