from fastapi import FastAPI
import os
import json
import pandas as pd
import time

import phoenix as px
from phoenix.trace.langchain import OpenInferenceTracer, LangChainInstrumentor


from langchain.embeddings import HuggingFaceEmbeddings  # for using HuggingFace models
from langchain.chains.question_answering import load_qa_chain
from langchain import HuggingFaceHub
from langchain.prompts import PromptTemplate

from langchain.chains import RetrievalQA
from langchain.callbacks import StdOutCallbackHandler

#from langchain.retrievers import KNNRetriever
from langchain.storage import LocalFileStore
from langchain.embeddings import CacheBackedEmbeddings
from langchain.vectorstores import FAISS


from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# from langchain import HuggingFaceHub
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from txtai.embeddings import Embeddings
# from txtai.pipeline import Extractor

# import pandas as pd
# import sqlite3
# import os

# NOTE - we configure docs_url to serve the interactive Docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
app = FastAPI(docs_url="/")

#phoenix setup
session = px.launch_app()
# If no exporter is specified, the tracer will export to the locally running Phoenix server
tracer = OpenInferenceTracer()
# If no tracer is specified, a tracer is constructed for you
LangChainInstrumentor(tracer).instrument()
print(session.url)
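# The Phoenix objects above hook into LangChain's callback system: each chain, LLM and
# retriever call is recorded as a span that can be inspected in the Phoenix UI at
# session.url, or pulled programmatically via px.active_session().get_spans_dataframe(),
# as the /rag and /trace endpoints below do.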


os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"

# embedding cache
store = LocalFileStore("./cache/")

# define embedder
core_embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
embedder = CacheBackedEmbeddings.from_bytes_store(core_embeddings_model, store)
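# CacheBackedEmbeddings writes each computed embedding to ./cache/ keyed by the input text,
# so re-indexing the same documents avoids repeat calls to the sentence-transformers model.
# A possible refinement (not used in this app) is to namespace the cache by model name so a
# later switch of embedding models does not reuse stale vectors, e.g.:
# embedder = CacheBackedEmbeddings.from_bytes_store(
#     core_embeddings_model, store, namespace=core_embeddings_model.model_name
# )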

# define llm
llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 1, "max_length": 1000000})
# llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"temperature": 1, "max_length": 1000000})
handler = StdOutCallbackHandler()

# set global variables
vectorstore = None
retriever = None


def initialize_vectorstore():

    webpage_loader = WebBaseLoader("https://www.tredence.com/case-studies/forecasting-app-installs-for-a-large-retailer-in-the-us").load()
    webpage_chunks = _text_splitter(webpage_loader)

    global vectorstore
    global retriever

    # store embeddings in vector store
    vectorstore = FAISS.from_documents(webpage_chunks, embedder)
    print("vector store initialized with sample doc")

    # instantiate a retriever
    retriever = vectorstore.as_retriever()
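    # as_retriever() defaults to plain similarity search; search_kwargs such as {"k": 4}
    # could be passed here to control how many chunks are returned per query.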


def _text_splitter(doc):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=600,
        chunk_overlap=50,
        length_function=len,
    )
    return text_splitter.transform_documents(doc)

def _load_docs(path: str):
    load_doc = WebBaseLoader(path).load()
    doc = _text_splitter(load_doc)
    return doc


@app.get("/index/")
def get_domain_file_path(file_path: str):
    print("file_path ", file_path)

    # _load_docs already fetches the page and splits it into chunks,
    # so the result can be added to the vector store directly
    webpage_chunks = _load_docs(file_path)

    # store embeddings in vector store
    vectorstore.add_documents(webpage_chunks)

    return "document loaded to vector store successfully!!"


def _prompt(question):
    return f"""Answer following question using only the context below. Say 'Could not find answer with provided context' when question can't be answered.
            Question: {question}
            Context: """



class BasePromptContext:
    def __init__(self):
        self.variables_list = ["question","answer","context"]
        self.base_template = """Please act as an impartial judge and evaluate the quality of the provided answer which attempts to answer the provided question based on a provided context.
And you'll need to submit your grading for the correctness, comprehensiveness and readability of the answer, using JSON format with the 2 items in parentheses:
("score": [your score number for the correctness of the answer], "reasoning": [your one line step by step reasoning about the correctness of the answer])
  Below is your grading rubric:
- Correctness: If the answer correctly answers the question, below are the details for different scores:
  - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer.
      - For example, when asked “How to terminate a databricks cluster”, the answer is empty string, or content that’s completely irrelevant, or sorry I don’t know the answer.
  - Score 4: the answer provides some relevance to the question and answers one aspect of the question correctly.
      - Example:
          - Question: How to terminate a databricks cluster
          - Answer: Databricks cluster is a cloud-based computing environment that allows users to process big data and run distributed data processing tasks efficiently.
          - Or answer:  In the Databricks workspace, navigate to the "Clusters" tab. And then this is a hard question that I need to think more about it
  - Score 7: the answer mostly answers the question but is missing or hallucinating on one critical aspect.
      - Example:
          - Question: How to terminate a databricks cluster”
          - Answer: “In the Databricks workspace, navigate to the "Clusters" tab.
          Find the cluster you want to terminate from the list of active clusters.
          And then you’ll find a button to terminate all clusters at once”
  - Score 10: the answer correctly answers the question and is not missing any major aspect
      - Example:
          - Question: How to terminate a databricks cluster
          - Answer: In the Databricks workspace, navigate to the "Clusters" tab.
          Find the cluster you want to terminate from the list of active clusters.
          Click on the down-arrow next to the cluster name to open the cluster details.
          Click on the "Terminate" button. A confirmation dialog will appear. Click "Terminate" again to confirm the action.”
Provided question:
{question}
Provided answer:
{answer}
Provided context:
{context}
Please provide your grading for the correctness and explain why you gave the particular grading"""
        
        
class Evaluater:
    def __init__(self, item):
        self.question = item["question"]
        self.answer = item["answer"]
        #self.domain = item["domain"]
        self.context = item["context"]
        self.llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":1000000})

    def get_prompt_template(self):
        prompt = BasePromptContext()
        template = prompt.base_template
        variables = prompt.variables_list
        eval_template = PromptTemplate(input_variables=variables, template=template)
        return eval_template

    def evaluate(self):
        prompt = self.get_prompt_template().format(question = self.question, answer = self.answer, context = self.context)
        score = self.llm(prompt)
        return score

# Run the correctness evaluation for a single question/answer/context item
def _create_evaluation_scenario(item):
    score = Evaluater(item).evaluate()
    return score
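# Example (hypothetical values) of how the correctness judge is invoked; the returned score
# is the raw text generated by the flan-t5 model from the rubric prompt above:
# score = _create_evaluation_scenario({
#     "question": "How to terminate a databricks cluster?",
#     "answer": "Open the Clusters tab and click Terminate.",
#     "context": "...retrieved passage text...",
# })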


class HallucinatePromptContext:
    def __init__(self):
        self.variables_list = ["query","answer","context"]
        self.base_template = """In this task, you will be presented with a query, a reference text and an answer. The answer is

generated to the question based on the reference text. The answer may contain false information, you

must use the reference text to determine if the answer to the question contains false information,

if the answer is a hallucination of facts. Your objective is to determine whether the reference text

contains factual information and is not a hallucination. A 'hallucination' in this context refers to

an answer that is not based on the reference text or assumes information that is not available in

the reference text. Your response should be a single word: either "factual" or "hallucinated", and

it should not include any other text or characters. "hallucinated" indicates that the answer

provides factually inaccurate information to the query based on the reference text. "factual"

indicates that the answer to the question is correct relative to the reference text, and does not

contain made up information. Please read the query and reference text carefully before determining

your response.

 

    # Query: {query}

    # Reference text: {context}

    # Answer: {answer}

    Is the answer above factual or hallucinated based on the query and reference text?"""
        
        
class HallucinatonEvaluater:
    def __init__(self, item):
        self.question = item["question"]
        self.answer = item["answer"]
        #self.domain = item["domain"]
        self.context = item["context"]
        self.llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":1000000})

    def get_prompt_template(self):
        prompt = HallucinatePromptContext()
        template = prompt.base_template
        variables = prompt.variables_list
        eval_template = PromptTemplate(input_variables=variables, template=template)
        return eval_template

    def evaluate(self):
        prompt = self.get_prompt_template().format(query = self.question, answer = self.answer, context = self.context)
        score = self.llm(prompt)
        return score


# Run the hallucination check for a single question/answer/context item
def _create_hallucination_scenario(item):
    score = HallucinatonEvaluater(item).evaluate()
    return score


@app.get("/rag")
def rag(question: str, evaluate: bool):

    chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        callbacks=[handler],
        return_source_documents=True,
    )

    #response = chain("how tredence brought good insight?")
    response = chain(question)
    print(response["result"])

    # give Phoenix a moment to record the spans from the chain call before querying them
    time.sleep(4)
    
    df = px.active_session().get_spans_dataframe()
    print(px.active_session())
    print(px.active_session().get_spans_dataframe())
    print(df.count())        
    df_sorted = df.sort_values(by='end_time', ascending=False)

    # take the most recent LLMChain span and pull the retrieved context out of its input payload
    model_input = json.loads(df_sorted[df_sorted["name"] == "LLMChain"]["attributes.input.value"].iloc[0])
    context = model_input["context"]

    print(context)

    if evaluate:
        score = _create_evaluation_scenario({
            "question": question,
            "answer": response['result'],
            "context": context            
        })
    else:
        score = "Evaluation is Turned OFF"
 
    return {"question": question, "answer": response['result'], "context": context, "score": score}


initialize_vectorstore()


@app.get("/trace")
def trace():   
    df = px.active_session().get_spans_dataframe().fillna('')
    # a pandas DataFrame is not directly JSON-serializable by FastAPI, so return plain records
    return df.to_dict(orient="records")
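# Example request (hypothetical host/port):
#   curl "http://localhost:8000/trace"
# Returns the Phoenix spans collected so far (one record per chain/LLM/retriever call).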


@app.get("/hallucinate")
def hallucinate(question: str, answer: str, context: str):
    # renamed from `trace` so it does not shadow the /trace handler defined above
    hallucination_score = _create_hallucination_scenario({
        "question": question,
        "answer": answer,
        "context": context
    })

    return {"hallucination_score": hallucination_score}
    



'''
#import getpass
from pyngrok import ngrok, conf
#print("Enter your authtoken, which can be copied from https://dashboard.ngrok.com/auth")
conf.get_default().auth_token="2WJNWULs5bCOyJnV24WQYJEKod3_YQUbM5EGCp8sgE4aQvzi"
port = 37689
# Open a ngrok tunnel to the HTTP server
conf.get_default().monitor_thread = False
public_url = ngrok.connect(port).public_url
print(" * ngrok tunnel \"{}\" -> \"http://127.0.0.1:{}\"".format(public_url, port))
'''