import os

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Read the OpenAI API key from the environment so it never appears in the source.
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")

embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)

# Load the FAISS index that was saved locally in the "faiss_index" directory.
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
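# Assumption: the "faiss_index" directory was produced by an earlier indexing step,
# roughly FAISS.from_documents(split_docs, embeddings).save_local("faiss_index").
# allow_dangerous_deserialization=True is required because load_local unpickles the
# stored docstore metadata; only enable it for index files you created yourself.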

def get_response_from_query(db, query, k=3):
    # Retrieve the k most similar article chunks and concatenate them as context.
    docs = db.similarity_search(query, k=k)
    docs_page_content = " ".join([d.page_content for d in docs])

    llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)

    prompt = PromptTemplate(
        input_variables=["question", "docs"],
        template="""
        You are a bot that is open to discussing different cultural, philosophical and political topics. You will analyse the articles provided to you. Stay truthful, and if you weren't provided any resources, give your opinion only.
        Answer the following question: {question}
        By searching the following articles: {docs}

        Only use the factual information from the documents. Make sure to mention key phrases from the articles.

        If you feel like you don't have enough information to answer the question, say "I don't know".
        """,
    )

    chain = LLMChain(llm=llm, prompt=prompt)
    response = chain.run(question=query, docs=docs_page_content)
    r_text = str(response)

    # Evaluation step: ask the model whether the answer is faithful to the retrieved articles.
    prompt_eval = PromptTemplate(
        input_variables=["answer", "docs"],
        template="""
        Your job is to evaluate whether the following response is faithful to the given context.

        Response: {answer}
        Context articles: {docs}

        Start with a Yes or a No, then give a reason why they are or are not consistent.
        """,
    )

    chain_part_2 = LLMChain(llm=llm, prompt=prompt_eval)
    evals = chain_part_2.run(answer=r_text, docs=docs_page_content)

    return response, docs, evals
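# Note: LLMChain and .run() are deprecated in newer LangChain releases in favour of
# prompt | llm pipelines and .invoke(); the calls above still work on recent versions
# but emit deprecation warnings.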



def greet(query):
    # Gradio callback: answer the query using the top 2 matching article chunks.
    answer, sources, evals = get_response_from_query(db, query, 2)
    return answer, sources, evals


examples = [
    ["How to be happy"],
    ["Climate Change Challenges in Europe"],
    ["Philosophy in the world of Minimalism"],
    ["Hate Speech vs Freedom of Speech"],
    ["Articles by Noam Chomsky on US Politics"],
    ["The importance of values and reflection"],
]

demo = gr.Interface(fn=greet, title="cicero-semantic-search", inputs="text",
                    outputs=[gr.Textbox(lines=3, label="Response"),
                             gr.Textbox(lines=3, label="Source"),
                             gr.Textbox(lines=3, label="Evaluation")],
                    examples=examples)

demo.launch(share=True)
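# Running this file directly starts the Gradio server; share=True also requests a
# temporary public gradio.live link (optional when the app is already hosted, e.g.
# on Hugging Face Spaces).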