import os

import chromadb
import openai
import langchain

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

import gradio as gr

from init import create_vectorstore

from config import (
    CHROMA_SETTINGS,
    PERSIST_DIRECTORY,
)


# Lazily-built retrieval chain, shared across calls so the conversation
# memory actually persists between successive questions.
_qa_chain = None


def query(question):
    """Answer ``question`` against the persisted Chroma vector store.

    On the first call this builds the full retrieval stack — OpenAI
    embeddings, the Chroma store at ``PERSIST_DIRECTORY``, a
    ``ConversationBufferMemory`` and a ``ConversationalRetrievalChain``
    over ``gpt-3.5-turbo`` — and caches it in ``_qa_chain``. Subsequent
    calls reuse the cached chain, so chat history accumulates across
    queries instead of being recreated (and thereby reset) every time,
    which was the previous behavior.

    Parameters
    ----------
    question : str
        The user's question.

    Returns
    -------
    str
        The chain's answer text (the ``"answer"`` field of the result).
    """
    global _qa_chain
    if _qa_chain is None:
        embeddings = OpenAIEmbeddings()
        db = Chroma(
            persist_directory=PERSIST_DIRECTORY,
            embedding_function=embeddings,
            client_settings=CHROMA_SETTINGS,
        )
        memory = ConversationBufferMemory(
            memory_key="chat_history", return_messages=True
        )
        _qa_chain = ConversationalRetrievalChain.from_llm(
            ChatOpenAI(model_name="gpt-3.5-turbo"),
            db.as_retriever(),
            memory=memory,
        )
    result = _qa_chain({"question": question})

    return result["answer"]


# The Gradio app stays at module level so external hosts (e.g.
# `gradio deploy` or an importing module) can reference `demo` directly.
demo = gr.Interface(fn=query, inputs="text", outputs="text")


if __name__ == "__main__":
    # Build/refresh the vector store before serving, then start the UI.
    # Guarded so importing this module no longer triggers indexing and
    # a blocking server launch as an import side effect.
    create_vectorstore()
    demo.launch()