|
try:
    import gradio as gr
    import requests
    import json
    import langchain
    from langchain import Cohere
    import os
    from langchain.embeddings.cohere import CohereEmbeddings
    from langchain.vectorstores import Chroma
    from langchain.text_splitter import CharacterTextSplitter
    from langchain.llms import Cohere, OpenAI, HuggingFaceHub
    from langchain.chains import ChatVectorDBChain
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.chains.question_answering import load_qa_chain
except ImportError as ex:
    # Surface missing dependencies immediately. The original swallowed the
    # error and continued, which only deferred the failure to a confusing
    # NameError when CohereEmbeddings / Chroma etc. were first used below.
    print(f'Some Error --> {ex}')
    raise
|
|
|
# Load the knowledge-base text that will be chunked and indexed.
# encoding specified explicitly so decoding does not depend on the platform
# default (fixes latent UnicodeDecodeError on Windows for UTF-8 files).
with open('kb.txt', encoding='utf-8') as f:
    state_of_the_union = f.read()

# Embedding model used to vectorize each chunk.
# NOTE(review): requires a Cohere API key in the environment — confirm.
embeddings = CohereEmbeddings()

# Split the raw text into small overlapping chunks; the overlap preserves
# context across chunk boundaries for retrieval.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
)

texts = text_splitter.create_documents([state_of_the_union])

# Build an in-memory Chroma vector index over the chunks.
docsearch = Chroma.from_documents(texts, embeddings)

# LLM intended for answering questions over the index.
# NOTE(review): llm and docsearch are never used by procdata below —
# presumably a QA chain was meant to be wired in; confirm with the author.
llm = OpenAI(temperature=0.7, max_tokens=1000)

# Plain string: the original used an f-string with no placeholders.
print('Done with loading and Indexing')
|
|
|
def procdata(name):
    """Gradio handler: log the received text and acknowledge completion.

    Currently a stub — it only echoes the input to stdout and returns a
    fixed status string; no retrieval or LLM call happens here yet.
    """
    message = f'Start Processing ... --> {name}'
    print(message)
    return 'Done'
|
|
|
# Minimal Gradio UI: a single text input routed through procdata to a text output.
# NOTE(review): `enable_queue` as an Interface kwarg is deprecated in Gradio 3.x
# and removed in 4.x (replaced by `iface.queue()`) — confirm the installed version.
iface = gr.Interface(fn=procdata, inputs="text", outputs="text",enable_queue=True)

# Blocks until the local server is stopped; `share=True` would expose a public URL.
iface.launch()