"""Gradio question-answering demo over a local knowledge base (kb.txt).

Reads kb.txt, splits it into small overlapping chunks, indexes the chunks
in a Chroma vector store using Cohere embeddings, and serves a simple
text-in/text-out Gradio interface.  The actual retrieval + QA chain is
currently disabled: the callback returns a placeholder string.
"""

try:
    import gradio as gr
    import requests  # noqa: F401 -- kept from original script; unused below
    import json  # noqa: F401
    import langchain  # noqa: F401
    import os  # noqa: F401
    from langchain.embeddings.cohere import CohereEmbeddings
    from langchain.vectorstores import Chroma
    from langchain.text_splitter import CharacterTextSplitter  # noqa: F401
    from langchain.llms import Cohere, OpenAI, HuggingFaceHub  # noqa: F401
    from langchain.chains import ChatVectorDBChain  # noqa: F401
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.chains.question_answering import load_qa_chain  # noqa: F401
except Exception as ex:
    print(f'Some Error --> {ex}')
    # Fail fast: everything below depends on these imports.  The original
    # swallowed the error and then crashed later with an unrelated NameError.
    raise

# Load the knowledge base.  Explicit encoding avoids platform-dependent
# text decoding.
with open('kb.txt', encoding='utf-8') as f:
    state_of_the_union = f.read()

# Embeddings + chunking: 100-char chunks with 20-char overlap so sentences
# spanning a chunk boundary remain retrievable.
embeddings = CohereEmbeddings()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
)
texts = text_splitter.create_documents([state_of_the_union])
docsearch = Chroma.from_documents(texts, embeddings)

# Alternative backend kept for reference:
# llm = Cohere(model="command-xlarge-nightly", temperature=0.7, max_tokens=1000)
llm = OpenAI(temperature=0.7, max_tokens=1000)
print(f'Done with loading and Indexing')


def procdata(name):
    """Gradio callback: answer *name* (a question) against the indexed KB.

    The retrieval/QA chain is currently disabled; the function only logs
    the input and returns the placeholder string ``'Done'``.

    Args:
        name: The question text entered in the Gradio textbox.

    Returns:
        The placeholder string ``'Done'``.
    """
    print(f'Start Processing ... --> {name}')
    # TODO: re-enable retrieval + QA, e.g.:
    #   docs = docsearch.similarity_search(name)
    #   chain = load_qa_chain(llm, chain_type="refine")
    #   response = chain({"input_documents": docs[:3], "question": name},
    #                    return_only_outputs=True)
    #   return response['output_text'].replace('\n', '')
    return 'Done'


# NOTE(review): `enable_queue` is deprecated in Gradio 3+ in favor of
# `iface.queue()` -- confirm against the installed Gradio version.
iface = gr.Interface(fn=procdata, inputs="text", outputs="text", enable_queue=True)

if __name__ == "__main__":
    iface.launch()