import os

import gradio as gr
import weaviate
from langchain.chains import LLMChain, SequentialChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate

collection_name = "Chunk"
MODEL = "gpt-3.5-turbo"
LANGUAGE = "en"  # nl / en

llm = ChatOpenAI(
    model_name=MODEL,
    temperature=0.0,
    openai_api_key=os.environ["OPENAI_API_KEY"],
)


def get_answer_given_the_context(llm, prompt, context) -> str:
    # Plain (non f-string) template so ChatPromptTemplate can fill in the
    # {prompt} and {context} variables at run time instead of at definition.
    template = """
    Provide an answer to the prompt given the context.

    {prompt}

    {context}
    """
    prompt_get_skills_intersection = ChatPromptTemplate.from_template(template=template)
    skills_match_chain = LLMChain(
        llm=llm,
        prompt=prompt_get_skills_intersection,
        output_key="answer",
    )
    chain = SequentialChain(
        chains=[skills_match_chain],
        input_variables=["prompt", "context"],
        output_variables=[skills_match_chain.output_key],
        verbose=False,
    )
    return chain({"prompt": prompt, "context": context})["answer"]


def predict(prompt):
    client = weaviate.Client(
        url=os.environ["WEAVIATE_URL"],
        auth_client_secret=weaviate.AuthApiKey(api_key=os.environ["WEAVIATE_API_KEY"]),
        additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
    )

    # Retrieve the five chunks semantically closest to the prompt;
    # near_text expects its concepts as a list of strings.
    search_result = (
        client.query.get(class_name=collection_name, properties=["text"])
        .with_near_text({"concepts": [prompt]})
        # .with_generate(single_prompt="{text}")
        .with_limit(5)
        .do()
    )

    context_list = [
        element["text"] for element in search_result["data"]["Get"][collection_name]
    ]
    context = "\n".join(context_list)

    return get_answer_given_the_context(llm=llm, prompt=prompt, context=context)


iface = gr.Interface(
    fn=predict,  # the function to wrap
    inputs="text",  # the input type
    outputs="text",  # the output type
    examples=[
        ["What is the process of raising an incident?"],
        ["What is Cx0 program management?"],
        [
            "What is the process for identifying risks that can impact the desired outcomes of a project?"
        ],
        ["What is the release management process?"],
    ],
)

if __name__ == "__main__":
    iface.launch()
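
# Minimal usage sketch (an assumption, not part of the original script):
# the app expects the environment variables below to be set, and a Weaviate
# class named "Chunk" with a "text" property to already exist.
#
#   export OPENAI_API_KEY="sk-..."
#   export WEAVIATE_URL="https://<your-cluster>.weaviate.network"
#   export WEAVIATE_API_KEY="..."
#   python app.py  # assuming this file is saved as app.py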