# NHS AI assisted search — Streamlit demo hosted as a HuggingFace Space.
import streamlit as st

# Page header and the user's free-text query box.
# NOTE(review): the original title emoji was mojibake ("π¦π") — those are the
# UTF-8 bytes of the LangChain parrot-and-chain emoji decoded as cp1253;
# restored to the intended characters here.
st.title('🦜🔗 NHS AI assisted search')
st.write('A demo of a search empowered by OpenAI LLM')

query = st.text_input('Your query:')
# Open the persisted Chroma vector store used for retrieval.
# The embedding model here must match the one used when the store was built
# ('all-MiniLM-L6-v2' sentence-transformer).
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name='all-MiniLM-L6-v2')
vectorstore = Chroma(
    embedding_function=embeddings,
    persist_directory='vectorstore',
)
# Prompt template: instructs the model to act as a doctor's assistant and to
# answer strictly from the retrieved context, citing source pages as
# HTML links. Placeholders {question} and {context} are filled per request.
template = """As a doctor's assistant, when presented with a user's medical query and specific website context, generate consolidated guidance directing them to pertinent website links.
If multiple context chunks are similar, combine them into a single suggestion, citing all relevant sources using hyperlinked text in the format <a href="SOURCE_HERE">description</a>.
If the context doesn't directly address the query, state the absence of a direct match and provide the closest matching guidance.
Always adhere strictly to the provided context and maintain terminology consistency.
Don't mention the word "context" in your answer. Pretend that you're answering the user directly.
Query: {question}
Context:
{context}"""
# Run the retrieval + generation pipeline once the user has entered a query.
if query:
    # Retrieve the 6 chunks most similar to the query from the vector store.
    docs = vectorstore.similarity_search(query, 6)

    # Build the context string: each chunk followed by its source reference,
    # so the LLM can cite it. (list + join instead of repeated string
    # concatenation; produces the identical string, trailing blank line
    # included.)
    parts = []
    for doc in docs:
        parts.append(doc.page_content + "\n")
        parts.append('Source: ' + doc.metadata['source'] + "\n\n")
    context = ''.join(parts)

    from langchain.prompts import PromptTemplate
    prompt = PromptTemplate.from_template(template)

    # Chat model; temperature 0.8 favours fluent phrasing over determinism.
    from langchain.chat_models import ChatOpenAI
    llm = ChatOpenAI(
        model_name='gpt-3.5-turbo',
        temperature=0.8,
    )

    # Chain the prompt and the model; verbose=True logs the filled prompt.
    from langchain.chains import LLMChain
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        verbose=True,
    )

    # NOTE(review): calling the chain directly (chain({...})) is the legacy
    # LangChain API; newer versions prefer chain.invoke({...}). Left as-is to
    # match the pinned langchain version — confirm before migrating.
    result = chain({
        'question': query,
        'context': context,
    })
    st.write(result['text'])