import os
import re
import json

import numpy as np
import pandas as pd
import streamlit as st
from dotenv import load_dotenv
from haystack import Pipeline
from haystack.document_stores import FAISSDocumentStore
from haystack.nodes import EmbeddingRetriever
from haystack.nodes.prompt import PromptNode, PromptTemplate
from haystack.schema import Document
from huggingface_hub import login, HfApi, hf_hub_download, InferenceClient

# Load environment variables from a local .env file, if present
load_dotenv()

# Get HF token and log in
hf_token = os.environ["HF_TOKEN"]
login(token=hf_token, add_to_git_credential=True)

# Get OpenAI API key
openai_key = os.environ["OPENAI_API_KEY"]

# Define the prompt template
template = PromptTemplate(
    prompt="""
Answer the given question using the following documents. \
Formulate your answer in the style of an academic report. \
Provide example quotes and citations using extracted text from the documents. \
Use facts and numbers from the documents in your answer. \
Reference information used from documents at the end of each applicable sentence (ex: [source: document_name]), \
where 'document_name' is the text provided at the start of each document (demarcated by '- &&&' and '&&&:'). \
If no relevant information to answer the question is present in the documents, just say you don't have enough information to answer. \
Context: {' - '.join(['&&& '+d.meta['document_name']+' ref. '+str(d.meta['ref_id'])+' &&&: '+d.content for d in documents])}; Question: {query}; Answer:""",
)

# Options for the model dropdown
model_options = ['chatGPT', 'Llama2']

# Options for the country dropdown
country_options = [
    'All Countries', 'Angola', 'Botswana', 'Lesotho', 'Kenya', 'Malawi',
    'Mozambique', 'Namibia', 'Rwanda', 'South Africa', 'Zambia', 'Zimbabwe',
]

# List of example questions
examples = [
    "-",
    "What specific initiatives are presented in the context to address the needs of groups such as women and children to the effects of climate change?",
    "In addition to gender, children, and youth, is there any mention of other groups facing disproportional impacts from climate change due to their geographic location, socio-economic status, age, gender, health, and occupation?",
]
def get_docs(input_query, country=None):
    '''
    Construct a hacky query to focus the retriever on the target country (see notes below).
    We retrieve the top 150 results to make sure we end up with at least 10 pertaining to the selected country.
    '''
    if country == 'All Countries':
        query = input_query
    else:
        query = "For the country of " + country + ", " + input_query

    # Retrieve top k documents
    docs = retriever.retrieve(query=query, top_k=150)

    # Break out the key fields and convert to pandas for filtering
    docs = [{**x.meta, "score": x.score, "content": x.content} for x in docs]
    df_docs = pd.DataFrame(docs)
    if country != 'All Countries':
        df_docs = df_docs.query('country in @country')

    # Take the top 10
    df_docs = df_docs.head(10)

    # Set up an index from which to base the source reference number (used in the prompt and for matching afterwards)
    df_docs = df_docs.reset_index()
    df_docs['ref_id'] = df_docs.index + 1  # start the index at 1

    # Convert back to Document format
    ls_dict = []
    # Iterate over the dataframe and add the relevant fields to each Document
    for index, row in df_docs.iterrows():
        # Create a Document object for each row
        doc = Document(
            content=row['content'],
            meta={
                'country': row['country'],
                'document_name': row['document'],
                'ref_id': row['ref_id'],
                'score': row['score'],
            },
        )
        # Append the Document object to the documents list
        ls_dict.append(doc)

    return ls_dict


def get_refs(docs, res):
    '''
    Parse the response for the engineered reference ids (refer to the prompt template),
    then extract the matching documents using those reference ids.
    '''
    res = res.lower()  # Convert to lowercase for matching
    # This pattern should be returned by gpt-3.5
    # pattern = r'ref\. (\d+)\]\.'
    pattern = r'ref\. (\d+)'
    ref_ids = [int(match) for match in re.findall(pattern, res)]  # extract the reference ids

    result_str = ""  # Initialize an empty string to store the result
    for i in range(len(docs)):
        doc = docs[i].to_dict()
        ref_id = doc['meta']['ref_id']
        if ref_id in ref_ids:
            result_str += (
                "**Ref. " + str(ref_id) + " ["
                + doc['meta']['country'] + " " + doc['meta']['document_name'] + "]:** "
                + "*'" + doc['content'] + "'*"
                + "\n\n"  # Add a blank line between references
            )

    return result_str
# Define a special function for putting the prompt together ourselves (as we can't use haystack for Llama 2)
def get_prompt_llama2(docs, query):
    base_prompt = (
        "Answer the given question using the following documents. "
        "Formulate your answer in the style of an academic report. "
        "Provide example quotes and citations using extracted text from the documents. "
        "Use facts and numbers from the documents in your answer. "
        "ALWAYS include references for information used from documents at the end of each applicable sentence "
        "using the format: '[ref. #]', where '[ref. #]' is included in the text provided at the start of each "
        "document (demarcated by the pattern '- &&& [ref. #] document_name &&&:'). "
        "Do not include page numbers in the references. "
        "If no relevant information to answer the question is present in the documents, "
        "just say you don't have enough information to answer."
    )
    # Add the metadata for the references
    context = ' - '.join(['&&& [ref. ' + str(d.meta['ref_id']) + '] ' + d.meta['document_name'] + ' &&&: ' + d.content for d in docs])
    prompt = base_prompt + "; Context: " + context + "; Question: " + query + "; Answer:"
    return prompt


def run_query(input_text, country, model_sel):
    docs = get_docs(input_text, country)
    # st.write('Selected country: ', country)  # Debugging country
    if model_sel == "chatGPT":
        res = pipe.run(query=input_text, documents=docs)
        output = res["results"][0]
        references = get_refs(docs, res["results"][0])
    else:
        res = client.text_generation(get_prompt_llama2(docs, query=input_text), max_new_tokens=4000, temperature=0.01, model=model)
        output = res
        references = get_refs(docs, res)

    st.write('Response')
    st.success(output)
    st.write('References')
    st.markdown('References are based on text automatically extracted from climate policy documents. These extracts may contain non-legible characters or disjointed text as an artifact of the extraction procedure.')
    st.markdown(references, unsafe_allow_html=True)


# Set up the retriever, pulling from the local FAISS datastore
retriever = EmbeddingRetriever(
    document_store=FAISSDocumentStore.load(
        index_path="./cpv_full_southern_africa_kenya.faiss",
        config_path="./cpv_full_southern_africa_kenya.json",
    ),
    embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
    model_format="sentence_transformers",
    progress_bar=False,
)

with st.sidebar:
    # Dropdown selectbox
    country = st.sidebar.selectbox('Select a country:', country_options)

    # choice = st.sidebar.radio(label='Select the Document',
    #                           help='You can upload the document or else you can try an example document',
    #                           options=('Upload Document', 'Try Example'),
    #                           horizontal=True)

with st.container():
    st.markdown(
        "Climate Policy Documents: Vulnerabilities Analysis Q&A",
        unsafe_allow_html=True,
    )

st.write(' ')

with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
This tool seeks to provide an interface for querying national climate policy documents (NDCs, LTS etc.).
The current version is powered by chatGPT (3.5). The document store is limited to 10 Southern African countries
(Angola, Botswana, Eswatini, Lesotho, Malawi, Mozambique, Namibia, South Africa, Zambia, Zimbabwe), as well as
Kenya and Rwanda. The intended use case is to allow users to interact with the documents and obtain valuable
insights on various vulnerable groups affected by climate change.

**DISCLAIMER:** *This prototype tool based on LLMs (Large Language Models) is provided "as is" for experimental
and exploratory purposes only, and should not be used for critical or production applications. Users are advised
that the tool may contain errors, bugs, or limitations and should be used with caution and awareness of potential
risks, and the developers make no warranties or guarantees regarding its performance, reliability, or suitability
for any specific purpose.*
""")

# Display the example questions as radio buttons
selected_example = st.radio("Example questions", examples)

# Dropdown selectbox: model
# model_sel = st.selectbox('Select an LLM:', model_options)
model_sel = "chatGPT"

# ---- Model select logic ----
if model_sel == "chatGPT":
    model_name = "gpt-3.5-turbo"
    # Initialize the PromptNode
    pn = PromptNode(
        model_name_or_path=model_name,
        default_prompt_template=template,
        api_key=openai_key,
        max_length=2000,
        model_kwargs={"generation_kwargs": {"do_sample": False, "temperature": 0}},
    )
    # Initialize the pipeline
    pipe = Pipeline()
    pipe.add_node(component=pn, name="prompt_node", inputs=["Query"])
else:
    # Currently disabled
    model = "meta-llama/Llama-2-70b-chat-hf"
    # Instantiate the inference client
    client = InferenceClient()

if selected_example == "-":
    text = st.text_area('Enter your question in the text box below using natural language or select an example from above:')
else:
    text = st.text_area('Enter your question in the text box below using natural language or select an example from above:', value=selected_example)

if st.button('Submit'):
    run_query(text, country=country, model_sel=model_sel)