import streamlit as st
import os
import pkg_resources
# Using this wacky hack to get around the massively ridiculous managed-env loading order
def is_installed(package_name, version):
    """Return True if *package_name* is installed at exactly *version*.

    Args:
        package_name: Distribution name as known to the package index.
        version: Exact version string to compare against (no range semantics).

    Returns:
        True only when the distribution is found and its version string is
        equal to *version*; False when missing or at any other version.
    """
    # importlib.metadata is the stdlib replacement for the deprecated
    # pkg_resources API; imported locally so the bootstrap stays self-contained.
    from importlib.metadata import PackageNotFoundError
    from importlib.metadata import version as dist_version
    try:
        return dist_version(package_name) == version
    except PackageNotFoundError:
        return False
@st.cache_resource
def install_packages():
    """Install pinned dependencies the managed environment does not provide.

    Cached by Streamlit so the (slow) pip invocation runs at most once per
    server process. Must run before the heavyweight imports further down.
    """
    import sys  # local import keeps the pre-install bootstrap self-contained

    pinned = {"spaces": "0.12.0", "pydantic": "1.8.2"}
    install_commands = [
        # Use sys.executable so pip installs into THIS interpreter's
        # environment, not whichever `pip` happens to be first on PATH.
        f"{sys.executable} -m pip install {name}=={ver}"
        for name, ver in pinned.items()
        if not is_installed(name, ver)
    ]
    if install_commands:
        # Chain with && so a failed install aborts the remaining ones.
        os.system(" && ".join(install_commands))


# install packages if necessary — deliberately BEFORE the imports below,
# which depend on the pinned pydantic version being present.
install_packages()
import re
import json
from dotenv import load_dotenv  # NOTE(review): imported but load_dotenv() is never called — confirm env vars are injected by the host
import numpy as np
import pandas as pd
from haystack.schema import Document
from haystack.document_stores import PineconeDocumentStore
from haystack.nodes import EmbeddingRetriever
import openai
# for local st testing, may need to run source ~/.zshrc to point to env vars
# Get OpenAI API key (raises KeyError if the variable is missing)
openai.api_key = os.environ["OPENAI_API_KEY"]
# Get Pinecone API key (raises KeyError if the variable is missing)
pinecone_key = os.environ["PINECONE_API_KEY"]
#___________________________________________________________________________________________________________
# @st.cache_resource
# def get_document_store():
# doc_file_name="cpv_full_southern_africa"
# document_store = PineconeDocumentStore(api_key=pinecone_key,
# environment="asia-southeast1-gcp-free",
# index=doc_file_name)
# return document_store
# # Get (or initialize and get) the document store
# document_store = get_document_store()
@st.cache_resource
def get_retriever():
    """Build the Pinecone-backed embedding retriever (once per process).

    Streamlit caches the returned object, so the document-store connection
    and the sentence-transformer model are initialised a single time.
    """
    index_name = "cpv_full_southern_africa"
    store = PineconeDocumentStore(
        api_key=pinecone_key,
        environment="asia-southeast1-gcp-free",
        index=index_name,
    )
    return EmbeddingRetriever(
        document_store=store,
        embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
        model_format="sentence_transformers",
        progress_bar=False,
    )


retriever = get_retriever()
# # Instantiate retriever
# retriever = EmbeddingRetriever(
# document_store=document_store,
# embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
# model_format="sentence_transformers",
# progress_bar=False,
# )
# Base instruction prepended to every query; the '&&& [ref. #] ... &&&'
# pattern it demands is what get_refs() later parses back out of the answer.
prompt_template="Answer the given question using the following documents. \
Formulate your answer in the style of an academic report. \
Provide example quotes and citations using extracted text from the documents. \
Use facts and numbers from the documents in your answer. \
ALWAYS include references for information used from documents at the end of each applicable sentence using the format: '[ref. #]', where '[ref. #]' is included in the text provided at the start of each document (demarcated by the pattern '- &&& [ref. #] document_name &&&:')'. \
Do not include page numbers in the references. \
If no relevant information to answer the question is present in the documents, just say you don't have enough information to answer."
# Model dropdown options (currently disabled — only chatGPT is wired up)
# model_options = ['chatGPT','Llama2']
# Country filter options for the sidebar multiselect
country_options = ['All Countries','Angola','Botswana','Lesotho','Kenya','Malawi','Mozambique','Namibia','Rwanda','South Africa','Zambia','Zimbabwe']
# Vulnerability-category filter options for the sidebar multiselect
vulnerability_options = ['All Categories','Agricultural communities', 'Children', 'Coastal communities', 'Ethnic, racial or other minorities', 'Fishery communities', 'Informal sector workers', 'Members of indigenous and local communities', 'Migrants and displaced persons', 'Older persons', 'Persons living in poverty', 'Persons with disabilities', 'Persons with pre-existing health conditions', 'Residents of drought-prone regions', 'Rural populations', 'Sexual minorities (LGBTQI+)', 'Urban populations', 'Women and other genders','Other']
# Example queries offered to the user ('-' acts as the empty placeholder)
examples = [
"-",
"What specific initiatives are presented in the context to address the needs of groups such as women and children to the effects climate change?",
"In addition to gender, children, and youth, is there any mention of other groups facing disproportional impacts from climate change due to their geographic location, socio-economic status, age, gender, health, and occupation?"
]
def get_docs(input_query, country=None, vulnerability_cat=None):
    """Retrieve the top-10 matching documents, optionally filtered.

    Args:
        input_query: Free-text question to embed and match.
        country: List of country names to restrict to. None/empty — or a
            selection containing 'All Countries' — applies no country filter.
        vulnerability_cat: List of vulnerability categories. None/empty — or
            a selection containing 'All Categories' — applies no filter.

    Returns:
        List of haystack Document objects, each with a 1-based 'ref_id' in
        its meta so the prompt and answer can cite it as '[ref. N]'.
    """
    # None sentinels avoid mutable default arguments. The 'All ...' options
    # are treated as "no filter": previously, explicitly selecting them
    # produced an $in filter that matched no document.
    country = country or []
    vulnerability_cat = vulnerability_cat or []
    filters = {}
    if country and "All Countries" not in country:
        filters['country'] = {'$in': country}
    if vulnerability_cat and "All Categories" not in vulnerability_cat:
        filters['vulnerability_cat'] = {'$in': vulnerability_cat}
    docs = retriever.retrieve(query=input_query, filters=filters or None, top_k=10)

    # Break out the key fields and convert to pandas so we can assign a
    # stable 1-based reference number (used in the prompt and matched
    # against afterwards in get_refs()).
    flat = [{**d.meta, "score": d.score, "content": d.content} for d in docs]
    df_docs = pd.DataFrame(flat).reset_index()
    df_docs['ref_id'] = df_docs.index + 1  # start reference numbering at 1

    # Convert back to Document format, keeping only the meta fields the
    # prompt builder and reference formatter rely on.
    ls_dict = []
    for _, row in df_docs.iterrows():
        ls_dict.append(Document(
            row['content'],
            meta={
                'country': row['country'],
                'document': row['document'],
                'page': row['page'],
                'file_name': row['file_name'],
                'ref_id': row['ref_id'],
                'vulnerability_cat': row['vulnerability_cat'],
                'score': row['score'],
            },
        ))
    return ls_dict
def get_refs(docs, res):
    """Extract and format the references actually cited in a generated answer.

    Parses *res* for the engineered '[ref. N]' ids the prompt template
    instructs the model to emit, then renders a markdown line for each
    retrieved document whose ref_id was cited.

    Args:
        docs: Documents from get_docs(); each meta carries 'ref_id',
            'country', 'document', 'page', 'file_name', 'vulnerability_cat'.
        res: The model's answer text.

    Returns:
        Markdown string of cited references ('' when nothing was cited).
    """
    # Lowercase so 'Ref.'/'ref.' both match; this pattern is what gpt-3.5
    # returns under the prompt template above.
    cited_ids = {int(m) for m in re.findall(r'ref\. (\d+)', res.lower())}
    result_str = ""  # accumulated markdown output
    for item in docs:
        doc = item.to_dict()
        meta = doc['meta']
        if meta['ref_id'] not in cited_ids:
            continue
        # Supplementary documents additionally cite their source file name.
        if meta['document'] == "Supplementary":
            source = meta['country'] + " " + meta['document'] + ':' + meta['file_name']
        else:
            source = meta['country'] + " " + meta['document']
        result_str += (
            "**Ref. " + str(meta['ref_id']) + " [" + source
            + ' p' + str(meta['page'])
            + '; vulnerabilities: ' + meta['vulnerability_cat'] + "]:** "
            + "*'" + doc['content'] + "'*\n"  # Add \n for a line break
        )
    return result_str
# define a special function for putting the prompt together (as we can't use haystack)
def get_prompt(docs, input_query, template=None):
    """Assemble the full LLM prompt: instructions + tagged context + question.

    Args:
        docs: Documents whose meta carries 'ref_id' and 'document'.
        input_query: The user's question.
        template: Optional instruction text; defaults to the module-level
            prompt_template. Parameterised so the instruction style can be
            swapped without editing the global.

    Returns:
        A single prompt string. Each document is demarcated with the
        '&&& [ref. N] name &&&:' pattern the template tells the model
        to cite, so get_refs() can match citations back to documents.
    """
    base_prompt = template if template is not None else prompt_template
    # Add the meta data for references
    context = ' - '.join(
        '&&& [ref. ' + str(d.meta['ref_id']) + '] ' + d.meta['document'] + ' &&&: ' + d.content
        for d in docs
    )
    return base_prompt + "; Context: " + context + "; Question: " + input_query + "; Answer:"
def run_query(input_text, country, model_sel):
    """Retrieve documents, stream the model's answer to the UI, render references.

    Args:
        input_text: The user's query.
        country: Country filter list, passed through to get_docs().
        model_sel: Model choice; only "chatGPT" is currently wired up.

    NOTE(review): relies on module-level state — `vulnerabilities_cat`
    (sidebar selection) and `res_box` (Streamlit output container, defined
    elsewhere in this file) — consider passing these in explicitly.
    """
    # first call the retriever function using selected filters
    docs = get_docs(input_text, country=country, vulnerability_cat=vulnerabilities_cat)
    # Default so the references section below renders even when no model
    # branch ran (previously `references` was unbound -> NameError for any
    # model_sel other than "chatGPT").
    references = ""
    # model selector (not currently being used)
    if model_sel == "chatGPT":
        # instantiate ChatCompletion as a generator object (stream is set to True)
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": get_prompt(docs, input_text)}],
            stream=True,
        )
        # iterate through the streamed output, re-rendering on every chunk
        report = []
        for chunk in response:
            # streamed chunks carry the text under 'delta' (totally
            # different structure from the non-streaming response)
            chunk_message = chunk['choices'][0]['delta']
            # some chunks (role-only / finish markers) carry no text
            if 'content' in chunk_message:
                report.append(chunk_message.content)  # extract the message
                # merge the latest text with all previous and show it
                result = "".join(report).strip()
                res_box.success(result)  # output to response text box
        # extract references from the complete generated text
        references = get_refs(docs, "".join(report).strip())
    # Llama2 selection (was running on HF)
    # else:
    #     res = client.text_generation(get_prompt(docs, query=input_query), max_new_tokens=4000, temperature=0.01, model=model)
    #     output = res
    #     references = get_refs(docs, res)
    st.markdown("----")
    st.markdown('**REFERENCES:**')
    st.markdown('References are based on text automatically extracted from climate policy documents. These extracts may contain non-legible characters or disjointed text as an artifact of the extraction procedure')
    st.markdown(references, unsafe_allow_html=True)
#___________________________________________________________________________________________________________
# Sidebar (filters)
with st.sidebar:
    # Filter selections feed the retriever via run_query()/get_docs().
    country = st.sidebar.multiselect('Filter by country:', country_options)
    vulnerabilities_cat = st.sidebar.multiselect('Filter by vulnerabilities category:', vulnerability_options)
    with st.expander("ℹ️ - About filters", expanded=False):
        # Fixed user-facing typo: "severly" -> "severely".
        st.markdown(
            """
* *These selections will filter the data matched against your query*
* *For a comparative analysis of multiple countries or vulnerability categories, select the items you require or select **'All Countries'** or **'All Categories'***
* *Be careful in using the vulnerabilities category filter, as many of the categories are not well represented in the documents. Therefore, this will severely limit the data available for analysis*
"""
        )
# Main window title
with st.container():
st.markdown("