|
import streamlit as st |
|
import os |
|
import pandas as pd |
|
import random |
|
from os.path import join |
|
from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question |
|
from dotenv import load_dotenv |
|
from langchain_groq.chat_models import ChatGroq |
|
|
|
# --- App configuration -------------------------------------------------------
# Pull Groq credentials from the local env file into the process environment.
load_dotenv("Groq.txt")

# NOTE(review): this reads GROQ_API_KEY, but the chat handler constructs
# ChatGroq with os.getenv("GROQ_API") — confirm which variable name is the
# real one; as written, Groq_Token is never passed to the client.
Groq_Token = os.environ["GROQ_API_KEY"]

# Friendly model aliases -> Groq model identifiers.
models = {
    "llama3": "llama3-70b-8192",
    "mixtral": "mixtral-8x7b-32768",
    "llama2": "llama2-70b-4096",
    "gemma": "gemma-7b-it",
}

# Absolute directory containing this script.
self_path = os.path.dirname(os.path.abspath(__file__))
|
|
|
|
|
# --- Page header -------------------------------------------------------------
# Inject the CSS used by the title, then render the title and intro text.
# FIX: the original CSS block was missing its closing </style> tag, so the
# unterminated tag swallowed/leaked raw CSS into the rendered page.
st.write(
    """
    <style>
    .title {
        text-align: center;
        color: #17becf;
    }
    </style>
    """,
    unsafe_allow_html=True,
)


st.markdown("<h2 class='title'>VayuBuddy</h2>", unsafe_allow_html=True)

st.markdown("<div style='text-align:center; padding: 20px;'>VayuBuddy makes pollution monitoring easier by bridging the gap between users and datasets.<br>No coding required—just meaningful insights at your fingertips!</div>", unsafe_allow_html=True)


st.markdown("<div style='text-align:center;'>Choose a query from <b>Select a prompt</b> or type a query in the <b>chat box</b>, select a <b>LLM</b> (Large Language Model), and press enter to generate a response.</div>", unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Sidebar dropdown selecting which Groq-hosted LLM answers the query.
# NOTE(review): "llama2" exists in `models` but is not offered here — confirm intentional.
model_name = st.sidebar.selectbox("Select LLM:", ["llama3","mixtral", "gemma"])
|
|
|
# Canned analysis prompts offered in the sidebar dropdown. The first entry,
# 'Custom Prompt', switches the UI into free-form chat-input mode.
questions = (
    'Custom Prompt',
    'Plot the monthly average PM2.5 for the year 2023.',
    'Which month in which year has the highest average PM2.5 overall?',
    'Which month in which year has the highest PM2.5 overall?',
    'Which month has the highest average PM2.5 in 2023 for Mumbai?',
    'Plot and compare monthly timeseries of pollution for Mumbai and Bengaluru.',
    'Plot the yearly average PM2.5.',
    'Plot the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
    'Which month has the highest pollution?',
    'Which city has the highest PM2.5 level in July 2022?',
    'Plot and compare monthly timeseries of PM2.5 for Mumbai and Bengaluru.',
    'Plot and compare the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
    'Plot the monthly average PM2.5.',
    'Plot the monthly average PM10 for the year 2023.',
    'Which (month, year) has the highest PM2.5?',
    'Plot the monthly average PM2.5 of Delhi for the year 2022.',
    'Plot the monthly average PM2.5 of Bengaluru for the year 2022.',
    'Plot the monthly average PM2.5 of Mumbai for the year 2022.',
    'Which state has the highest average PM2.5?',
    'Plot monthly PM2.5 in Gujarat for 2023.',
    'What is the name of the month with the highest average PM2.5 overall?',
)

# Short status messages; one is picked at random for the spinner while the
# model is working.
waiting_lines = (
    "Thinking...", "Just a moment...", "Let me think...", "Working on it...",
    "Processing...", "Hold on...", "One moment...", "On it...",
)
|
|
|
|
|
|
|
|
|
# Initialise the per-session chat transcript on first run, then replay every
# previously rendered turn so the conversation survives Streamlit reruns.
if "responses" not in st.session_state:
    st.session_state.responses = []

for past_turn in st.session_state.responses:
    # Turns flagged as failed ("no_response") are kept in history for state
    # bookkeeping but never re-rendered.
    if past_turn["no_response"]:
        continue
    show_response(st, past_turn)

# Default: handle whatever prompt is currently selected.
show = True
|
|
|
# --- Main chat flow ----------------------------------------------------------
# Pick a canned prompt (or a custom one typed into the chat box), build a
# code-generation prompt for the LLM, execute the returned pandas/matplotlib
# code at module scope, and render the result into the chat history.
if prompt := st.sidebar.selectbox("Select a Prompt:", questions):

    st.sidebar.info("Select 'Custom Prompt' to ask your own question.")

    if prompt == 'Custom Prompt':
        # Suppress handling until the user actually types something.
        show = False

        prompt = st.chat_input("Ask me anything about air quality!", key=10)
        if prompt : show = True
    if show :

        # Echo the user's turn into the transcript before querying the model.
        response = get_from_user(prompt)
        response["no_response"] = False
        st.session_state.responses.append(response)

        show_response(st, response)

        no_response = False

        with st.spinner(random.choice(waiting_lines)):
            ran = False
            # Single-attempt loop; increase range() to allow retries on failure.
            for i in range(1):
                print(f"Attempt {i+1}")
                # NOTE(review): reads env var "GROQ_API" here, while the top of
                # the file loads "GROQ_API_KEY" into Groq_Token — confirm which
                # name is correct; getenv() silently returns None if unset.
                llm = ChatGroq(model=models[model_name], api_key=os.getenv("GROQ_API"), temperature=0)

                # Small sample of the dataset, used only so the prompt can show
                # the column dtypes to the model.
                df_check = pd.read_csv("Data.csv")
                df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
                df_check = df_check.head(5)

                new_line = "\n"

                parameters = {"font.size": 12}

                # Skeleton code block the LLM must complete; the dtype listing
                # and the user's question are embedded as comments inside it.
                template = f"""```python
import pandas as pd
import matplotlib.pyplot as plt

# plt.rcParams.update({parameters})

df = pd.read_csv("Data.csv")
df["Timestamp"] = pd.to_datetime(df["Timestamp"])

import geopandas as gpd
india = gpd.read_file("https://gist.githubusercontent.com/jbrobst/56c13bbbf9d97d187fea01ca62ea5112/raw/e388c4cae20aa53cb5090210a42ebb9b765c0a36/india_states.geojson")

# df.dtypes
{new_line.join(map(lambda x: '# '+x, str(df_check.dtypes).split(new_line)))}

# {prompt.strip()}
# <your code here>
```
"""

                # Full instruction prompt: dataset description, answer/plot
                # conventions, and the code template to complete.
                query = f"""I have a pandas dataframe data of PM2.5 and PM10.
* The columns are 'Timestamp', 'station', 'PM2.5', 'PM10', 'address', 'city', 'latitude', 'longitude',and 'state'.
* Frequency of data is daily.
* `pollution` generally means `PM2.5`.
* You already have df, so don't read the csv file
* Don't print anything, but save result in a variable `answer` and make it global.
* Unless explicitly mentioned, don't consider the result as a plot.
* PM2.5 guidelines: India: 60, WHO: 15.
* PM10 guidelines: India: 100, WHO: 50.
* If result is a plot, show the India and WHO guidelines in the plot.
* If result is a plot make it in tight layout, save it and save path in `answer`. Example: `answer='plot.png'`
* If result is a plot, rotate x-axis tick labels by 45 degrees,
* If result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
* I have a geopandas.geodataframe india containining the coordinates required to plot Indian Map with states.
* If the query asks you to plot on India Map, use that geodataframe to plot and then add more points as per the requirements using the similar code as follows : v = ax.scatter(df['longitude'], df['latitude']). If the colorbar is required, use the following code : plt.colorbar(v)
* If the query asks you to plot on India Map plot the India Map in Beige color
* Whenever you do any sort of aggregation, report the corresponding standard deviation, standard error and the number of data points for that aggregation.
* Whenever you're reporting a floating point number, round it to 2 decimal places.
* Always report the unit of the data. Example: `The average PM2.5 is 45.67 µg/m³`

Complete the following code.

{template}

"""
                answer = None
                code = None
                try:
                    answer = llm.invoke(query)
                    # Stitch the template preamble together with the model's
                    # fenced ```python``` snippet; raises IndexError (caught
                    # below) if the reply has no fenced code block.
                    code = f"""
{template.split("```python")[1].split("```")[0]}
{answer.content.split("```python")[1].split("```")[0]}
"""

                    # SECURITY NOTE(review): executes LLM-generated code
                    # in-process at module scope; the generated code is expected
                    # to leave its result in a global `answer`.
                    exec(code)
                    ran = True
                    no_response = False
                except Exception as e:
                    no_response = True
                    # Saved for the st.error() report below; only defined on
                    # the failure path.
                    exception = e
                    if code is not None:
                        answer = f"!!!Faced an error while working on your query. Please try again!!!"

                if type(answer) != str:
                    # exec() did not leave a string in `answer` (e.g. it is
                    # still None or the raw AIMessage); fall back to a generic
                    # error message.
                    answer = f"!!!Faced an error while working on your query. Please try again!!!"

                # Assistant turn: generated code is stored twice (gen_code /
                # ex_code) — presumably "generated" vs "executed"; TODO confirm.
                response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "no_response": no_response}

                if ran:
                    break

        # Only record/render an assistant turn if code generation got far
        # enough to produce something executable.
        if code is not None:

            print("Adding response")

            st.session_state.responses.append(response)
            show_response(st, response)

            # Feedback widget: append the comment to a local file on submit.
            user_comment = st.text_input("Enter your comment:",key = 501)

            check = st.button("Submit",key = 301)

            with open("user_comments.txt", "a") as file:
                if check:

                    file.write(user_comment + "\n")
                    st.success("Comment submitted successfully!")

        if no_response:
            print("No response")
            st.error(f"Failed to generate right output due to the following error:\n\n{exception}")

        # Reset so a rerun falls back to custom-prompt mode.
        prompt = 'Custom Prompt'
|
|