import streamlit as st
import os
import pandas as pd
import random
from os.path import join
from datetime import datetime

from src import show_response, get_from_user
from dotenv import load_dotenv
from langchain_groq.chat_models import ChatGroq
from langchain_google_genai import GoogleGenerativeAI
from huggingface_hub import HfApi

st.set_page_config(layout="wide")
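
# API keys are read from a local .env file via python-dotenv;
# a missing key raises KeyError at startup.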
load_dotenv()
Groq_Token = os.environ["GROQ_API_KEY"]
hf_token = os.environ["HF_TOKEN"]
gemini_token = os.environ["GEMINI_TOKEN"]
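
# Human-friendly model names mapped to provider-specific model IDs
# (Groq-hosted open models plus Google's Gemini).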
models = {
    "llama3": "llama3-70b-8192",
    "mixtral": "mixtral-8x7b-32768",
    "llama2": "llama2-70b-4096",
    "gemma": "gemma-7b-it",
    "gemini-pro": "gemini-pro",
}

self_path = os.path.dirname(os.path.abspath(__file__))

st.write(
    """
    <style>
    .title {
        text-align: center;
        color: #17becf;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

st.markdown(
    "<div style='text-align:center; padding: 20px;'>VayuBuddy makes pollution monitoring easier by bridging the gap between users and datasets.<br>No coding required—just meaningful insights at your fingertips!</div>",
    unsafe_allow_html=True,
)

st.markdown(
    "<div style='text-align:center;'>Choose a query from <b>Select a prompt</b> or type a query in the <b>chat box</b>, select an <b>LLM</b> (Large Language Model), and press Enter to generate a response.</div>",
    unsafe_allow_html=True,
)

image_path = "IITGN_Logo.png"

col1, col2, col3 = st.sidebar.columns((1.0, 2, 1.0))
with col2:
    st.image(image_path, use_column_width=True)
    st.markdown("<h1 class='title'>VayuBuddy</h1>", unsafe_allow_html=True)
model_name = st.sidebar.selectbox("Select LLM:", ["llama3", "mixtral", "gemma", "gemini-pro"])

questions = ['Custom Prompt']
with open(join(self_path, "questions.txt")) as f:
    questions += [q.strip() for q in f.read().split("\n") if q.strip()]

waiting_lines = (
    "Thinking...", "Just a moment...", "Let me think...", "Working on it...",
    "Processing...", "Hold on...", "One moment...", "On it...",
)
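
# Chat history is kept in session state so it survives Streamlit reruns.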
if "responses" not in st.session_state: |
|
st.session_state.responses = [] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
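
# NOTE: upload_feedback reads module-level globals (feedback, error, output,
# last_prompt, code, status) that are set in the chat-history loop below
# before the Submit button's on_click callback fires.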
def upload_feedback():
    print("Uploading feedback")

    folder_name = str(datetime.now()).replace(" ", "_").replace(":", "-").replace(".", "-")
    print("Feedback folder:", folder_name)
    save_path = "/tmp/vayubuddy_feedback.md"
    path_in_repo = f"data/{folder_name}/feedback.md"
    with open(save_path, "w") as f:
        template = f"""Prompt: {last_prompt}

Output: {output}

Code:

```py
{code}
```

Error: {error}

Feedback: {feedback['score']}

Comments: {feedback['text']}
"""
        print(template, file=f)

    api = HfApi(token=hf_token)
    api.upload_file(
        path_or_fileobj=save_path,
        path_in_repo=path_in_repo,
        repo_id="SustainabilityLabIITGN/VayuBuddy_Feedback",
        repo_type="dataset",
    )
    if status['is_image']:
        api.upload_file(
            path_or_fileobj=output,
            path_in_repo=f"data/{folder_name}/plot.png",
            repo_id="SustainabilityLabIITGN/VayuBuddy_Feedback",
            repo_type="dataset",
        )

    print("Feedback uploaded successfully!")
print("#" * 10)
for response_id, response in enumerate(st.session_state.responses):
    status = show_response(st, response)
    if response["role"] == "assistant":
        feedback_key = f"feedback_{response_id // 2}"
        print("response_id", response_id, "feedback_key", feedback_key)

        error = response["error"]
        output = response["content"]
        last_prompt = response["last_prompt"]
        code = response["gen_code"]

        if "feedback" in st.session_state.responses[response_id]:
            st.write("Feedback:", st.session_state.responses[response_id]["feedback"])
        else:
            thumbs = st.radio("We would appreciate your feedback!", ('👍', '👎'), index=None, key=feedback_key)
            if thumbs:
                comments = st.text_area("[Optional] Please provide extra information", key=feedback_key + "_comments")
                feedback = {"score": thumbs, "text": comments}
                if st.button("Submit", on_click=upload_feedback, key=feedback_key + "_submit"):
                    st.session_state.responses[response_id]["feedback"] = feedback
                    st.success("Feedback uploaded successfully!")

print("#" * 10)
show = True
prompt = st.sidebar.selectbox("Select a Prompt:", questions, key="prompt_key")
if prompt == 'Custom Prompt':
    show = False
    prompt = st.chat_input("Ask me anything about air quality!", key=1000)
    if prompt:
        show = True
else:
    st.chat_input("Select 'Select a Prompt' -> 'Custom Prompt' in the sidebar to ask your own questions.", key=1000, disabled=True)

if "last_prompt" in st.session_state:
    last_prompt = st.session_state["last_prompt"]
    last_model_name = st.session_state["last_model_name"]
    if (prompt == last_prompt) and (model_name == last_model_name):
        show = False

if prompt:
    st.sidebar.info("Select 'Custom Prompt' to ask your own questions.")

if show:
    user_response = get_from_user(prompt)
    st.session_state.responses.append(user_response)

    with st.spinner(random.choice(waiting_lines)):
        ran = False
        for i in range(1):
            print(f"Attempt {i+1}")
            if model_name == "gemini-pro":
                llm = GoogleGenerativeAI(model=models[model_name], google_api_key=gemini_token, temperature=0)
            else:
                llm = ChatGroq(model=models[model_name], api_key=Groq_Token, temperature=0)

            df_check = pd.read_csv("Data.csv")
            df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
            df_check = df_check.head(5)
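
            # Build the code stub the LLM must complete: fixed imports and
            # rcParams, plus the dataframe's dtypes and the user's prompt
            # inlined as comments.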
            new_line = "\n"
            parameters = {"font.size": 12, "figure.dpi": 600}

            template = f"""```python
import pandas as pd
import matplotlib.pyplot as plt

plt.rcParams.update({parameters})

df = pd.read_csv("Data.csv")
df["Timestamp"] = pd.to_datetime(df["Timestamp"])

import geopandas as gpd
india = gpd.read_file("https://gist.githubusercontent.com/jbrobst/56c13bbbf9d97d187fea01ca62ea5112/raw/e388c4cae20aa53cb5090210a42ebb9b765c0a36/india_states.geojson")
india.loc[india['ST_NM'].isin(['Ladakh', 'Jammu & Kashmir']), 'ST_NM'] = 'Jammu and Kashmir'
import uuid
# df.dtypes
{new_line.join(map(lambda x: '# ' + x, str(df_check.dtypes).split(new_line)))}

{new_line.join(['# ' + line for line in prompt.strip().split(new_line)])}
"""
            query = f"""I have a pandas DataFrame `df` with PM2.5 and PM10 data.
* The columns are 'Timestamp', 'station', 'PM2.5', 'PM10', 'address', 'city', 'latitude', 'longitude', and 'state'.
* Frequency of the data is daily.
* `pollution` generally means `PM2.5`.
* You already have `df`, so don't read the csv file.
* Don't print anything; save the result in a variable `answer` and make it global.
* Unless explicitly mentioned, don't consider the result as a plot.
* PM2.5 guidelines: India: 60, WHO: 15.
* PM10 guidelines: India: 100, WHO: 50.
* If the result is a plot, show the India and WHO guidelines in the plot.
* If the result is a plot, make it in tight layout, save it, and store the path in `answer`. Example: `answer='plot.png'`. Use uuid to name the saved plot.
* If the result is a plot, rotate the x-axis tick labels by 45 degrees.
* If the result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`.
* I have a geopandas.GeoDataFrame `india` containing the coordinates required to plot the Indian map with states.
* If the query asks you to plot on the India map, use that GeoDataFrame and add points as required, with code similar to: `v = ax.scatter(df['longitude'], df['latitude'])`. If a colorbar is required, use: `plt.colorbar(v)`.
* If the query asks you to plot on the India map, plot the India map in beige color.
* Whenever you do any sort of aggregation, report the corresponding standard deviation, standard error, and the number of data points for that aggregation.
* Whenever you report a floating point number, round it to 2 decimal places.
* Always report the unit of the data. Example: `The average PM2.5 is 45.67 µg/m³`.

Complete the following code.

{template}
"""
            answer = None
            code = None
            error = None
            try:
                if model_name == "gemini-pro":
                    answer = llm.invoke(query)
                else:
                    answer = llm.invoke(query).content
                code = f"""
{template.split("```python")[1].split("```")[0]}
{answer.split("```python")[1].split("```")[0]}
"""
                exec(code)
                ran = True
            except Exception as e:
                error = e
                if code is not None:
                    answer = f"Error executing the code...\n\n{e}"

            if not isinstance(answer, str):
                answer = "!!!Faced an error while working on your query. Please try again!!!"

            response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "error": error}

            if ran:
                break
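
    # Persist the assistant turn, remember the prompt/model pair so the same
    # request is not re-submitted, and rerun to refresh the chat display.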
    st.session_state.responses.append(response)
    st.session_state['last_prompt'] = prompt
    st.session_state['last_model_name'] = model_name
    st.rerun()

contact_details = """
**Feel free to reach out to us:**

- [Yash J Bachwana](mailto:yash.bachwana@iitgn.ac.in) (Lead Developer, IIT Gandhinagar)
- [Zeel B Patel](https://patel-zeel.github.io/) (PhD Student, IIT Gandhinagar)
- [Nipun Batra](https://nipunbatra.github.io/) (Faculty, IIT Gandhinagar)
"""

st.sidebar.markdown("<hr>", unsafe_allow_html=True)
st.sidebar.markdown(contact_details, unsafe_allow_html=True)
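
# Keep the sidebar content pinned while the main column scrolls.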
st.markdown(
    """
    <style>
    .sidebar .sidebar-content {
        position: sticky;
        top: 0;
        height: 100vh;
        overflow-y: auto;
        overflow-x: hidden;
    }
    </style>
    """,
    unsafe_allow_html=True,
)