import os
from datetime import datetime

import psycopg2
import streamlit as st
from huggingface_hub import login
from langchain.chains import LLMChain, RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEndpoint

# Login to Hugging Face
login(token=st.secrets["HF_TOKEN"])

# Load the FAISS index once and keep it in session state
if 'db' not in st.session_state:
    st.session_state.db = FAISS.load_local(
        "faiss_index",
        HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'),
        allow_dangerous_deserialization=True
    )

# Build the retriever from the cached vector store
retriever = st.session_state.db.as_retriever(
    search_type="mmr",
    search_kwargs={'k': 1}
)

# Define the prompt template (the assistant must answer in French)
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible
based on the instructions and context provided, without using prior knowledge. You answer in FRENCH.
Analyse the context carefully and provide a direct answer based on it.
If the user says Bonjour or Hello, your only answer is: Hi! comment puis-je vous aider?
Answer in French only.

{context}
Vous devez répondre aux questions en français.

### QUESTION:
{question}
[/INST]
Answer in French only.
Vous devez répondre aux questions en français.
"""

repo_id = "mistralai/Mistral-7B-Instruct-v0.3"

# Load the model only once
if 'mistral_llm' not in st.session_state:
    st.session_state.mistral_llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_length=2048,
        temperature=0.05,
        huggingfacehub_api_token=st.secrets["HF_TOKEN"]
    )

# Create the prompt; the "stuff" chain fills in both the retrieved context and the question
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)

# LLM chain (kept from the original script; the retrieval flow below builds its own chain)
llm_chain = LLMChain(llm=st.session_state.mistral_llm, prompt=prompt)

# Create the retrieval QA chain
qa = RetrievalQA.from_chain_type(
    llm=st.session_state.mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)

def chatbot_response(user_input):
    return qa.run(user_input)

# Create columns for logos
col1, col2, col3 = st.columns([2, 3, 2])
with col1:
    st.image("Design 3_22.png", width=150, use_column_width=True)
with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)

st.markdown(
    '<div style="text-align: center;">"Votre Réponse à Chaque Défi Méthodologique"</div>',
    unsafe_allow_html=True
)

# Input and button for user interaction
user_input = st.text_input("You:", "")
submit_button = st.button("Ask 📨")

# Run the chain when the user submits a question and keep the answer across reruns
if submit_button and user_input:
    st.session_state.bot_response = chatbot_response(user_input)
bot_response = st.session_state.get("bot_response", "")

# Function to create a connection to PostgreSQL
def create_connection():
    return psycopg2.connect(
        host=os.getenv("DB_HOST"),
        database=os.getenv("DB_NAME"),
        user=os.getenv("DB_USER"),
        password=os.getenv("DB_PASSWORD"),
        port=os.getenv("DB_PORT")
    )

# Function to create the feedback table if it doesn't exist
def create_feedback_table(conn):
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS feedback (
            id SERIAL PRIMARY KEY,
            user_input TEXT NOT NULL,
            bot_response TEXT NOT NULL,
            rating INT CHECK (rating >= 1 AND rating <= 5),
            comment TEXT,
            timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        );
    """)
    conn.commit()
    cursor.close()

# Function to insert feedback into the database
def insert_feedback(conn, user_input, bot_response, rating, comment):
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO feedback (user_input, bot_response, rating, comment, timestamp) "
        "VALUES (%s, %s, %s, %s, %s)",
        (user_input, bot_response, rating, comment, datetime.now())
    )
    conn.commit()
    cursor.close()

# Initialize the connection and create the table if necessary
conn = create_connection()
create_feedback_table(conn)

# Feedback UI
st.markdown("## Rate your experience")

# Star-based rating using radio buttons
rating = st.radio(
    "Rating",
    options=[1, 2, 3, 4, 5],
    format_func=lambda x: "★" * x  # Display stars based on the rating
)

# Text area for leaving a comment
comment = st.text_area("Leave a comment")

# Display the question and the bot's response for context
st.markdown("### Your Question:")
st.write(user_input)
st.markdown("### Bot's Response:")
st.write(bot_response)

# Submit feedback
if st.button("Submit Feedback"):
    if rating and comment:
        insert_feedback(conn, user_input, bot_response, rating, comment)
        st.success("Thank you for your feedback!")
    else:
        st.warning("Please provide a rating and a comment.")

# Close the connection when done
conn.close()

# Motivational quote at the bottom
st.markdown("---")
st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")
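# --- Optional configuration check (a minimal sketch, not part of the original script) ---
# Assumes the same settings the app already relies on: HF_TOKEN in
# .streamlit/secrets.toml and the DB_* variables in the environment.
# Calling check_configuration() near the top of the script would surface missing
# settings early instead of failing later inside psycopg2 or the Hugging Face client.
def check_configuration():
    required_env = ["DB_HOST", "DB_NAME", "DB_USER", "DB_PASSWORD", "DB_PORT"]
    missing = [name for name in required_env if not os.getenv(name)]
    if "HF_TOKEN" not in st.secrets:
        missing.append("HF_TOKEN (expected in .streamlit/secrets.toml)")
    if missing:
        st.error("Missing configuration: " + ", ".join(missing))
        st.stop()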