Docfile committed on
Commit 8c09efe · verified · 1 Parent(s): 9bcb187

Update app.py

Files changed (1):
  1. app.py +76 -172
app.py CHANGED
@@ -1,177 +1,81 @@
- import streamlit as st
- import google.generativeai as genai
  import os
  from dotenv import load_dotenv
- import http.client
- import json
- import uuid
- import requests  # to send the file to Telegram

- load_dotenv()

  # Configure the Gemini API
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
-
- # Read the Telegram environment variables
- TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
- TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")
-
- safety_settings = [
-     {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
-     {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
-     {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
-     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
- ]
-
- model = genai.GenerativeModel('gemini-2.0-flash-exp', tools='code_execution',
-                               safety_settings=safety_settings,
-                               system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
-
- def perform_web_search(query):
-     conn = http.client.HTTPSConnection("google.serper.dev")
-     payload = json.dumps({"q": query})
-     headers = {
-         'X-API-KEY': '9b90a274d9e704ff5b21c0367f9ae1161779b573',
-         'Content-Type': 'application/json'
-     }
-     try:
-         conn.request("POST", "/search", payload, headers)
-         res = conn.getresponse()
-         data = json.loads(res.read().decode("utf-8"))
-         return data
-     except Exception as e:
-         st.error(f"Erreur lors de la recherche web : {e}")
-         return None
-     finally:
-         conn.close()
-
- def format_search_results(data):
-     if not data:
-         return "Aucun résultat trouvé"
-
-     result = ""
-
-     # Knowledge Graph
-     if 'knowledgeGraph' in data:
-         kg = data['knowledgeGraph']
-         result += f"### {kg.get('title', '')}\n"
-         result += f"*{kg.get('type', '')}*\n\n"
-         result += f"{kg.get('description', '')}\n\n"
-
-     # Organic Results
-     if 'organic' in data:
-         result += "### Résultats principaux:\n"
-         for item in data['organic'][:3]:
-             result += f"- **{item['title']}**\n"
-             result += f"  {item['snippet']}\n"
-             result += f"  [Lien]({item['link']})\n\n"
-
-     # People Also Ask
-     if 'peopleAlsoAsk' in data:
-         result += "### Questions fréquentes:\n"
-         for item in data['peopleAlsoAsk'][:2]:
-             result += f"- **{item['question']}**\n"
-             result += f"  {item['snippet']}\n\n"
-
-     return result
-
- def role_to_streamlit(role):
-     return "assistant" if role == "model" else role
-
- # --- Session handling and chat-history saving ---
-
- if "session_id" not in st.session_state:
-     st.session_state.session_id = str(uuid.uuid4())
-
- def save_chat_history():
-     history_data = []
-     for message in st.session_state.chat.history:
-         history_data.append({
-             "role": message.role,
-             "text": message.parts[0].text
-         })
-     file_name = f"chat_history_{st.session_state.session_id}.json"
-     with open(file_name, "w", encoding="utf-8") as f:
-         json.dump(history_data, f, ensure_ascii=False, indent=4)
-     return file_name  # Return the file name so it can be sent afterwards
-
- def send_file_to_telegram(file_path):
-     if TELEGRAM_BOT_TOKEN is None or TELEGRAM_CHAT_ID is None:
-         st.error("Les variables d'environnement TELEGRAM_BOT_TOKEN ou TELEGRAM_CHAT_ID ne sont pas définies.")
-         return
-
-     url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendDocument"
-     try:
-         with open(file_path, "rb") as file:
-             files = {"document": file}
-             data = {"chat_id": TELEGRAM_CHAT_ID, "caption": "Historique de chat"}
-             response = requests.post(url, data=data, files=files)
-         if response.status_code == 200:
-             st.success("Fichier envoyé sur Telegram avec succès!")
-         else:
-             st.error(f"Erreur lors de l'envoi sur Telegram: {response.text}")
-     except Exception as e:
-         st.error(f"Erreur lors de l'ouverture du fichier ou l'envoi: {e}")
-
- if "chat" not in st.session_state:
-     st.session_state.chat = model.start_chat(history=[])
- if "web_search" not in st.session_state:
-     st.session_state.web_search = False
-
- st.title("Mariam AI!")
-
- with st.sidebar:
-     st.title("Paramètres")
-     st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)
-
- uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
-
- for message in st.session_state.chat.history:
-     with st.chat_message(role_to_streamlit(message.role)):
-         st.markdown(message.parts[0].text)
-
- def process_uploaded_file(file):
-     if file is not None:
-         os.makedirs("temp", exist_ok=True)
-         temp_file_path = os.path.join("temp", file.name)
-         with open(temp_file_path, "wb") as f:
-             f.write(file.getbuffer())
-         try:
-             gemini_file = genai.upload_file(temp_file_path)
-             return gemini_file
-         except Exception as e:
-             st.error(f"Erreur lors du téléchargement du fichier : {e}")
-             return None
-
- if prompt := st.chat_input("Hey?"):
-     uploaded_gemini_file = None
-     if uploaded_file:
-         uploaded_gemini_file = process_uploaded_file(uploaded_file)
-
-     st.chat_message("user").markdown(prompt)
-
-     try:
-         web_results = None
-         if st.session_state.web_search:
-             with st.spinner("Recherche web en cours..."):
-                 web_results = perform_web_search(prompt)
-                 if web_results:
-                     formatted_results = format_search_results(web_results)
-                     prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
-
-         if uploaded_gemini_file:
-             response = st.session_state.chat.send_message([uploaded_gemini_file, "\n\n", prompt])
-         else:
-             response = st.session_state.chat.send_message(prompt)
-
-         with st.chat_message("assistant"):
-             st.markdown(response.text)
-
-         # Save the history and get the file name back
-         file_name = save_chat_history()
-
-         # Optional: send the file to Telegram
-         send_file_to_telegram(file_name)
-
-     except Exception as e:
-         st.error(f"Erreur lors de l'envoi du message : {e}")
+ from langchain_community.chat_message_histories import StreamlitChatMessageHistory
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+ from langchain_core.runnables.history import RunnableWithMessageHistory
+ from langchain_google_genai import GoogleGenerativeAI
  import os
  from dotenv import load_dotenv


+ import streamlit as st
+
+
+ load_dotenv()
+ api_key = os.getenv("GOOGLE_API_KEY")
  # Configure the Gemini API
+ #llm = GoogleGenerativeAI(model="models/text-bison-001", google_api_key=api_key)
+ #genai.configure(api_key=api_key)
+
+
+ st.set_page_config(page_title="StreamlitChatMessageHistory", page_icon="📖")
+ st.title("📖 StreamlitChatMessageHistory")
+
+ """
+ A basic example of using StreamlitChatMessageHistory to help LLMChain remember messages in a conversation.
+ The messages are stored in Session State across re-runs automatically. You can view the contents of Session State
+ in the expander below. View the
+ [source code for this app](https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/basic_memory.py).
+ """
+
+ # Set up memory
+ msgs = StreamlitChatMessageHistory(key="langchain_messages")
+ if len(msgs.messages) == 0:
+     msgs.add_ai_message("How can I help you?")
+
+ view_messages = st.expander("View the message contents in session state")
+
+
+ # Set up the LangChain chain, passing in the message history
+ prompt = ChatPromptTemplate.from_messages(
+     [
+         ("system", "You are an AI chatbot having a conversation with a human."),
+         MessagesPlaceholder(variable_name="history"),
+         ("human", "{question}"),
+     ]
+ )
+
+ chain = prompt | GoogleGenerativeAI(model="models/gemini-2.0-flash-exp", google_api_key=api_key)
+
+ chain_with_history = RunnableWithMessageHistory(
+     chain,
+     lambda session_id: msgs,
+     input_messages_key="question",
+     history_messages_key="history",
+ )
+
+ # Render current messages from StreamlitChatMessageHistory
+ for msg in msgs.messages:
+     st.chat_message(msg.type).write(msg.content)
+
+ # If the user enters a new prompt, generate and draw a new response
+ if prompt := st.chat_input():
+     st.chat_message("human").write(prompt)
+     # Note: new messages are saved to history automatically by LangChain during the run
+     config = {"configurable": {"session_id": "any"}}
+     response = chain_with_history.invoke({"question": prompt}, config)
+     st.chat_message("ai").write(response)  # GoogleGenerativeAI returns a plain string, not a message object
+
+ # Draw the messages at the end, so newly generated ones show up immediately
+ with view_messages:
+     """
+     Message History initialized with:
+     ```python
+     msgs = StreamlitChatMessageHistory(key="langchain_messages")
+     ```
+
+     Contents of `st.session_state.langchain_messages`:
+     """
+     view_messages.json(st.session_state.langchain_messages)
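
Note on the history wiring: the `lambda session_id: msgs` above hands RunnableWithMessageHistory the single Streamlit-backed history no matter which session is configured, which is why the app can pass a throwaway `session_id` of "any". For contrast, here is a minimal offline sketch of the general per-session pattern; the `store` dict, `get_history`, and the echoing `fake_llm` are illustrative stand-ins, not part of this commit:

```python
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an AI chatbot having a conversation with a human."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)

# Stand-in "model" so the sketch runs without an API key: echo the last human turn.
fake_llm = RunnableLambda(lambda value: f"You said: {value.messages[-1].content}")
chain = prompt | fake_llm

store = {}  # session_id -> ChatMessageHistory

def get_history(session_id: str) -> ChatMessageHistory:
    # First time a session_id shows up, give it a fresh in-memory history.
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

chain_with_history = RunnableWithMessageHistory(
    chain,
    get_history,  # called with the session_id taken from the invoke config
    input_messages_key="question",
    history_messages_key="history",
)

# Each distinct session_id accumulates its own history.
cfg = {"configurable": {"session_id": "user-42"}}
print(chain_with_history.invoke({"question": "Hello"}, cfg))
print(len(store["user-42"].messages))  # 2: the human turn plus the AI reply
```

Swapping `fake_llm` for the real `GoogleGenerativeAI(...)` runnable and the in-memory `ChatMessageHistory` for `StreamlitChatMessageHistory` recovers the behavior of the committed app, where Streamlit session state plays the role of the store.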