import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import os
import json
import logging
from huggingface_hub import login
import requests
from bs4 import BeautifulSoup
import re

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("project.log"),
        logging.StreamHandler()
    ]
)

# Authenticate with the Hugging Face Hub; warn instead of crashing when the
# token is missing so the failure mode is visible in the logs.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    logging.warning("HF_TOKEN is not set; gated models may fail to load.")

# Manager: leads the user dialogue and produces the structured project summary.
manager_model_name = "meta-llama/Llama-3.1-8B-Instruct"
manager_model = AutoModelForCausalLM.from_pretrained(
    manager_model_name,
    device_map="auto",
    torch_dtype=torch.bfloat16
)
manager_tokenizer = AutoTokenizer.from_pretrained(manager_model_name)

# Researcher: JSON-mode model used to gather documentation and code snippets.
researcher_model_name = "hiieu/Meta-Llama-3-8B-Instruct-function-calling-json-mode"
researcher_model = AutoModelForCausalLM.from_pretrained(
    researcher_model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
researcher_tokenizer = AutoTokenizer.from_pretrained(researcher_model_name)

# Analyzer: checks the research results against the structured summary.
analyzer_model_name = "jpacifico/Chocolatine-3B-Instruct-DPO-Revised"
analyzer_model = AutoModelForCausalLM.from_pretrained(
    analyzer_model_name,
    device_map="auto",
    torch_dtype=torch.float16
)
analyzer_tokenizer = AutoTokenizer.from_pretrained(analyzer_model_name)

# Coder: generates the final application code.
coder_model_name = "Qwen/Qwen2.5-Coder-7B-Instruct"
coder_model = AutoModelForCausalLM.from_pretrained(
    coder_model_name,
    torch_dtype="auto",
    device_map="auto"
)
coder_tokenizer = AutoTokenizer.from_pretrained(coder_model_name)

project_state = {
    "AgentManager": {"structured_summary": None},
    "AgentResearcher": {"search_results": None},
    "AgentAnalyzer": {"analysis_report": None, "instruction_for_coder": None},
    "AgentCoder": {"final_code": None}
}
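
# project_state acts as a simple shared blackboard: each agent reads what the
# upstream agents wrote and stores its own output for the next step.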
|
|
|
|
|
# The markers and the validation question below are matched literally in
# extract_structured_summary() and agent_manager(), so keep them in sync.
manager_prompt_template = """
You are the AgentManager of a multi-agent system.

- Your role is to interact with the user in order to understand their request.
- Ask relevant questions until you have gathered all the necessary information.
- Once you judge the information sufficient, produce a structured summary of the
  project, enclosed between the exact markers "Structured Summary:" and "End of Summary".
- Include the project variables below whenever they are not empty.
- Ask the user for explicit validation with the exact question: "Do you validate this summary?"
- You may modify the project variables when the user asks for it.

Project variables:
{variables_context}
"""

researcher_prompt_template = """
System: You are a research assistant. Your tasks are:
1. Start from the following structured summary:
{structured_summary}
2. Search the online Gradio documentation.
3. Extract useful code snippets or examples.
4. Format the results clearly for validation.

Output format:
- Documentation: ...
- Code snippets: ...
"""

analyzer_prompt_template = """
You are an analysis assistant. Your tasks are:
1. Check that the search results are consistent with the structured summary:
{structured_summary}
2. Analyze the search results:
{search_results}
3. Produce a report stating whether the results are **valid** or **invalid**.
4. If **invalid**, list the missing elements or inconsistencies.
5. Your answer must start with 'Validity: Yes' or 'Validity: No', followed by the analysis report.
"""


def get_variables_context():
    variables = {}
    for agent, data in project_state.items():
        variables[agent] = {}
        for key, value in data.items():
            variables[agent][key] = value if value else "N/A"
    return json.dumps(variables, indent=2, ensure_ascii=False)


def update_project_state(modifications):
    for var, value in modifications.items():
        keys = var.split('.')
        target = project_state
        for key in keys[:-1]:
            # setdefault so a typo in the path does not silently write into a
            # throwaway dict (the original .get(key, {}) lost such updates).
            target = target.setdefault(key, {})
        target[keys[-1]] = value
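
# Example (hypothetical values): a dotted path names a nested key, so
#   update_project_state({"AgentManager.structured_summary": "A Gradio demo app"})
# writes project_state["AgentManager"]["structured_summary"].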
|
|
|
def extract_modifications(user_input):
    # Detect explicit variable-update commands of the form
    # "set the variable <dotted.name> to <value>".
    modifications = {}
    if "set the variable" in user_input.lower():
        matches = re.findall(r"set the variable (\w+(?:\.\w+)*) to (.+)", user_input, re.IGNORECASE)
        for var_name, var_value in matches:
            modifications[var_name.strip()] = var_value.strip()
    return modifications
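
# Example (hypothetical message):
#   extract_modifications("set the variable AgentCoder.final_code to print('hi')")
# returns {"AgentCoder.final_code": "print('hi')"}.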
|
|
|
def extract_structured_summary(response):
    # These markers must match the ones requested in manager_prompt_template.
    start_token = "Structured Summary:"
    end_token = "End of Summary"
    start_index = response.find(start_token)
    end_index = response.find(end_token, start_index)
    if start_index != -1 and end_index != -1:
        return response[start_index + len(start_token):end_index].strip()
    else:
        logging.warning("The structured summary could not be extracted.")
        return None
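
# Expected manager response layout (illustrative):
#   ... dialogue ...
#   Structured Summary:
#   Goal: ..., Components: ..., Constraints: ...
#   End of Summary
#   Do you validate this summary?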
|
|
|
|
|
def agent_manager(chat_history, user_input):
    variables_context = get_variables_context()
    system_prompt = manager_prompt_template.format(variables_context=variables_context)

    conversation = [{"role": "system", "content": system_prompt}]
    for turn in chat_history:
        conversation.append({"role": "user", "content": turn['user']})
        conversation.append({"role": "assistant", "content": turn['assistant']})
    conversation.append({"role": "user", "content": user_input})

    # Explicit variable updates are handled without calling the model.
    modifications = extract_modifications(user_input)
    if modifications:
        update_project_state(modifications)
        response = "The variables have been updated as requested."
        chat_history.append({'user': user_input, 'assistant': response})
        return response, chat_history, False

    prompt = manager_tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
    # The chat template already inserted the special tokens; do not add them twice.
    inputs = manager_tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(manager_model.device)
    output_ids = manager_model.generate(
        inputs["input_ids"],
        max_new_tokens=256,
        eos_token_id=manager_tokenizer.eos_token_id,
        # Llama 3 ships without a pad token; fall back to EOS.
        pad_token_id=manager_tokenizer.pad_token_id or manager_tokenizer.eos_token_id,
        attention_mask=inputs["attention_mask"]
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    response = manager_tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

    chat_history.append({'user': user_input, 'assistant': response})

    if "Do you validate this summary" in response:
        structured_summary = extract_structured_summary(response)
        project_state["AgentManager"]["structured_summary"] = structured_summary
        return response, chat_history, True
    else:
        return response, chat_history, False


def fetch_webpage(url: str) -> str:
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        logging.info(f"Page downloaded successfully: {url}")
        return response.text
    except requests.RequestException as e:
        logging.error(f"Error while fetching the page {url}: {e}")
        return ""

|
def extract_information_from_html(html: str, keyword: str) -> list:
    try:
        soup = BeautifulSoup(html, "html.parser")
        results = []
        for code_block in soup.find_all("code"):
            if keyword.lower() in code_block.get_text().lower():
                results.append(code_block.get_text())
        logging.info(f"Number of sections extracted for '{keyword}': {len(results)}")
        return results
    except Exception as e:
        logging.error(f"Error while extracting information: {e}")
        return []

|
def search_gradio_docs(query: str) -> dict:
    url = "https://gradio.app/docs/"
    logging.info(f"Starting search for query: {query}")
    html_content = fetch_webpage(url)
    if not html_content:
        return {"error": "Unable to download the Gradio documentation."}
    results = extract_information_from_html(html_content, query)
    if not results:
        return {"error": f"No results found for '{query}'."}
    return {"query": query, "results": results}
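
# Example (live network call; results depend on the page contents):
#   search_gradio_docs("Interface")
# returns {"query": "Interface", "results": [...]} on success,
# or {"error": "..."} when nothing matches.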
|
|
|
def agent_researcher():
    structured_summary = project_state["AgentManager"]["structured_summary"]
    if not structured_summary:
        return "The structured summary is not available."

    messages = [
        {"role": "system", "content": "You are a research assistant. You must answer in JSON with the keys 'documentation' and 'code_snippets'."},
        {"role": "user", "content": researcher_prompt_template.format(structured_summary=structured_summary)}
    ]

    # apply_chat_template with return_tensors="pt" returns a tensor, not a dict,
    # so it must not be indexed with string keys.
    input_ids = researcher_tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to(researcher_model.device)

    terminators = [
        researcher_tokenizer.eos_token_id,
        researcher_tokenizer.convert_tokens_to_ids("<|eot_id|>")  # Llama 3 end-of-turn token
    ]

    output_ids = researcher_model.generate(
        input_ids,
        max_new_tokens=512,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    response_ids = output_ids[0][input_ids.shape[-1]:]
    response = researcher_tokenizer.decode(response_ids, skip_special_tokens=True)

    try:
        response_json = json.loads(response)
    except json.JSONDecodeError:
        logging.error("The model response is not valid JSON.")
        response_json = {"documentation": "", "code_snippets": ""}

    # Note: the whole summary is used as a literal keyword here, which is a very
    # strict match and may return no results.
    search_results = search_gradio_docs(structured_summary)
    if "error" in search_results:
        logging.error(search_results["error"])
        return search_results["error"]

    project_state["AgentResearcher"]["search_results"] = {
        "model_response": response_json,
        "dynamic_results": search_results["results"]
    }

    return f"AgentResearcher results:\n{response_json}\n\nDynamic results:\n{search_results['results']}"
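
# Shape of the stored search results (illustrative):
#   {"model_response": {"documentation": "...", "code_snippets": "..."},
#    "dynamic_results": ["<matching code block text>", ...]}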
|
|
|
|
|
def agent_analyzer():
    structured_summary = project_state["AgentManager"]["structured_summary"]
    search_results = project_state["AgentResearcher"]["search_results"]
    if not structured_summary or not search_results:
        return "The information required for the analysis is not available."

    messages = [
        {"role": "system", "content": "You are an analysis assistant. Your task is to analyze the search results and check their consistency with the structured summary."},
        {"role": "user", "content": analyzer_prompt_template.format(
            structured_summary=structured_summary,
            search_results=json.dumps(search_results, ensure_ascii=False)
        )}
    ]
    prompt = analyzer_tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

    # The model was already dispatched with device_map="auto", so the pipeline
    # must not be given a device mapping of its own.
    analyzer_pipeline = pipeline(
        "text-generation",
        model=analyzer_model,
        tokenizer=analyzer_tokenizer
    )

    sequences = analyzer_pipeline(
        prompt,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        num_return_sequences=1,
        max_new_tokens=256,
        # Without this, the output echoes the prompt, which itself contains the
        # literal string 'Validity: Yes' and would make the check below always pass.
        return_full_text=False,
    )
    analysis_report = sequences[0]['generated_text']

    project_state["AgentAnalyzer"]["analysis_report"] = analysis_report

    if "Validity: Yes" in analysis_report:
        instruction_for_coder = f"Generate code based on:\n{structured_summary}\n\nSearch results:\n{search_results}"
        project_state["AgentAnalyzer"]["instruction_for_coder"] = instruction_for_coder
        return "Valid report.\nInstructions for the AgentCoder are ready."
    elif "Validity: No" in analysis_report:
        project_state["AgentAnalyzer"]["instruction_for_coder"] = None
        return f"Invalid report. Clarification needed.\n{analysis_report}"
    else:
        project_state["AgentAnalyzer"]["instruction_for_coder"] = None
        return f"The analysis report does not state its validity clearly. Clarification needed.\n{analysis_report}"


def agent_coder():
    instruction_for_coder = project_state["AgentAnalyzer"]["instruction_for_coder"]
    if not instruction_for_coder:
        return "The instructions for the code are not available."

    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": instruction_for_coder}
    ]
    prompt = coder_tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    model_inputs = coder_tokenizer(prompt, return_tensors="pt").to(coder_model.device)

    generated_ids = coder_model.generate(
        **model_inputs,
        max_new_tokens=1024,
        do_sample=True,  # required for temperature/top_p to take effect
        temperature=0.7,
        top_p=0.9,
    )

    # Keep only the newly generated tokens.
    generated_ids = generated_ids[:, model_inputs.input_ids.shape[-1]:]
    final_code = coder_tokenizer.decode(generated_ids[0], skip_special_tokens=True)

    project_state["AgentCoder"]["final_code"] = final_code

    return f"Code generated by the AgentCoder:\n{final_code}"


def user_interaction(message, chat_history):
    if chat_history is None:
        chat_history = []

    # A trailing sentinel entry means the manager is waiting for the user to
    # validate the structured summary.
    if chat_history and isinstance(chat_history[-1], dict) and chat_history[-1].get('status') == 'awaiting_validation':
        user_validation = message
        if user_validation.lower() in ["yes", "oui"]:
            agent_researcher()
            analyzer_response = agent_analyzer()
            # agent_analyzer stores coder instructions only when the report is
            # valid; a substring test on "valid" would also match "invalid".
            if project_state["AgentAnalyzer"]["instruction_for_coder"]:
                response = agent_coder()
            else:
                response = analyzer_response
        else:
            response = "The structured summary was not validated. Please provide more details."

        chat_history.pop()  # drop the sentinel
        chat_history.append({'user': message, 'assistant': response})
        return chat_history, chat_history
    else:
        response, chat_history, is_summary_ready = agent_manager(chat_history, message)
        if is_summary_ready:
            chat_history.append({'status': 'awaiting_validation'})
        return chat_history, chat_history
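
# Conversation flow (summary): the manager gathers requirements and asks
# "Do you validate this summary?"; a "yes" answer then triggers, in order,
# agent_researcher -> agent_analyzer -> agent_coder.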
|
|
|
|
|
with gr.Blocks() as interface:
    with gr.Tabs():
        with gr.Tab("Chat"):
            with gr.Row():
                with gr.Column(scale=3):
                    chatbot = gr.Chatbot(label="Main Chat")
                    state = gr.State([])
                    msg = gr.Textbox(placeholder="Type your message here...")
                    send_btn = gr.Button("Send")

                with gr.Column(scale=2):
                    agent_status_chat = gr.Chatbot(label="Agent Status")
                    logs_box = gr.Textbox(
                        value="",
                        lines=10,
                        interactive=False,
                        placeholder="Execution logs",
                        label="Logs",
                    )

        with gr.Tab("Output"):
            output_code = gr.Code(
                value="# The generated code will be displayed here.\n",
                language="python",
                label="Final Code",
            )

    def update_agent_status_and_logs(chat_history):
        """
        Refresh the per-agent status messages and the execution logs.
        """
        agent_status_messages = []

        structured_summary = project_state["AgentManager"]["structured_summary"]
        if structured_summary:
            manager_message = f"AgentManager: structured summary available.\n{structured_summary}"
        else:
            manager_message = "AgentManager: waiting for user input."
        agent_status_messages.append(("AgentManager", manager_message))

        researcher_result = project_state["AgentResearcher"]["search_results"]
        if researcher_result:
            # The model answers are nested under "model_response" (see agent_researcher).
            model_response = researcher_result.get("model_response", {})
            researcher_message = (
                f"AgentResearcher: results obtained\n"
                f"Documentation: {model_response.get('documentation', 'N/A')}\n"
                f"Code snippets: {model_response.get('code_snippets', 'N/A')}"
            )
        else:
            researcher_message = "AgentResearcher: search pending..."
        agent_status_messages.append(("AgentResearcher", researcher_message))

        analysis_report = project_state["AgentAnalyzer"]["analysis_report"]
        if analysis_report:
            analyzer_message = (
                f"AgentAnalyzer: analysis complete\n"
                f"{analysis_report}"
            )
        else:
            analyzer_message = "AgentAnalyzer: analysis pending..."
        agent_status_messages.append(("AgentAnalyzer", analyzer_message))

        final_code = project_state["AgentCoder"]["final_code"]
        if final_code:
            coder_message = "AgentCoder: code generated successfully ✔️"
        else:
            coder_message = "AgentCoder: waiting for instructions."
        agent_status_messages.append(("AgentCoder", coder_message))

        try:
            with open("project.log", "r") as log_file:
                logs = log_file.read()
        except OSError:
            logs = ""

        return agent_status_messages, logs

    def respond(message, chat_history, agent_chat):
        """
        Handle a user message, then refresh the agent statuses, the logs,
        and the generated code. agent_chat is received because it is wired
        as an input, but the status list is rebuilt from scratch each turn.
        """
        updated_chat_history, _ = user_interaction(message, chat_history)

        # The history may end with the 'awaiting_validation' sentinel, so build
        # the display pairs from the real exchanges only.
        turns = [t for t in updated_chat_history if "assistant" in t]
        chat_pairs = [(t["user"], t["assistant"]) for t in turns]

        agent_status, logs = update_agent_status_and_logs(updated_chat_history)

        generated_code = project_state["AgentCoder"].get("final_code") or "# No code has been generated yet."

        # Event handlers return the new component values directly; mutating the
        # components or calling .update() on them is not needed.
        return chat_pairs, updated_chat_history, agent_status, logs, generated_code

    send_btn.click(
        respond,
        inputs=[msg, state, agent_status_chat],
        outputs=[chatbot, state, agent_status_chat, logs_box, output_code],
    )
    msg.submit(
        respond,
        inputs=[msg, state, agent_status_chat],
        outputs=[chatbot, state, agent_status_chat, logs_box, output_code],
    )


if __name__ == "__main__":
    interface.launch()