#!/usr/bin/env python3 import os import sys import time import random import logging import requests import json import re import subprocess import shutil from datetime import datetime from pathlib import Path import streamlit as st import pandas as pd import plotly.express as px import plotly.graph_objects as go from bs4 import BeautifulSoup import html2text import kaggle # Importation des configurations from config import app_config as config # Configuration de la page Streamlit st.set_page_config( page_title="DevSecOps Data Bot", layout="wide", initial_sidebar_state="expanded" ) # Configuration du logging def setup_logging(): log_dir = Path("logs") log_dir.mkdir(exist_ok=True) log_file = log_dir / f"data_collector_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log" logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler(log_file), logging.StreamHandler(sys.stdout) ] ) return logging.getLogger(__name__) logger = setup_logging() # Création des répertoires et scripts nécessaires def create_initial_setup(): dirs = [ "data/devsecops/qa", "data/security/qa", "data/development/qa", "data/data_analysis/qa", "logs", "config", "server", "scripts", "models", "llama.cpp", ".kaggle" ] for dir_path in dirs: Path(dir_path).mkdir(parents=True, exist_ok=True) download_script = Path("scripts/download_with_aria2c.sh") if not download_script.exists(): with open(download_script, 'w') as f: f.write("""#!/bin/bash URL=$1 OUTPUT=$2 MAX_RETRIES=5 for i in $(seq 1 $MAX_RETRIES); do echo "Tentative $i/$MAX_RETRIES: $URL" aria2c -x 16 -s 16 -d "$(dirname "$OUTPUT")" -o "$(basename "$OUTPUT")" "$URL" && break sleep 10 done """) os.chmod(download_script, 0o755) llama_dir = Path("llama.cpp") if not llama_dir.exists(): st.info("Installation de llama.cpp...") subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp.git", str(llama_dir)]) os.chdir(str(llama_dir)) subprocess.run(["mkdir", "-p", "build"]) os.chdir("build") subprocess.run(["cmake", "..", "-DLLAMA_CURL=1"]) subprocess.run(["cmake", "--build", ".", "--config", "Release"]) os.chdir(Path(__file__).parent) model_path = Path("models/qwen2.5-1.5b-instruct-q8_0.gguf") if not model_path.exists(): st.warning("Le modèle GGUF n'existe pas. Téléchargement en cours...") Path("models").mkdir(exist_ok=True) model_url = "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-GGUF/resolve/main/qwen2.5-1.5b-instruct-q8_0.gguf" try: subprocess.run(["bash", str(download_script), model_url, str(model_path)]) if model_path.exists(): st.success("Modèle GGUF téléchargé avec succès!") else: st.error("Échec du téléchargement du modèle GGUF. Veuillez le placer manuellement dans le dossier models/") except Exception as e: st.error(f"Erreur lors du téléchargement du modèle: {str(e)}") # Convertisseur HTML vers texte h = html2text.HTML2Text() h.ignore_links = False h.ignore_images = True h.ignore_emphasis = False h.body_width = 0 # Fonctions pour le serveur LLM (llama.cpp) def check_server_status(): try: response = requests.get("http://localhost:8080/health", timeout=2) if response.status_code == 200: st.session_state.server_status = "Actif" return True else: st.session_state.server_status = "Inactif" return False except requests.exceptions.RequestException: st.session_state.server_status = "Inactif" return False def start_llm_server(): if check_server_status(): st.info("Le serveur llama.cpp est déjà en cours d'exécution.") return model_path = Path("models/qwen2.5-1.5b-instruct-q8_0.gguf") if not model_path.exists(): st.error("Le modèle GGUF n'existe pas. Veuillez le placer dans le dossier models/.") return llama_server = Path("llama.cpp/build/bin/llama-server") if not llama_server.exists(): st.error("llama.cpp n'est pas compilé. Veuillez compiler llama.cpp d'abord.") return start_script = Path("server/start_server.sh") if not start_script.exists(): with open(start_script, 'w') as f: f.write(f"""#!/bin/bash MODEL_PATH="{str(model_path)}" if [ ! -f "$MODEL_PATH" ]; then echo "Le modèle GGUF est introuvable à: $MODEL_PATH" exit 1 fi "{str(llama_server)}" \\ -m "$MODEL_PATH" \\ --port 8080 \\ --host 0.0.0.0 \\ -c 4096 \\ -ngl 999 \\ --threads 8 \\ > "logs/llama_server.log" 2>&1 & echo $! > "server/server.pid" """) os.chmod(start_script, 0o755) try: subprocess.Popen(["bash", str(start_script)]) st.success("Le serveur llama.cpp est en cours de démarrage...") time.sleep(5) if check_server_status(): st.success("Serveur llama.cpp démarré avec succès!") else: st.error("Le serveur n'a pas pu démarrer. Vérifiez les logs dans le dossier logs/.") except Exception as e: st.error(f"Erreur lors du démarrage du serveur: {str(e)}") def stop_llm_server(): stop_script = Path("server/stop_server.sh") if not stop_script.exists(): with open(stop_script, 'w') as f: f.write("""#!/bin/bash PID_FILE="server/server.pid" if [ -f "$PID_FILE" ]; then PID=$(cat "$PID_FILE") kill $PID rm "$PID_FILE" echo "Serveur llama.cpp arrêté." else echo "Aucun PID de serveur trouvé." fi """) os.chmod(stop_script, 0o755) try: subprocess.run(["bash", str(stop_script)]) st.success("Le serveur llama.cpp est en cours d'arrêt...") time.sleep(2) if not check_server_status(): st.success("Serveur llama.cpp arrêté avec succès!") else: st.warning("Le serveur n'a pas pu être arrêté correctement.") except Exception as e: st.error(f"Erreur lors de l'arrêt du serveur: {str(e)}") def load_prompts(): prompts_file = Path("config/prompts.json") if not prompts_file.exists(): default_prompts = { "enrich_qa": { "system": "Tu es un expert DevSecOps. Améliore cette paire question/réponse en y ajoutant des tags, des signatures d'attaques potentielles, et en structurant les informations. Réponds uniquement avec un objet JSON.", "prompt_template": "Question originale: {question}\nRéponse originale: {answer}\nContexte: {context}\n\nFournis une version améliorée sous forme de JSON:\n{{\n \"question\": \"question améliorée\",\n \"answer\": \"réponse améliorée\",\n \"tags\": [\"tag1\", \"tag2\"],\n \"attack_signatures\": [\"signature1\", \"signature2\"]\n}}" }, "analyze_relevance": { "system": "Analyse ce contenu et détermine s'il est pertinent pour DevSecOps. Si pertinent, extrais les signatures d'attaques connues. Réponds uniquement avec un objet JSON.", "prompt_template": "Contenu: {content}...\n\nRéponds sous forme de JSON:\n{{\n \"relevant\": true,\n \"attack_signatures\": [\"signature1\", \"signature2\"],\n \"security_tags\": [\"tag1\", \"tag2\"],\n \"it_relevance_score\": 0-100\n}}" }, "generate_queries": { "system": "Analyse les données actuelles et génère 5 nouvelles requêtes de recherche pour trouver plus de contenu DevSecOps pertinent, en particulier des signatures d'attaques et vulnérabilités. Réponds uniquement avec un objet JSON.", "prompt_template": "Données actuelles: {current_data}...\n\nRéponds sous forme de JSON:\n{{\n \"queries\": [\"query1\", \"query2\", \"query3\", \"query4\", \"query5\"]\n}}" } } with open(prompts_file, 'w') as f: json.dump(default_prompts, f, indent=2) with open(prompts_file, 'r', encoding='utf-8') as f: return json.load(f) PROMPTS = load_prompts() class IAEnricher: def __init__(self): self.server_url = config.LLM_SERVER_URL self.available = check_server_status() if self.available: logger.info("Serveur llama.cpp détecté et prêt.") else: logger.warning("Serveur llama.cpp non disponible. Les fonctionnalités d'IA seront désactivées.") def _get_qwen_response(self, prompt, **kwargs): if not self.available: return None payload = { "prompt": prompt, "n_predict": kwargs.get('n_predict', 512), "temperature": kwargs.get('temperature', 0.7), "stop": ["<|im_end|>", ""] } try: response = requests.post(self.server_url, json=payload, timeout=60) if response.status_code == 200: return response.json().get('content', '') else: logger.error(f"Erreur du serveur LLM: {response.status_code} - {response.text}") return None except requests.exceptions.RequestException as e: logger.error(f"Erreur de connexion au serveur LLM: {str(e)}") return None def enrich_qa_pair(self, question, answer, context=""): if not self.available or not st.session_state.enable_enrichment: return question, answer, [], [] prompt_template = PROMPTS.get("enrich_qa", {}).get("prompt_template", "") system_prompt = PROMPTS.get("enrich_qa", {}).get("system", "") full_prompt = f"{system_prompt}\n\n{prompt_template.format(question=question, answer=answer, context=context[:500])}" response_text = self._get_qwen_response(full_prompt, n_predict=1024) if response_text: try: json_match = re.search(r'\{.*\}', response_text, re.DOTALL) if json_match: enriched_data = json.loads(json_match.group()) return ( enriched_data.get('question', question), enriched_data.get('answer', answer), enriched_data.get('tags', []), enriched_data.get('attack_signatures', []) ) except json.JSONDecodeError as e: logger.warning(f"Erreur de décodage JSON de la réponse IA: {e}") return question, answer, [], [] def analyze_content_relevance(self, content): if not self.available or not st.session_state.enable_enrichment: return True, [], [], 50 prompt_template = PROMPTS.get("analyze_relevance", {}).get("prompt_template", "") system_prompt = PROMPTS.get("analyze_relevance", {}).get("system", "") full_prompt = f"{system_prompt}\n\n{prompt_template.format(content=content[:1500])}" response_text = self._get_qwen_response(full_prompt, n_predict=256, temperature=st.session_state.temperature) if response_text: try: json_match = re.search(r'\{.*\}', response_text, re.DOTALL) if json_match: analysis = json.loads(json_match.group()) return ( analysis.get('relevant', True), analysis.get('attack_signatures', []), analysis.get('security_tags', []), analysis.get('it_relevance_score', 50) ) except json.JSONDecodeError as e: logger.warning(f"Erreur de décodage JSON de la réponse IA: {e}") return True, [], [], 50 def generate_adaptive_queries(self, current_data): if not self.available or not st.session_state.enable_enrichment: return [] prompt_template = PROMPTS.get("generate_queries", {}).get("prompt_template", "") system_prompt = PROMPTS.get("generate_queries", {}).get("system", "") full_prompt = f"{system_prompt}\n\n{prompt_template.format(current_data=current_data[:1000])}" response_text = self._get_qwen_response(full_prompt, n_predict=st.session_state.n_predict) if response_text: try: json_match = re.search(r'\{.*\}', response_text, re.DOTALL) if json_match: queries_data = json.loads(json_match.group()) return queries_data.get('queries', []) except json.JSONDecodeError as e: logger.warning(f"Erreur de décodage JSON de la réponse IA: {e}") return [] ia_enricher = IAEnricher() def check_api_keys(): keys = { 'GITHUB_API_TOKEN': os.getenv('GITHUB_API_TOKEN'), 'HUGGINGFACE_API_TOKEN': os.getenv('HUGGINGFACE_API_TOKEN'), 'NVD_API_KEY': os.getenv('NVD_API_KEY'), 'STACK_EXCHANGE_API_KEY': os.getenv('STACK_EXCHANGE_API_KEY') } valid_keys = {k: v for k, v in keys.items() if v and v != f'your_{k.lower()}_here'} config.USE_API_KEYS = len(valid_keys) == len(keys) if not config.USE_API_KEYS: missing = set(keys.keys()) - set(valid_keys.keys()) logger.warning(f"Clés d'API manquantes ou non configurées: {', '.join(missing)}") logger.warning("Le bot fonctionnera en mode dégradé avec des pauses plus longues.") else: logger.info("Toutes les clés d'API sont configurées.") return config.USE_API_KEYS def make_request(url, headers=None, params=None, is_api_call=True): global REQUEST_COUNT pause_factor = 1 if config.USE_API_KEYS else 2 if config.REQUEST_COUNT >= config.MAX_REQUESTS_BEFORE_PAUSE: pause_time = random.uniform(config.MIN_PAUSE * pause_factor, config.MAX_PAUSE * pause_factor) logger.info(f"Pause de {pause_time:.2f} secondes après {config.REQUEST_COUNT} requêtes...") time.sleep(pause_time) config.REQUEST_COUNT = 0 try: response = requests.get(url, headers=headers, params=params, timeout=30) config.REQUEST_COUNT += 1 if response.status_code == 200: return response elif response.status_code in [401, 403]: logger.warning(f"Accès non autorisé à {url}. Vérifiez vos clés d'API.") return None elif response.status_code == 429: retry_after = int(response.headers.get('Retry-After', 10)) logger.warning(f"Limite de débit atteinte. Pause de {retry_after} secondes...") time.sleep(retry_after) return make_request(url, headers, params, is_api_call) else: logger.warning(f"Statut HTTP {response.status_code} pour {url}") return None except requests.exceptions.RequestException as e: logger.error(f"Erreur lors de la requête à {url}: {str(e)}") return None def clean_html(html_content): if not html_content: return "" text = h.handle(html_content) text = re.sub(r'\s+', ' ', text).strip() return text def save_qa_pair(question, answer, category, subcategory, source, attack_signatures=None, tags=None): global TOTAL_QA_PAIRS if ia_enricher.available and st.session_state.enable_enrichment: enriched_question, enriched_answer, enriched_tags, enriched_signatures = ia_enricher.enrich_qa_pair( question, answer, f"{category}/{subcategory}" ) question = enriched_question answer = enriched_answer tags = list(set((tags or []) + enriched_tags)) attack_signatures = list(set((attack_signatures or []) + enriched_signatures)) save_dir = Path("data") / category / "qa" save_dir.mkdir(parents=True, exist_ok=True) timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") filename = f"{subcategory}_{source}_{TOTAL_QA_PAIRS}_{timestamp}.json" filename = re.sub(r'[^\w\s-]', '', filename).replace(' ', '_') qa_data = { "question": question, "answer": answer, "category": category, "subcategory": subcategory, "source": source, "timestamp": timestamp, "attack_signatures": attack_signatures or [], "tags": tags or [] } try: with open(save_dir / filename, "w", encoding="utf-8") as f: json.dump(qa_data, f, indent=2, ensure_ascii=False) config.TOTAL_QA_PAIRS += 1 st.session_state.total_qa_pairs = config.TOTAL_QA_PAIRS st.session_state.qa_data.append(qa_data) logger.info(f"Paire Q/R sauvegardée: {filename} (Total: {config.TOTAL_QA_PAIRS})") st.session_state.logs.append(f"Sauvegardé: {filename}") except Exception as e: logger.error(f"Erreur lors de la sauvegarde du fichier {filename}: {str(e)}") def collect_kaggle_data(queries): logger.info("Début de la collecte des données Kaggle...") kaggle_dir = Path(".kaggle") kaggle_json = kaggle_dir / "kaggle.json" if not kaggle_json.exists(): logger.warning("Fichier kaggle.json non trouvé. Veuillez le placer dans le dossier .kaggle/") return os.environ['KAGGLE_CONFIG_DIR'] = str(kaggle_dir.absolute()) try: kaggle.api.authenticate() except Exception as e: logger.error(f"Erreur d'authentification Kaggle: {str(e)}") return search_queries = queries.split('\n') if queries else ["cybersecurity", "vulnerability"] if ia_enricher.available and st.session_state.enable_enrichment: adaptive_queries = ia_enricher.generate_adaptive_queries("Initial data keywords: " + ", ".join(search_queries)) search_queries.extend(adaptive_queries) for query in list(set(search_queries)): logger.info(f"Recherche de datasets Kaggle pour: {query}") try: datasets = kaggle.api.dataset_list(search=query, max_results=5) for dataset in datasets: dataset_ref = dataset.ref if ia_enricher.available and st.session_state.enable_enrichment: is_relevant, _, _, relevance_score = ia_enricher.analyze_content_relevance(dataset.title + " " + dataset.subtitle) if not is_relevant or relevance_score < st.session_state.min_relevance: logger.info(f"Dataset non pertinent ({relevance_score}%): {dataset_ref}. Ignoré.") continue logger.info(f"Traitement du dataset: {dataset_ref}") download_dir = Path("data") / "security" / "kaggle" / dataset_ref.replace('/', '_') download_dir.mkdir(parents=True, exist_ok=True) kaggle.api.dataset_download_files(dataset_ref, path=download_dir, unzip=True) for file_path in download_dir.glob('*'): if file_path.is_file() and file_path.suffix.lower() in ['.json', '.csv', '.txt']: try: with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: file_content = f.read()[:5000] is_relevant, signatures, security_tags, _ = ia_enricher.analyze_content_relevance(file_content) if is_relevant: save_qa_pair( question=f"Quelles informations de sécurité contient le fichier {file_path.name} du dataset '{dataset.title}'?", answer=file_content, category="security", subcategory="vulnerability", source=f"kaggle_{dataset_ref}", attack_signatures=signatures, tags=security_tags ) except Exception as e: logger.error(f"Erreur lors du traitement du fichier {file_path}: {str(e)}") time.sleep(random.uniform(2, 4)) except Exception as e: logger.error(f"Erreur lors de la collecte des données Kaggle pour {query}: {str(e)}") logger.info("Collecte des données Kaggle terminée.") def collect_github_data(queries): logger.info("Début de la collecte des données GitHub...") base_url = "https://api.github.com" headers = {"Accept": "application/vnd.github.v3+json"} if config.USE_API_KEYS: token = os.getenv('GITHUB_API_TOKEN') headers["Authorization"] = f"token {token}" search_queries = queries.split('\n') if queries else ["topic:devsecops", "topic:security"] for query in search_queries: logger.info(f"Recherche de repositories pour: {query}") search_url = f"{base_url}/search/repositories" params = {"q": query, "sort": "stars", "per_page": 10} response = make_request(search_url, headers=headers, params=params) if not response: continue data = response.json() for repo in data.get("items", []): repo_name = repo["full_name"].replace("/", "_") logger.info(f"Traitement du repository: {repo['full_name']}") issues_url = f"{base_url}/repos/{repo['full_name']}/issues" issues_params = {"state": "closed", "labels": "security,bug,vulnerability", "per_page": 10} issues_response = make_request(issues_url, headers=headers, params=issues_params) if issues_response: issues_data = issues_response.json() for issue in issues_data: if "pull_request" in issue: continue question = issue.get("title", "") body = clean_html(issue.get("body", "")) if not question or not body or len(body) < 50: continue comments_url = issue.get("comments_url") comments_response = make_request(comments_url, headers=headers) answer_parts = [] if comments_response: comments_data = comments_response.json() for comment in comments_data: comment_body = clean_html(comment.get("body", "")) if comment_body: answer_parts.append(comment_body) if answer_parts: answer = "\n\n".join(answer_parts) save_qa_pair( question=f"{question}: {body}", answer=answer, category="devsecops", subcategory="github", source=f"github_{repo_name}" ) time.sleep(random.uniform(1, 3)) logger.info("Collecte des données GitHub terminée.") def collect_huggingface_data(queries): logger.info("Début de la collecte des données Hugging Face...") base_url = "https://huggingface.co/api" headers = {"Accept": "application/json"} if config.USE_API_KEYS: token = os.getenv('HUGGINGFACE_API_TOKEN') headers["Authorization"] = f"Bearer {token}" search_queries = queries.split('\n') if queries else ["security", "devsecops"] for query in search_queries: logger.info(f"Recherche de datasets pour: {query}") search_url = f"{base_url}/datasets" params = {"search": query, "limit": 10} response = make_request(search_url, headers=headers, params=params) if not response: continue data = response.json() for dataset in data: dataset_id = dataset["id"].replace("/", "_") logger.info(f"Traitement du dataset: {dataset['id']}") dataset_url = f"{base_url}/datasets/{dataset['id']}" dataset_response = make_request(dataset_url, headers=headers) if dataset_response: dataset_data = dataset_response.json() description = clean_html(dataset_data.get("description", "")) if not description or len(description) < 100: continue tags = dataset_data.get("tags", []) tags_text = ", ".join(tags) if tags else "No tags" answer = f"Dataset: {dataset_data.get('id', '')}\nDownloads: {dataset_data.get('downloads', 0)}\nTags: {tags_text}\n\n{description}" save_qa_pair( question=f"What is the {dataset_data.get('id', '')} dataset about?", answer=answer, category="security", subcategory="dataset", source=f"huggingface_{dataset_id}", tags=tags ) time.sleep(random.uniform(1, 3)) logger.info("Collecte des données Hugging Face terminée.") def collect_nvd_data(): logger.info("Début de la collecte des données NVD...") base_url = "https://services.nvd.nist.gov/rest/json/cves/2.0" headers = {"Accept": "application/json"} if config.USE_API_KEYS: key = os.getenv('NVD_API_KEY') headers["apiKey"] = key params = {"resultsPerPage": 50} response = make_request(base_url, headers=headers, params=params) if not response: logger.warning("Impossible de récupérer les données du NVD.") return data = response.json() vulnerabilities = data.get("vulnerabilities", []) logger.info(f"Traitement de {len(vulnerabilities)} vulnérabilités...") for vuln in vulnerabilities: cve_data = vuln.get("cve", {}) cve_id = cve_data.get("id", "") descriptions = cve_data.get("descriptions", []) description = next((desc.get("value", "") for desc in descriptions if desc.get("lang") == "en"), "") if not description or len(description) < 50: continue cvss_v3 = cve_data.get("metrics", {}).get("cvssMetricV31", [{}])[0].get("cvssData", {}) severity = cvss_v3.get("baseSeverity", "UNKNOWN") score = cvss_v3.get("baseScore", 0) references = [ref.get("url", "") for ref in cve_data.get("references", [])] answer = f"CVE ID: {cve_id}\nSeverity: {severity}\nCVSS Score: {score}\nReferences: {', '.join(references[:5])}\n\nDescription: {description}" save_qa_pair( question=f"What is the vulnerability {cve_id}?", answer=answer, category="security", subcategory="vulnerability", source=f"nvd_{cve_id}" ) logger.info("Collecte des données NVD terminée.") def collect_stack_exchange_data(queries): logger.info("Début de la collecte des données Stack Exchange...") base_url = "https://api.stackexchange.com/2.3" params_base = {"pagesize": 10, "order": "desc", "sort": "votes", "filter": "withbody"} if config.USE_API_KEYS: key = os.getenv('STACK_EXCHANGE_API_KEY') params_base["key"] = key sites = [ {"site": "security", "category": "security", "subcategory": "security"}, {"site": "devops", "category": "devsecops", "subcategory": "devops"} ] tags_by_site = { "security": ["security", "vulnerability"], "devops": ["devops", "ci-cd"] } for site_config in sites: site = site_config["site"] category = site_config["category"] subcategory = site_config["subcategory"] logger.info(f"Collecte des données du site: {site}") tags = tags_by_site.get(site, []) + (queries.split('\n') if queries else []) for tag in list(set(tags)): logger.info(f"Recherche de questions avec le tag: {tag}") questions_url = f"{base_url}/questions" params = {**params_base, "site": site, "tagged": tag} response = make_request(questions_url, params=params) if not response: continue questions_data = response.json() for question in questions_data.get("items", []): question_id = question.get("question_id") title = question.get("title", "") body = clean_html(question.get("body", "")) if not body or len(body) < 50: continue answers_url = f"{base_url}/questions/{question_id}/answers" answers_params = {**params_base, "site": site} answers_response = make_request(answers_url, params=answers_params) answer_body = "" if answers_response and answers_response.json().get("items"): answer_body = clean_html(answers_response.json()["items"][0].get("body", "")) if answer_body: save_qa_pair( question=title, answer=answer_body, category=category, subcategory=subcategory, source=f"{site}_{question_id}", tags=question.get("tags", []) ) time.sleep(random.uniform(1, 3)) logger.info("Collecte des données Stack Exchange terminée.") def run_data_collection(sources, queries): st.session_state.bot_status = "En cours d'exécution" st.session_state.logs = [] check_api_keys() progress_bar = st.progress(0) status_text = st.empty() enabled_sources = [s for s, enabled in sources.items() if enabled] total_sources = len(enabled_sources) completed_sources = 0 for source_name in enabled_sources: status_text.text(f"Collecte des données de {source_name}...") try: if source_name == "Kaggle": collect_kaggle_data(queries.get("Kaggle", "")) elif source_name == "GitHub": collect_github_data(queries.get("GitHub", "")) elif source_name == "Hugging Face": collect_huggingface_data(queries.get("Hugging Face", "")) elif source_name == "NVD": collect_nvd_data() elif source_name == "Stack Exchange": collect_stack_exchange_data(queries.get("Stack Exchange", "")) except Exception as e: logger.error(f"Erreur fatale lors de la collecte de {source_name}: {str(e)}") completed_sources += 1 progress_bar.progress(completed_sources / total_sources) st.session_state.bot_status = "Terminé" status_text.text(f"Collecte terminée. Total de paires Q/R sauvegardées: {st.session_state.total_qa_pairs}") def create_visualizations(): if not st.session_state.qa_data: st.info("Aucune donnée à visualiser. Lancez d'abord la collecte de données.") return df = pd.DataFrame(st.session_state.qa_data) st.subheader("Répartition des données par catégorie") category_counts = df['category'].value_counts() fig1 = px.pie(values=category_counts.values, names=category_counts.index, title="Répartition par catégorie") st.plotly_chart(fig1, use_container_width=True) st.subheader("Répartition des données par sous-catégorie") subcategory_counts = df['subcategory'].value_counts().head(10) fig2 = px.bar(x=subcategory_counts.values, y=subcategory_counts.index, orientation='h', title="Top 10 des sous-catégories", labels={'x': 'Nombre de paires Q/R', 'y': 'Sous-catégorie'}) st.plotly_chart(fig2, use_container_width=True) st.subheader("Répartition des données par source") source_counts = df['source'].value_counts().head(10) fig3 = px.bar(x=source_counts.values, y=source_counts.index, orientation='h', title="Top 10 des sources", labels={'x': 'Nombre de paires Q/R', 'y': 'Source'}) st.plotly_chart(fig3, use_container_width=True) st.subheader("Tags les plus fréquents") all_tags = [tag for tags_list in df['tags'] for tag in tags_list] if all_tags: tag_counts = pd.Series(all_tags).value_counts().head(15) fig5 = px.bar(x=tag_counts.values, y=tag_counts.index, orientation='h', title="Top 15 des tags", labels={'x': 'Fréquence', 'y': 'Tag'}) st.plotly_chart(fig5, use_container_width=True) st.subheader("Signatures d'attaques les plus fréquentes") all_signatures = [sig for sigs_list in df['attack_signatures'] for sig in sigs_list] if all_signatures: signature_counts = pd.Series(all_signatures).value_counts().head(15) fig6 = px.bar(x=signature_counts.values, y=signature_counts.index, orientation='h', title="Top 15 des signatures d'attaques", labels={'x': 'Fréquence', 'y': 'Signature'}) st.plotly_chart(fig6, use_container_width=True) def main(): create_initial_setup() st.title("🤖 DevSecOps Data Bot") st.markdown("---") col1, col2, col3 = st.columns(3) with col1: st.metric("Statut du bot", st.session_state.bot_status) with col2: check_server_status() # Met à jour le statut st.metric("Statut du serveur llama.cpp", st.session_state.server_status) with col3: st.metric("Paires Q/R collectées", st.session_state.total_qa_pairs) st.markdown("---") tab1, tab2, tab3, tab4 = st.tabs(["Réglages", "Collecte", "Traitement IA", "Résultats"]) with tab1: st.header("Réglages") subtab1, subtab2, subtab3 = st.tabs(["Clés d'API", "Serveur llama.cpp", "Performance"]) with subtab1: st.subheader("Clés d'API") github_token = st.text_input("GitHub API Token", value=os.getenv('GITHUB_API_TOKEN', ''), type="password") huggingface_token = st.text_input("Hugging Face API Token", value=os.getenv('HUGGINGFACE_API_TOKEN', ''), type="password") nvd_api_key = st.text_input("NVD API Key", value=os.getenv('NVD_API_KEY', ''), type="password") stack_exchange_key = st.text_input("Stack Exchange API Key", value=os.getenv('STACK_EXCHANGE_API_KEY', ''), type="password") if st.button("Sauvegarder les clés d'API"): with open(config.env_path, 'w') as f: f.write(f"GITHUB_API_TOKEN={github_token}\n") f.write(f"HUGGINGFACE_API_TOKEN={huggingface_token}\n") f.write(f"NVD_API_KEY={nvd_api_key}\n") f.write(f"STACK_EXCHANGE_API_KEY={stack_exchange_key}\n") f.write(f"LLM_SERVER_URL={os.getenv('LLM_SERVER_URL', 'http://localhost:8080/completion')}\n") config.load_dotenv(dotenv_path=config.env_path) check_api_keys() st.success("Clés d'API sauvegardées!") with subtab2: st.subheader("Serveur llama.cpp") col1, col2 = st.columns(2) with col1: if st.button("Démarrer le serveur"): start_llm_server() with col2: if st.button("Arrêter le serveur"): stop_llm_server() st.markdown(f"**Statut actuel:** `{st.session_state.server_status}`") llm_url = st.text_input("URL du serveur LLM", value=config.LLM_SERVER_URL) if st.button("Mettre à jour l'URL"): config.LLM_SERVER_URL = llm_url os.environ['LLM_SERVER_URL'] = llm_url st.success("URL mise à jour!") with subtab3: st.subheader("Paramètres de performance") max_requests = st.number_input("Nombre de requêtes avant pause", value=config.MAX_REQUESTS_BEFORE_PAUSE) min_pause = st.number_input("Pause minimum (secondes)", value=config.MIN_PAUSE) max_pause = st.number_input("Pause maximum (secondes)", value=config.MAX_PAUSE) if st.button("Sauvegarder les paramètres"): config.MAX_REQUESTS_BEFORE_PAUSE = max_requests config.MIN_PAUSE = min_pause config.MAX_PAUSE = max_pause st.success("Paramètres sauvegardés!") with tab2: st.header("Collecte de données") sources = { "Kaggle": st.checkbox("Kaggle", value=True), "GitHub": st.checkbox("GitHub", value=True), "Hugging Face": st.checkbox("Hugging Face", value=True), "NVD": st.checkbox("NVD", value=True), "Stack Exchange": st.checkbox("Stack Exchange", value=True) } st.subheader("Requêtes de recherche") queries = { "Kaggle": st.text_area("Requêtes Kaggle (une par ligne)", value="cybersecurity\nvulnerability"), "GitHub": st.text_area("Requêtes GitHub (une par ligne)", value="topic:devsecops\ntopic:security"), "Hugging Face": st.text_area("Requêtes Hugging Face (une par ligne)", value="security\ndevsecops"), "Stack Exchange": st.text_area("Requêtes Stack Exchange (une par ligne)", value="security\nvulnerability") } if st.button("Lancer la collecte"): run_data_collection(sources, queries) with tab3: st.header("Traitement IA") enable_enrichment = st.checkbox("Activer l'enrichissement IA", value=st.session_state.enable_enrichment) st.session_state.enable_enrichment = enable_enrichment if enable_enrichment: st.session_state.min_relevance = st.slider("Score de pertinence minimum", 0, 100, st.session_state.min_relevance) st.session_state.num_queries = st.number_input("Nombre de nouvelles requêtes", value=st.session_state.num_queries) st.subheader("Paramètres du LLM") st.session_state.temperature = st.slider("Température", 0.0, 1.0, 0.7) st.session_state.n_predict = st.number_input("Nombre de tokens de prédiction", value=512) prompts = load_prompts() st.subheader("Prompts") for task, task_data in prompts.items(): st.write(f"**{task}**") st.text_area(f"System Prompt - {task}", value=task_data.get("system", ""), height=100, key=f"system_{task}") st.text_area(f"Prompt Template - {task}", value=task_data.get("prompt_template", ""), height=150, key=f"template_{task}") if st.button("Sauvegarder les prompts"): updated_prompts = { task: { "system": st.session_state[f"system_{task}"], "prompt_template": st.session_state[f"template_{task}"] } for task in prompts } with open("config/prompts.json", 'w') as f: json.dump(updated_prompts, f, indent=2) global PROMPTS PROMPTS = load_prompts() st.success("Prompts sauvegardés!") with tab4: st.header("Résultats") subtab1, subtab2, subtab3 = st.tabs(["Visualisations", "Données", "Logs"]) with subtab1: create_visualizations() with subtab2: st.subheader("Aperçu des données") if st.session_state.qa_data: df = pd.DataFrame(st.session_state.qa_data) st.dataframe(df.tail(10)) st.subheader("Téléchargement des données") col1, col2 = st.columns(2) with col1: json_data = json.dumps(st.session_state.qa_data, indent=2) st.download_button(label="Télécharger JSON", data=json_data, file_name=f"devsecops_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json", mime="application/json") with col2: csv_data = df.to_csv(index=False) st.download_button(label="Télécharger CSV", data=csv_data, file_name=f"devsecops_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv", mime="text/csv") else: st.info("Aucune donnée à afficher. Lancez d'abord la collecte.") with subtab3: st.subheader("Logs d'exécution") for log in st.session_state.logs: st.text(log) if __name__ == "__main__": main()