import os
from pathlib import Path
from typing import List, Dict

from PIL import Image
from ultralytics import YOLO  # object detection
from transformers import pipeline

# ============================================================================
# 1. PRE-TRAINED NSFW DETECTOR
# ============================================================================

class NSFWDetector:
    """
    NSFW content detector based on a pre-trained model
    """

    def __init__(self, model_name: str = "Falconsai/nsfw_image_detection"):
        """
        Initialize the NSFW detector

        Args:
            model_name: Name of the Hugging Face model to use
        """
        print("🔧 Loading NSFW model...")
        try:
            self.classifier = pipeline("image-classification", model=model_name)
            print(f"✓ NSFW model loaded: {model_name}")
        except Exception as e:
            print(f"⚠️ Error while loading the NSFW model: {e}")
            print("   Try: pip install transformers pillow torch")
            self.classifier = None

    def predict(self, image_path: str, threshold: float = 0.7) -> Dict:
        """
        Detect whether an image contains NSFW content

        Args:
            image_path: Path to the image
            threshold: Confidence threshold (0-1)

        Returns:
            {
                'is_nsfw': bool,
                'confidence': float,
                'label': str,
                'all_scores': dict
            }
        """
        if self.classifier is None:
            return {
                'is_nsfw': False,
                'confidence': 0.0,
                'label': 'unknown',
                'all_scores': {},
                'error': 'Classifier not loaded'
            }

        try:
            # Load the image
            image = Image.open(image_path).convert('RGB')

            # Run the classifier
            results = self.classifier(image)

            # Collect every label's score
            all_scores = {item['label']: item['score'] for item in results}

            # Detect NSFW content (labels vary between models)
            nsfw_labels = ['nsfw', 'porn', 'hentai', 'sexy', 'explicit']
            nsfw_score = 0.0
            detected_label = 'safe'

            for item in results:
                label_lower = item['label'].lower()
                if any(nsfw_label in label_lower for nsfw_label in nsfw_labels):
                    if item['score'] > nsfw_score:
                        nsfw_score = item['score']
                        detected_label = item['label']

            is_nsfw = nsfw_score >= threshold

            return {
                'is_nsfw': is_nsfw,
                'confidence': nsfw_score,
                'label': detected_label,
                'all_scores': all_scores
            }

        except Exception as e:
            return {
                'is_nsfw': False,
                'confidence': 0.0,
                'label': 'error',
                'all_scores': {},
                'error': str(e)
            }
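
# --- Usage sketch (the file name "sample.jpg" below is a placeholder, not part
# --- of the original project). Illustrates how NSFWDetector.predict() is meant
# --- to be called on a single file; kept commented out so importing this module
# --- stays side-effect free.
#
#     detector = NSFWDetector()
#     report = detector.predict("sample.jpg", threshold=0.7)
#     if report.get('error'):
#         print(f"Detection failed: {report['error']}")
#     elif report['is_nsfw']:
#         print(f"Flagged as {report['label']} ({report['confidence']:.2%})")
#     else:
#         print("Image looks safe")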

# ============================================================================
# 2. WEAPON DETECTION WITH YOLO
# ============================================================================

class WeaponDetectorYOLO:
    """
    Weapon detector based on YOLOv8
    """

    def __init__(self, model_path: str = "yolov8n.pt", confidence_threshold: float = 0.5):
        """
        Initialize the YOLOv8 weapon detector

        Args:
            model_path: Path to the YOLO weights or a model name
            confidence_threshold: Confidence threshold for detections
        """
        print("🔧 Loading YOLO model...")
        self.confidence_threshold = confidence_threshold
        try:
            self.model = YOLO(model_path)
            print(f"✓ YOLO model loaded: {model_path}")
        except ImportError:
            print("⚠️ ultralytics is not installed!")
            print("   Install it with: pip install ultralytics")
            self.model = None
        except Exception as e:
            print(f"⚠️ Error while loading YOLO: {e}")
            self.model = None

        # Object classes considered weapons
        self.weapon_keywords = [
            'knife', 'gun', 'rifle', 'pistol', 'weapon',
            'scissors', 'firearm', 'handgun', 'revolver'
        ]

    def predict(self, image_path: str) -> Dict:
        """
        Detect weapons in an image

        Returns:
            {
                'weapons_detected': bool,
                'weapon_count': int,
                'weapons': List[dict],
                'confidence': float
            }
        """
        if self.model is None:
            return {
                'weapons_detected': False,
                'weapon_count': 0,
                'weapons': [],
                'confidence': 0.0,
                'error': 'YOLO model not loaded'
            }

        try:
            # Run the detection
            results = self.model(image_path, conf=self.confidence_threshold)

            detected_weapons = []
            max_confidence = 0.0

            # Inspect the results
            for result in results:
                boxes = result.boxes
                for box in boxes:
                    class_id = int(box.cls[0])
                    class_name = result.names[class_id].lower()
                    confidence = float(box.conf[0])

                    # Keep the detection only if the class matches a weapon keyword
                    if any(weapon_keyword in class_name for weapon_keyword in self.weapon_keywords):
                        detected_weapons.append({
                            'type': class_name,
                            'confidence': confidence,
                            'bbox': box.xyxy[0].tolist()  # [x1, y1, x2, y2]
                        })
                        max_confidence = max(max_confidence, confidence)

            return {
                'weapons_detected': len(detected_weapons) > 0,
                'weapon_count': len(detected_weapons),
                'weapons': detected_weapons,
                'confidence': max_confidence
            }

        except Exception as e:
            return {
                'weapons_detected': False,
                'weapon_count': 0,
                'weapons': [],
                'confidence': 0.0,
                'error': str(e)
            }
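
# --- Usage sketch (the file name "street.jpg" below is a placeholder). Shows how
# --- WeaponDetectorYOLO.predict() reports detections. Note that the stock
# --- COCO-trained yolov8n.pt only covers 'knife' and 'scissors' among the weapon
# --- keywords, so a weapon-specific model is needed to detect firearms.
#
#     detector = WeaponDetectorYOLO(confidence_threshold=0.5)
#     report = detector.predict("street.jpg")
#     for weapon in report['weapons']:
#         print(f"{weapon['type']} at {weapon['bbox']} ({weapon['confidence']:.2%})")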

# ============================================================================
# 3. FULL IMAGE MODERATOR
# ============================================================================

class ImageModerator:
    """
    Image moderator combining NSFW and weapon detection
    """

    def __init__(self,
                 nsfw_threshold: float = 0.7,
                 weapon_threshold: float = 0.5,
                 yolo_model: str = "yolov8n.pt"):
        """
        Initialize the image moderator

        Args:
            nsfw_threshold: Threshold for NSFW detection (0-1)
            weapon_threshold: Threshold for weapon detection (0-1)
            yolo_model: YOLO model to use
        """
        self.nsfw_detector = NSFWDetector()
        self.weapon_detector = WeaponDetectorYOLO(
            model_path=yolo_model,
            confidence_threshold=weapon_threshold
        )
        self.nsfw_threshold = nsfw_threshold
        self.weapon_threshold = weapon_threshold

    def moderate_image(self, image_path: str) -> Dict:
        """
        Moderate a single image (NSFW + weapons)

        Returns:
            {
                'approved': bool,
                'rejection_reason': str or None,
                'nsfw_result': dict,
                'weapon_result': dict
            }
        """
        # Make sure the image exists
        if not os.path.exists(image_path):
            return {
                'approved': False,
                'rejection_reason': f"Image not found: {image_path}",
                'nsfw_result': {},
                'weapon_result': {}
            }

        # 1. NSFW check
        nsfw_result = self.nsfw_detector.predict(image_path, self.nsfw_threshold)

        if nsfw_result.get('is_nsfw', False):
            return {
                'approved': False,
                'rejection_reason': f"NSFW content detected ({nsfw_result['label']}, confidence: {nsfw_result['confidence']:.2%})",
                'nsfw_result': nsfw_result,
                'weapon_result': {}
            }

        # 2. Weapon detection
        weapon_result = self.weapon_detector.predict(image_path)

        if weapon_result.get('weapons_detected', False):
            weapons = weapon_result['weapons']
            weapon_types = [w['type'] for w in weapons]
            return {
                'approved': False,
                'rejection_reason': f"Weapon(s) detected: {', '.join(weapon_types)} (confidence: {weapon_result['confidence']:.2%})",
                'nsfw_result': nsfw_result,
                'weapon_result': weapon_result
            }

        # 3. Image approved
        return {
            'approved': True,
            'rejection_reason': None,
            'nsfw_result': nsfw_result,
            'weapon_result': weapon_result
        }

    def moderate_images(self, image_paths: List[str]) -> Dict:
        """
        Moderate all images attached to a listing

        Returns:
            {
                'all_approved': bool,
                'rejection_reasons': List[str],
                'results': List[dict]
            }
        """
        results = []
        rejection_reasons = []

        for image_path in image_paths:
            result = self.moderate_image(image_path)
            results.append(result)

            if not result['approved']:
                rejection_reasons.append(f"{Path(image_path).name}: {result['rejection_reason']}")

        return {
            'all_approved': len(rejection_reasons) == 0,
            'rejection_reasons': rejection_reasons,
            'results': results
        }
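
# --- Minimal end-to-end sketch (assumption: "photo1.jpg" and "photo2.jpg" are
# --- placeholder paths, not part of the original listing). Missing files are
# --- simply rejected with an "Image not found" reason, so this runs as-is.
if __name__ == "__main__":
    moderator = ImageModerator(nsfw_threshold=0.7, weapon_threshold=0.5)
    report = moderator.moderate_images(["photo1.jpg", "photo2.jpg"])
    if report['all_approved']:
        print("✓ All images approved")
    else:
        for reason in report['rejection_reasons']:
            print(f"✗ {reason}")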