| |
|
| | |
| | |
| | |
| | |
| |
|
| | import torch |
| | import torch.nn as nn |
| | import torchvision.transforms as transforms |
| | import cv2 |
| | import numpy as np |
| | from PIL import Image, ImageDraw, ImageFont |
| | import os |
| | import matplotlib.pyplot as plt |
| | from pathlib import Path |
| | import time |
| | from tqdm import tqdm |
| |
|
| | |
| | |
| | |
| |
|
| | |
# --- Paths ------------------------------------------------------------------
# Trained checkpoint produced by the training script.
MODEL_PATH = r"../Training/best_face_classifier_real_data.pth"
# NOTE(review): drive-root-relative Windows path (no drive letter or user
# directory) — confirm this resolves on the target machine.
TEST_IMAGES_PATH = r"\Pictures\Saved Pictures"
OUTPUT_PATH = "test_results"  # created on demand; holds CSV, images, plots

# --- Model architecture (must match the training-time definition exactly) ---
IMAGE_SIZE = 224                 # model input is IMAGE_SIZE x IMAGE_SIZE RGB
INPUT_CHANNELS = 3               # RGB
NUM_CLASSES = 1                  # single logit -> sigmoid -> REAL/FAKE
CONV_FILTERS = [128, 256, 512]   # output channels of the three conv stages
FC_SIZES = [1024, 512]           # widths of the two hidden FC layers
DROPOUT_RATES = [0.3, 0.5]       # [conv-stage dropout, FC dropout]

# --- Face detection (OpenCV Haar cascade parameters) ------------------------
FACE_DETECTION_SCALE_FACTOR = 1.1   # image pyramid scale step
FACE_DETECTION_MIN_NEIGHBORS = 5    # higher -> fewer false positives
MIN_FACE_SIZE = (30, 30)            # ignore detections smaller than this
IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp']

# --- Testing behaviour ------------------------------------------------------
CONFIDENCE_THRESHOLD = 0.8   # sigmoid output above this => predicted REAL
SAVE_RESULTS = True          # write CSV + annotated images
SHOW_PLOTS = True            # build and display the summary grid
# NOTE(review): the two flags below are defined but never checked in this
# file — saving/summary is gated on SAVE_RESULTS / SHOW_PLOTS instead.
SAVE_INDIVIDUAL_IMAGES = True
CREATE_COMPREHENSIVE_SUMMARY = True
| |
|
| | |
| | |
| | |
| |
|
class ImprovedFaceClassifierCNN(nn.Module):
    """CNN face classifier mirroring the training-time architecture.

    The layer ordering inside each ``nn.Sequential`` must stay byte-for-byte
    identical to the training script, because the saved state dict is keyed
    by sequential indices (``features.0.weight`` etc.).
    """

    def __init__(self):
        super().__init__()

        # Three double-conv stages (Conv-BN-ReLU x2, 2x2 max-pool, dropout),
        # then adaptive pooling to a fixed 7x7 spatial size.
        stage_channels = [INPUT_CHANNELS] + list(CONV_FILTERS)
        feature_layers = []
        for in_ch, out_ch in zip(stage_channels[:-1], stage_channels[1:]):
            feature_layers.extend([
                nn.Conv2d(in_ch, out_ch, 3, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_ch, out_ch, 3, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(2, 2),
                nn.Dropout(DROPOUT_RATES[0]),
            ])
        feature_layers.append(nn.AdaptiveAvgPool2d((7, 7)))
        self.features = nn.Sequential(*feature_layers)

        # Classifier head. Note: only the first FC block has BatchNorm,
        # matching the checkpoint's layout.
        self.classifier = nn.Sequential(
            nn.Linear(CONV_FILTERS[2] * 7 * 7, FC_SIZES[0]),
            nn.BatchNorm1d(FC_SIZES[0]),
            nn.ReLU(inplace=True),
            nn.Dropout(DROPOUT_RATES[1]),

            nn.Linear(FC_SIZES[0], FC_SIZES[1]),
            nn.ReLU(inplace=True),
            nn.Dropout(DROPOUT_RATES[1]),

            nn.Linear(FC_SIZES[1], NUM_CLASSES),
        )

    def forward(self, x):
        """Return raw logits of shape (batch, NUM_CLASSES)."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
| |
|
| | |
| | |
| | |
| |
|
class FaceProcessor:
    """Face detection (OpenCV Haar cascade) and preprocessing for the model."""

    def __init__(self):
        # Frontal-face Haar cascade shipped with OpenCV.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

        # DNN-based detection is not implemented. The previous code wrapped
        # these assignments in a try/except with a bare `except:` that set
        # the exact same values on both paths — replaced with plain
        # assignments. Attributes kept for backward compatibility.
        self.net = None
        self.use_dnn = False

        # Preprocessing pipeline; ImageNet mean/std normalization is assumed
        # to match what the model was trained with.
        self.transform = transforms.Compose([
            transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def detect_faces(self, image):
        """Detect faces in image and return bounding boxes with duplicate removal.

        Args:
            image: PIL.Image (RGB) or an OpenCV BGR ndarray.

        Returns:
            Sequence of (x, y, w, h) boxes — a numpy array, or an empty tuple
            when OpenCV finds nothing.
        """
        if isinstance(image, Image.Image):
            # PIL is RGB; OpenCV expects BGR.
            image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        else:
            image_cv = image.copy()

        # Haar cascades operate on grayscale.
        gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)

        faces = self.face_cascade.detectMultiScale(
            gray,
            scaleFactor=FACE_DETECTION_SCALE_FACTOR,
            minNeighbors=FACE_DETECTION_MIN_NEIGHBORS,
            minSize=MIN_FACE_SIZE,
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        if len(faces) > 1:
            faces = self._remove_duplicate_faces(faces)

        return faces

    def _remove_duplicate_faces(self, faces, overlap_threshold=0.15,
                                containment_threshold=0.5):
        """Remove duplicate/overlapping face detections using improved NMS.

        Larger boxes win ties: detections are sorted by area (descending) and
        a candidate is dropped when, against any already-kept box, either its
        IoU exceeds `overlap_threshold` or the intersection covers more than
        `containment_threshold` of the smaller box (catches a small box fully
        inside a big one, where IoU alone is low).

        Args:
            faces: sequence of (x, y, w, h) detections.
            overlap_threshold: IoU above which a detection is a duplicate.
            containment_threshold: intersection/smaller-area ratio above which
                a detection is a duplicate (was hard-coded to 0.5).

        Returns:
            numpy array of the surviving (x, y, w, h) boxes, area-sorted.
        """
        if len(faces) <= 1:
            return faces

        face_list = list(faces)

        # Precompute area and corner coordinates for every detection.
        face_info = []
        for i, (x, y, w, h) in enumerate(face_list):
            area = w * h
            face_info.append({
                'index': i,
                'bbox': (x, y, w, h),
                'area': area,
                'x1': x, 'y1': y, 'x2': x + w, 'y2': y + h
            })

        # Largest boxes first, so the biggest detection of a cluster is kept.
        face_info.sort(key=lambda f: f['area'], reverse=True)

        keep_indices = []

        for i, current_face in enumerate(face_info):
            should_keep = True

            for kept_idx in keep_indices:
                kept_face = face_info[kept_idx]

                # Intersection rectangle between candidate and kept box.
                x1 = max(current_face['x1'], kept_face['x1'])
                y1 = max(current_face['y1'], kept_face['y1'])
                x2 = min(current_face['x2'], kept_face['x2'])
                y2 = min(current_face['y2'], kept_face['y2'])

                if x1 < x2 and y1 < y2:
                    intersection = (x2 - x1) * (y2 - y1)

                    union = current_face['area'] + kept_face['area'] - intersection
                    iou = intersection / union if union > 0 else 0

                    smaller_area = min(current_face['area'], kept_face['area'])
                    overlap_ratio = intersection / smaller_area if smaller_area > 0 else 0

                    if iou > overlap_threshold or overlap_ratio > containment_threshold:
                        should_keep = False
                        break

            if should_keep:
                keep_indices.append(i)

        filtered_faces = np.array([face_info[i]['bbox'] for i in keep_indices])

        if len(faces) != len(filtered_faces):
            print(f"   [NMS] Removed {len(faces) - len(filtered_faces)} duplicate faces "
                  f"({len(faces)} → {len(filtered_faces)})")

        return filtered_faces

    def crop_face(self, image, face_bbox, expand_ratio=0.2):
        """Crop face from image with some padding.

        Args:
            image: PIL.Image or OpenCV BGR ndarray.
            face_bbox: (x, y, w, h) detection box.
            expand_ratio: fraction of w/h added as padding on each side.

        Returns:
            (face_crop as PIL.Image, clamped (x1, y1, x2, y2) crop box).
        """
        x, y, w, h = face_bbox

        pad_x = int(w * expand_ratio)
        pad_y = int(h * expand_ratio)

        # Clamp the padded box to the image bounds.
        x1 = max(0, x - pad_x)
        y1 = max(0, y - pad_y)
        x2 = min(image.width if isinstance(image, Image.Image) else image.shape[1], x + w + pad_x)
        y2 = min(image.height if isinstance(image, Image.Image) else image.shape[0], y + h + pad_y)

        if isinstance(image, Image.Image):
            face_crop = image.crop((x1, y1, x2, y2))
        else:
            # OpenCV array: slice, then convert BGR -> RGB PIL image.
            face_crop = image[y1:y2, x1:x2]
            face_crop = Image.fromarray(cv2.cvtColor(face_crop, cv2.COLOR_BGR2RGB))

        return face_crop, (x1, y1, x2, y2)

    def preprocess_face(self, face_image):
        """Preprocess a face image into a (1, C, H, W) model input batch."""
        if not isinstance(face_image, Image.Image):
            face_image = Image.fromarray(face_image)

        face_tensor = self.transform(face_image)

        # Add the batch dimension expected by the model.
        face_batch = face_tensor.unsqueeze(0)

        return face_batch
| |
|
| | |
| | |
| | |
| |
|
class FaceClassifierTester:
    """Test trained face classifier on new images.

    Loads an ImprovedFaceClassifierCNN checkpoint, detects faces with
    FaceProcessor, classifies each detected face as REAL/FAKE, and
    (optionally) writes CSV results, annotated images and summary plots
    under OUTPUT_PATH.
    """

    def __init__(self, model_path, device='auto'):
        # Device must be resolved before the checkpoint is mapped onto it.
        self.device = self._setup_device(device)
        self.model = self._load_model(model_path)
        self.face_processor = FaceProcessor()
        self.results = []  # accumulates results across test_folder() calls

        print(f"[*] Face Classifier Tester initialized")
        print(f"   Device: {self.device}")
        print(f"   Model: {model_path}")

    def _setup_device(self, device):
        """Resolve the torch device; 'auto' prefers CUDA when available."""
        if device == 'auto':
            if torch.cuda.is_available():
                device = torch.device('cuda:0')
                print(f"[GPU] Using GPU: {torch.cuda.get_device_name(0)}")
            else:
                device = torch.device('cpu')
                print("[CPU] Using CPU")
        else:
            device = torch.device(device)

        return device

    def _load_model(self, model_path):
        """Load trained model weights from a checkpoint file.

        Accepts either a full training checkpoint (a dict containing
        'model_state_dict') or a bare state dict. Re-raises any load error
        after printing a diagnostic.
        """
        try:
            checkpoint = torch.load(model_path, map_location=self.device)

            model = ImprovedFaceClassifierCNN()

            if 'model_state_dict' in checkpoint:
                model.load_state_dict(checkpoint['model_state_dict'])
                print(f"[OK] Model loaded from checkpoint")
                print(f"   Epoch: {checkpoint.get('epoch', 'Unknown')}")
                # BUGFIX: the old code formatted checkpoint.get('val_acc',
                # 'Unknown') with :.2f, which raised TypeError whenever the
                # checkpoint had no 'val_acc'. Format only numeric values.
                val_acc = checkpoint.get('val_acc')
                if isinstance(val_acc, (int, float)):
                    print(f"   Validation Accuracy: {val_acc:.2f}%")
                else:
                    print(f"   Validation Accuracy: Unknown")
            else:
                # Bare state dict (no training metadata).
                model.load_state_dict(checkpoint)
                print(f"[OK] Model loaded successfully")

            model.to(self.device)
            model.eval()  # inference mode: freezes dropout / BN statistics

            return model

        except Exception as e:
            print(f"[ERROR] Error loading model: {e}")
            print("Make sure the model file exists and matches the architecture")
            raise

    def classify_face(self, face_image):
        """Classify a single cropped face image.

        Returns:
            dict with 'prediction' ('REAL'/'FAKE', or 'ERROR' on failure),
            'confidence', 'probability' (sigmoid output) and 'raw_logit'.
        """
        try:
            face_tensor = self.face_processor.preprocess_face(face_image)
            face_tensor = face_tensor.to(self.device)

            with torch.no_grad():
                logits = self.model(face_tensor)
                probability = torch.sigmoid(logits).cpu().numpy()[0][0]

            # NOTE(review): the decision threshold is CONFIDENCE_THRESHOLD
            # (0.8) rather than 0.5, deliberately biasing labels toward FAKE
            # — confirm this matches the training objective.
            prediction = "REAL" if probability > CONFIDENCE_THRESHOLD else "FAKE"
            confidence = probability if prediction == "REAL" else (1 - probability)

            return {
                'prediction': prediction,
                'confidence': confidence,
                'probability': probability,
                'raw_logit': logits.cpu().numpy()[0][0]
            }

        except Exception as e:
            print(f"[ERROR] Error in classification: {e}")
            return {
                'prediction': 'ERROR',
                'confidence': 0.0,
                'probability': 0.0,
                'raw_logit': 0.0
            }

    def process_image(self, image_path):
        """Process a single image: detect faces and classify them.

        Returns:
            dict with 'status' in {'success', 'no_faces', 'error'}; on
            success it also carries the PIL image and per-face results.
        """
        try:
            image = Image.open(image_path).convert('RGB')
            image_name = os.path.basename(image_path)

            print(f"\n[PROCESSING] {image_name}")

            faces = self.face_processor.detect_faces(image)

            if len(faces) == 0:
                print(f"   [WARNING] No faces detected in {image_name}")
                return {
                    'image_path': image_path,
                    'image_name': image_name,
                    'num_faces': 0,
                    'faces': [],
                    'status': 'no_faces'
                }

            print(f"   [FACES] Found {len(faces)} face(s)")

            face_results = []
            for i, face_bbox in enumerate(faces):
                # Crop with padding so the classifier sees some context.
                face_crop, expanded_bbox = self.face_processor.crop_face(image, face_bbox)

                classification = self.classify_face(face_crop)

                face_results.append({
                    'face_id': i,
                    'bbox': face_bbox.tolist(),
                    'expanded_bbox': expanded_bbox,
                    'face_crop': face_crop,
                    'classification': classification
                })

                print(f"   Face {i+1}: {classification['prediction']} "
                      f"({classification['confidence']:.1%} confidence)")

            return {
                'image_path': image_path,
                'image_name': image_name,
                'image': image,
                'num_faces': len(faces),
                'faces': face_results,
                'status': 'success'
            }

        except Exception as e:
            print(f"[ERROR] Error processing {image_path}: {e}")
            return {
                'image_path': image_path,
                'image_name': os.path.basename(image_path),
                'num_faces': 0,
                'faces': [],
                'status': 'error',
                'error': str(e)
            }

    def test_folder(self, folder_path, max_images=None):
        """Test all images in a folder (non-recursive).

        Args:
            folder_path: directory scanned for files with IMAGE_EXTENSIONS
                (both lower and upper case).
            max_images: optional cap on the number of images processed.

        Returns:
            List of per-image result dicts (see process_image).
        """
        print(f"\n[TESTING] FACE CLASSIFIER")
        print(f"="*60)
        print(f"Test folder: {folder_path}")
        print(f"Model: {MODEL_PATH}")

        if not os.path.exists(folder_path):
            print(f"[ERROR] Test folder not found: {folder_path}")
            return []

        # Glob both cases of each extension; the resolve() pass below
        # de-duplicates double matches on case-insensitive filesystems
        # (Windows), where *.jpg and *.JPG hit the same file.
        image_files_set = set()
        for ext in IMAGE_EXTENSIONS:
            image_files_set.update(Path(folder_path).glob(f"*{ext}"))
            image_files_set.update(Path(folder_path).glob(f"*{ext.upper()}"))

        image_files = []
        seen_paths = set()
        for file_path in image_files_set:
            resolved_path = file_path.resolve()
            if resolved_path not in seen_paths:
                image_files.append(file_path)
                seen_paths.add(resolved_path)

        # FIX: set iteration order is nondeterministic; sort so repeated runs
        # (and the max_images cut below) process the same files in the same
        # order.
        image_files.sort()

        if not image_files:
            print(f"[ERROR] No images found in {folder_path}")
            print(f"   Looking for extensions: {IMAGE_EXTENSIONS}")
            return []

        if max_images:
            image_files = image_files[:max_images]

        print(f"[FILES] Found {len(image_files)} images to process")

        results = []
        start_time = time.time()

        for image_path in tqdm(image_files, desc="Processing images"):
            result = self.process_image(str(image_path))
            results.append(result)
            self.results.append(result)

        total_time = time.time() - start_time

        self._print_summary(results, total_time)

        if SAVE_RESULTS:
            self._save_results(results)
            self._save_individual_images(results)

        if SHOW_PLOTS:
            self._create_comprehensive_summary(results)

        return results

    def _print_summary(self, results, total_time):
        """Print testing summary (counts, timing, REAL/FAKE breakdown)."""
        print(f"\n[SUMMARY] TESTING SUMMARY")
        print(f"="*40)

        total_images = len(results)
        successful = len([r for r in results if r['status'] == 'success'])
        total_faces = sum(r['num_faces'] for r in results)
        no_faces = len([r for r in results if r['status'] == 'no_faces'])
        errors = len([r for r in results if r['status'] == 'error'])

        print(f"Images processed: {total_images}")
        print(f"Successful: {successful}")
        print(f"No faces detected: {no_faces}")
        print(f"Errors: {errors}")
        print(f"Total faces detected: {total_faces}")
        print(f"Processing time: {total_time:.1f}s")
        # BUGFIX: guard the average against an empty result list.
        if total_images > 0:
            print(f"Average time per image: {total_time/total_images:.2f}s")

        if total_faces > 0:
            real_faces = sum(len([f for f in r['faces'] if f['classification']['prediction'] == 'REAL'])
                             for r in results if r['status'] == 'success')
            fake_faces = total_faces - real_faces

            print(f"\n[RESULTS] Classification Results:")
            print(f"Real faces: {real_faces} ({real_faces/total_faces:.1%})")
            print(f"Fake faces: {fake_faces} ({fake_faces/total_faces:.1%})")

    def _save_results(self, results):
        """Write one CSV row per classified face to OUTPUT_PATH."""
        os.makedirs(OUTPUT_PATH, exist_ok=True)

        import csv
        csv_path = os.path.join(OUTPUT_PATH, 'classification_results.csv')

        with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Image', 'Face_ID', 'Prediction', 'Confidence', 'Probability', 'Bbox_X', 'Bbox_Y', 'Bbox_W', 'Bbox_H'])

            for result in results:
                if result['status'] == 'success':
                    for face in result['faces']:
                        bbox = face['bbox']
                        cls = face['classification']
                        writer.writerow([
                            result['image_name'],
                            face['face_id'],
                            cls['prediction'],
                            f"{cls['confidence']:.3f}",
                            f"{cls['probability']:.3f}",
                            bbox[0], bbox[1], bbox[2], bbox[3]
                        ])

        print(f"[SAVED] Results saved to: {csv_path}")

    def _save_individual_images(self, results):
        """Save each processed image with bounding boxes and classifications."""
        os.makedirs(OUTPUT_PATH, exist_ok=True)
        individual_dir = os.path.join(OUTPUT_PATH, 'annotated_images')
        os.makedirs(individual_dir, exist_ok=True)

        saved_count = 0
        for result in results:
            if result['status'] in ['success', 'no_faces']:
                try:
                    # Reuse the in-memory image when available to avoid a
                    # second disk read.
                    if 'image' in result:
                        image = result['image'].copy()
                    else:
                        image = Image.open(result['image_path']).convert('RGB')

                    draw = ImageDraw.Draw(image)

                    # FIX: narrowed the bare `except:` — truetype raises
                    # OSError when the font file is missing.
                    try:
                        font = ImageFont.truetype("arial.ttf", 20)
                    except OSError:
                        font = ImageFont.load_default()

                    if result['num_faces'] > 0:
                        for face in result['faces']:
                            bbox = face['bbox']
                            cls = face['classification']

                            # Green = REAL, red = FAKE.
                            color = 'green' if cls['prediction'] == 'REAL' else 'red'

                            x, y, w, h = bbox
                            draw.rectangle([x, y, x+w, y+h], outline=color, width=3)

                            label = f"{cls['prediction']} ({cls['confidence']:.1%})"

                            # Filled background behind the label for contrast.
                            text_bbox = draw.textbbox((x, y-25), label, font=font)
                            draw.rectangle(text_bbox, fill=color)

                            draw.text((x, y-25), label, fill='white', font=font)
                    else:
                        draw.text((10, 10), "NO FACES DETECTED", fill='orange', font=font)

                    base_name = os.path.splitext(result['image_name'])[0]
                    output_filename = f"{base_name}_annotated.jpg"
                    output_path = os.path.join(individual_dir, output_filename)

                    image.save(output_path, 'JPEG', quality=95)
                    saved_count += 1

                except Exception as e:
                    # Best-effort: one bad image must not abort the batch.
                    print(f"[WARNING] Could not save annotated image for {result['image_name']}: {e}")

        print(f"[SAVED] {saved_count} annotated images saved to: {individual_dir}")

    def _visualize_results(self, results, max_display=6):
        """Visualize results with matplotlib (limited sample).

        NOTE(review): currently not called by test_folder (which uses
        _create_comprehensive_summary instead); kept as a public-ish helper.
        """
        try:
            # Only images where at least one face was classified.
            display_results = [r for r in results if r['status'] == 'success' and r['num_faces'] > 0]
            display_results = display_results[:max_display]

            if not display_results:
                print("No results to visualize")
                return

            fig, axes = plt.subplots(2, 3, figsize=(15, 10))
            axes = axes.flatten()

            for i, result in enumerate(display_results):
                if i >= len(axes):
                    break

                ax = axes[i]
                image = result['image']

                # Draw on a copy so the cached result image stays clean.
                draw_image = image.copy()
                draw = ImageDraw.Draw(draw_image)

                for face in result['faces']:
                    bbox = face['bbox']
                    cls = face['classification']

                    color = 'green' if cls['prediction'] == 'REAL' else 'red'

                    draw.rectangle([bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]],
                                   outline=color, width=3)

                    label = f"{cls['prediction']} ({cls['confidence']:.1%})"
                    draw.text((bbox[0], bbox[1]-20), label, fill=color)

                ax.imshow(draw_image)
                ax.set_title(f"{result['image_name']}\n{result['num_faces']} face(s)")
                ax.axis('off')

            # Blank out unused grid cells.
            for i in range(len(display_results), len(axes)):
                axes[i].axis('off')

            plt.tight_layout()
            plt.savefig(os.path.join(OUTPUT_PATH, 'sample_classification.png'), dpi=150, bbox_inches='tight')
            plt.show()

        except Exception as e:
            print(f"[WARNING] Could not create sample visualization: {e}")

    def _create_comprehensive_summary(self, results):
        """Create a comprehensive grid summary of all processed images."""
        try:
            all_results = results

            if not all_results:
                print("No results to create comprehensive summary")
                return

            total_images = len(all_results)
            cols = 4
            rows = (total_images + cols - 1) // cols

            # BUGFIX: squeeze=False always yields a 2-D axes array, so
            # flatten() is safe for any grid size. The old special-casing for
            # rows == 1 wrapped the axes in a plain list when total_images
            # was 1, so axes_flat[0] was a list and ax.imshow crashed.
            fig, axes = plt.subplots(rows, cols, figsize=(20, 5*rows), squeeze=False)
            axes_flat = axes.flatten()

            for i, result in enumerate(all_results):
                ax = axes_flat[i]

                try:
                    # Prefer the cached in-memory image.
                    if 'image' in result and result['image'] is not None:
                        image = result['image'].copy()
                    else:
                        image = Image.open(result['image_path']).convert('RGB')

                    draw_image = image.copy()
                    draw = ImageDraw.Draw(draw_image)

                    title_parts = [result['image_name'][:20]]

                    if result['status'] == 'success':
                        if result['num_faces'] > 0:
                            for face in result['faces']:
                                bbox = face['bbox']
                                cls = face['classification']

                                color = 'green' if cls['prediction'] == 'REAL' else 'red'

                                x, y, w, h = bbox
                                draw.rectangle([x, y, x+w, y+h], outline=color, width=2)

                                label = f"{cls['prediction']}\n{cls['confidence']:.0%}"
                                draw.text((x, y-15), label, fill=color)

                            title_parts.append(f"{result['num_faces']} face(s)")

                            real_count = sum(1 for f in result['faces'] if f['classification']['prediction'] == 'REAL')
                            fake_count = result['num_faces'] - real_count
                            if real_count > 0:
                                title_parts.append(f"Real: {real_count}")
                            if fake_count > 0:
                                title_parts.append(f"Fake: {fake_count}")
                        else:
                            title_parts.append("No faces")
                            draw.text((10, 10), "NO FACES", fill='orange')

                    elif result['status'] == 'no_faces':
                        title_parts.append("No faces detected")
                        draw.text((10, 10), "NO FACES", fill='orange')

                    elif result['status'] == 'error':
                        title_parts.append("Error")
                        draw.text((10, 10), "ERROR", fill='red')

                    ax.imshow(draw_image)
                    ax.set_title('\n'.join(title_parts), fontsize=8)
                    ax.axis('off')

                except Exception as e:
                    # Per-cell fallback: show the failure without aborting
                    # the whole grid.
                    ax.text(0.5, 0.5, f"Error loading\n{result['image_name']}",
                            ha='center', va='center', transform=ax.transAxes)
                    ax.set_title(f"Error: {result['image_name'][:20]}")
                    ax.axis('off')

            # Blank out unused grid cells.
            for i in range(total_images, len(axes_flat)):
                axes_flat[i].axis('off')

            total_faces = sum(r['num_faces'] for r in results if r['status'] == 'success')
            real_faces = sum(len([f for f in r['faces'] if f['classification']['prediction'] == 'REAL'])
                             for r in results if r['status'] == 'success')
            fake_faces = total_faces - real_faces

            fig.suptitle(f"Face Classification Results - {total_images} Images, {total_faces} Faces\n"
                         f"Real: {real_faces} ({real_faces/total_faces*100 if total_faces > 0 else 0:.1f}%), "
                         f"Fake: {fake_faces} ({fake_faces/total_faces*100 if total_faces > 0 else 0:.1f}%)",
                         fontsize=16, y=0.98)

            plt.tight_layout()
            plt.subplots_adjust(top=0.92)

            summary_path = os.path.join(OUTPUT_PATH, 'comprehensive_summary.png')
            plt.savefig(summary_path, dpi=200, bbox_inches='tight')
            print(f"[SAVED] Comprehensive summary saved to: {summary_path}")

            plt.show()

        except Exception as e:
            print(f"[WARNING] Could not create comprehensive summary: {e}")
| |
|
| | |
| | |
| | |
| |
|
def main():
    """Entry point: validate the configured paths, then run the tester."""
    print("[*] FACE CLASSIFIER TESTING")
    print("="*50)

    # Guard clause: the trained checkpoint must exist before anything runs.
    model_present = os.path.exists(MODEL_PATH)
    if not model_present:
        print(f"[ERROR] Model file not found: {MODEL_PATH}")
        print("Please make sure you have trained the model first.")
        print("Expected file: best_face_classifier_real_data.pth")
        return

    # Guard clause: the test-image folder must exist as well.
    folder_present = os.path.exists(TEST_IMAGES_PATH)
    if not folder_present:
        print(f"[ERROR] Test images folder not found: {TEST_IMAGES_PATH}")
        print("Please check the path and make sure it contains images.")
        return

    try:
        tester = FaceClassifierTester(MODEL_PATH)
        results = tester.test_folder(TEST_IMAGES_PATH, max_images=20)

        print(f"\n[OK] Testing completed!")
        print(f"Check '{OUTPUT_PATH}' folder for detailed results.")
    except Exception as e:
        # Surface the full traceback so failures are diagnosable from logs.
        print(f"[ERROR] Testing failed: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()
| |
|