import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix

# Output locations on the user's desktop
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
OUTPUT_DIR = os.path.join(DESKTOP_PATH, "CNN_model")

# Seed every RNG in play so training runs are reproducible
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    # Seed ALL visible GPUs (manual_seed alone only covers the current device)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # cuDNN autotuning selects kernels non-deterministically; keep it off
    # so the deterministic flag above actually holds
    torch.backends.cudnn.benchmark = False

# 配置参数
class Config:
    # 图像尺寸适合手机屏幕比例
    IMAGE_SIZE = (180, 320) 
    BATCH_SIZE = 32
    EPOCHS = 50
    LEARNING_RATE = 1e-4 #初始学习率
    # 数据集路径，要至少包含normal和abnormal两个子文件夹
    DATA_DIR = os.path.join(OUTPUT_DIR, "phone_screen_dataset")
    # 模型保存路径
    MODEL_SAVE_PATH = os.path.join(DESKTOP_PATH, "screen_glitch_detector.pth")
    # 设备配置
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # 早停轮数
    PATIENCE = 5
    # 训练集与验证集划分比例
    VALIDATION_SPLIT = 0.2

# 数据集类
class ScreenDataset(Dataset):
    def __init__(self, image_paths, labels, transform=None):
        self.image_paths = image_paths
        self.labels = labels
        self.transform = transform
        
    def __len__(self):
        return len(self.image_paths)
    
    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        image = Image.open(img_path).convert('RGB')
        label = self.labels[idx]
        
        if self.transform:
            image = self.transform(image)
            
        return image, label

# CNN模型
class ScreenGlitchCNN(nn.Module):
    def __init__(self):
        super(ScreenGlitchCNN, self).__init__()
        # 特征提取部分
        self.features = nn.Sequential(
            # 第一个卷积块
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            
            # 第二个卷积块
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            
            # 第三个卷积块
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            
            # 第四个卷积块
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        
        # 计算特征图大小用于全连接层
        self.feature_size = self._calculate_feature_size()
        
        # 分类部分
        self.classifier = nn.Sequential(
            nn.Linear(self.feature_size, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(128, 1),
            nn.Sigmoid()  # 二分类输出0-1之间的概率
        )
        
    def _calculate_feature_size(self):
        """计算卷积部分输出的特征图大小"""
        with torch.no_grad():
            dummy_input = torch.zeros(1, 3, *Config.IMAGE_SIZE[::-1])  # 注意：尺寸顺序是高x宽
            output = self.features(dummy_input)
            return int(torch.prod(torch.tensor(output.size()[1:])))
        
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # 展平
        x = self.classifier(x)
        return x

# 加载数据集
def load_dataset():
    
    image_paths = []
    labels = []
    
    # 正常屏幕图像：标签0
    normal_dir = os.path.join(Config.DATA_DIR, "normal")
    for img_file in os.listdir(normal_dir):
        if img_file.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp')):
            image_paths.append(os.path.join(normal_dir, img_file))
            labels.append(0)
    
    # 花屏图像：标签1
    glitch_dir = os.path.join(Config.DATA_DIR, "abnormal")
    for img_file in os.listdir(glitch_dir):
        if img_file.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp')):
            image_paths.append(os.path.join(glitch_dir, img_file))
            labels.append(1)
    
    # 划分训练和验证
    dataset_size = len(image_paths)
    val_size = int(dataset_size * Config.VALIDATION_SPLIT)
    train_size = dataset_size - val_size
    
    # 数据变换
    train_transform = transforms.Compose([
        transforms.Resize(Config.IMAGE_SIZE),
        transforms.RandomHorizontalFlip(p=0.3),
        transforms.RandomVerticalFlip(p=0.1),
        transforms.RandomRotation(degrees=5),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    
    val_transform = transforms.Compose([
        transforms.Resize(Config.IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    
    # 创建数据集
    full_dataset = ScreenDataset(image_paths, labels, transform=train_transform)
    
    # 划分训练集和验证集
    train_dataset, val_dataset = random_split(
        full_dataset, [train_size, val_size],
        generator=torch.Generator().manual_seed(seed)
    )
    
    # 验证集使用不同的变换
    val_dataset.dataset.transform = val_transform
    
    # 数据加载
    train_loader = DataLoader(
        train_dataset, 
        batch_size=Config.BATCH_SIZE, 
        shuffle=True,
        num_workers=0
    )
    
    val_loader = DataLoader(
        val_dataset, 
        batch_size=Config.BATCH_SIZE, 
        shuffle=False,
        num_workers=0
    )
    
    print(f"数据集加载完成 - 总样本数: {dataset_size}")
    print(f"训练集: {train_size} 样本, 验证集: {val_size} 样本")
    print(f"正常屏幕样本: {sum(1 for l in labels if l == 0)}")
    print(f"花屏样本: {sum(1 for l in labels if l == 1)}")
    
    return train_loader, val_loader

# 训练模型
def train_model(train_loader, val_loader):
    # 初始化模型、损失函数、优化器
    model = ScreenGlitchCNN().to(Config.DEVICE)
    criterion = nn.BCELoss()  # 二分类交叉熵损失
    optimizer = optim.Adam(model.parameters(), lr=Config.LEARNING_RATE)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5)
    
    history = {
        'train_loss': [], 'train_acc': [],
        'val_loss': [], 'val_acc': [],
        'val_precision': [], 'val_recall': [], 'val_f1': []
    }
    
    # 早停机制变量
    best_val_loss = float('inf')
    early_stopping_counter = 0
    
    for epoch in range(Config.EPOCHS):
        model.train()
        train_loss = 0.0
        train_preds = []
        train_labels = []
        
        # 训练循环
        train_pbar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{Config.EPOCHS} - Training")
        for images, labels in train_pbar:
            images = images.to(Config.DEVICE)
            labels = labels.to(Config.DEVICE).float().unsqueeze(1)
            
            # 前向传播
            outputs = model(images)
            loss = criterion(outputs, labels)
            
            # 反向传播和优化
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            # 累计损失
            train_loss += loss.item() * images.size(0)
            
            # 记录预测结果
            preds = (outputs > 0.5).float()
            train_preds.extend(preds.cpu().numpy())
            train_labels.extend(labels.cpu().numpy())

            train_pbar.set_postfix({"Loss": f"{loss.item():.4f}"})
            
        
        # 计算训练集指标
        train_loss /= len(train_loader.dataset)
        train_acc = accuracy_score(train_labels, train_preds)
        
        # 验证阶段
        model.eval()
        val_loss = 0.0
        val_preds = []
        val_true = []
        
        with torch.no_grad():
            val_pbar = tqdm(val_loader, desc=f"Epoch {epoch+1}/{Config.EPOCHS} - Validation")
            for images, labels in val_pbar:
                images = images.to(Config.DEVICE)
                labels = labels.to(Config.DEVICE).float().unsqueeze(1)
                
                outputs = model(images)
                loss = criterion(outputs, labels)
                
                val_loss += loss.item() * images.size(0)
                
                preds = (outputs > 0.5).float()
                val_preds.extend(preds.cpu().numpy())
                val_true.extend(labels.cpu().numpy())
                
                val_pbar.set_postfix({"Loss": f"{loss.item():.4f}"})
        
        # 计算验证集指标
        val_loss /= len(val_loader.dataset)
        val_acc = accuracy_score(val_true, val_preds)
        val_precision = precision_score(val_true, val_preds)
        val_recall = recall_score(val_true, val_preds)
        val_f1 = f1_score(val_true, val_preds)
        
        # 更新学习率调度器
        scheduler.step(val_loss)
        
        # 记录历史数据
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)
        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_acc)
        history['val_precision'].append(val_precision)
        history['val_recall'].append(val_recall)
        history['val_f1'].append(val_f1)
        
        # 打印每个 epoch 结果
        print(f"\nEpoch {epoch+1}/{Config.EPOCHS}")
        print(f"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f}")
        print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")
        print(f"Val Precision: {val_precision:.4f} | Val Recall: {val_recall:.4f} | Val F1: {val_f1:.4f}")
        
        # 早停机制
        if val_loss < best_val_loss - 1e-5:
            best_val_loss = val_loss
            torch.save(model.state_dict(), Config.MODEL_SAVE_PATH)
            print(f"模型已保存至 {Config.MODEL_SAVE_PATH}")
            early_stopping_counter = 0
        else:
            early_stopping_counter += 1
            print(f"早停计数: {early_stopping_counter}/{Config.PATIENCE}")
            if early_stopping_counter >= Config.PATIENCE:
                print("早停机制触发，停止训练")
                break
    
    # 画训练历史图线
    plot_training_history(history)
    
    # 加载最佳模型
    model.load_state_dict(torch.load(Config.MODEL_SAVE_PATH))
    return model, history

# 训练历史图线
def plot_training_history(history):
    plt.figure(figsize=(15, 10))
    
    # 损失曲线
    plt.subplot(2, 2, 1)
    plt.plot(history['train_loss'], label='Train Loss')
    plt.plot(history['val_loss'], label='Val Loss')
    plt.title('Loss Curve')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    
    # 准确率曲线
    plt.subplot(2, 2, 2)
    plt.plot(history['train_acc'], label='Train Accuracy')
    plt.plot(history['val_acc'], label='Val Accuracy')
    plt.title('Accuracy Curve')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    
    # 精确率和召回率曲线
    plt.subplot(2, 2, 3)
    plt.plot(history['val_precision'], label='Val Precision')
    plt.plot(history['val_recall'], label='Val Recall')
    plt.title('Precision & Recall Curve')
    plt.xlabel('Epoch')
    plt.ylabel('Score')
    plt.legend()
    
    # F1分数曲线
    plt.subplot(2, 2, 4)
    plt.plot(history['val_f1'], label='Val F1-score')
    plt.title('F1-score Curve')
    plt.xlabel('Epoch')
    plt.ylabel('F1-score')
    plt.legend()
    
    plt.tight_layout()
    plt.savefig('training_history.png')
    plt.close()

# 评估模型
def evaluate_model(model, val_loader):
    model.eval()
    all_preds = []
    all_true = []
    all_probs = []
    
    with torch.no_grad():
        for images, labels in val_loader:
            images = images.to(Config.DEVICE)
            labels = labels.to(Config.DEVICE)
            
            outputs = model(images)
            probs = outputs.cpu().numpy()
            preds = (probs > 0.5).astype(int)
            
            all_preds.extend(preds)
            all_true.extend(labels.cpu().numpy())
            all_probs.extend(probs)
    
    # 评估指标
    accuracy = accuracy_score(all_true, all_preds)
    precision = precision_score(all_true, all_preds)
    recall = recall_score(all_true, all_preds)
    f1 = f1_score(all_true, all_preds)
    cm = confusion_matrix(all_true, all_preds)
    
    print("\n===== 模型评估结果 =====")
    print(f"准确率 (Accuracy): {accuracy:.4f}")
    print(f"精确率 (Precision): {precision:.4f}")
    print(f"召回率 (Recall): {recall:.4f}")
    print(f"F1分数 (F1 Score): {f1:.4f}")
    print("\n混淆矩阵:")
    print(cm)
    print(f"真阴性(TN): {cm[0][0]} (正常识别为正常)")
    print(f"假阳性(FP): {cm[0][1]} (正常误判为花屏)")
    print(f"假阴性(FN): {cm[1][0]} (花屏误判为正常)")
    print(f"真阳性(TP): {cm[1][1]} (花屏识别为花屏)")
    
    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'confusion_matrix': cm
    }

# 预测单个图像
def predict_image(model, image_path):
    # 图像变换
    transform = transforms.Compose([
        transforms.Resize(Config.IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    
    # 加载并预处理
    image = Image.open(image_path).convert('RGB')
    image = transform(image).unsqueeze(0)  # 添加批次维度
    image = image.to(Config.DEVICE)
    
    # 预测
    model.eval()
    with torch.no_grad():
        output = model(image)
        prob = output.item()
        prediction = 1 if prob > 0.5 else 0
    
    result = "存在花屏" if prediction == 1 else "正常屏幕"
    return result, prob

def main():
    """Run the full pipeline: load data, train the model, evaluate it."""
    print(f"使用设备: {Config.DEVICE}")

    print("加载数据集...")
    loaders = load_dataset()

    print("开始训练模型...")
    model, _history = train_model(*loaders)

    print("评估模型性能...")
    evaluate_model(model, loaders[1])

    print("\n模型训练和评估完成")

# Entry point: run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
