import os
# os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

import random
import numpy as np
import torchvision
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
import torchvision.transforms as transforms
from tqdm import tqdm
import matplotlib.pyplot as plt

from utils import img_util


# 设置随机种子确保结果可复现
def set_seed(seed=42):
    """Seed every RNG used in this script (random, numpy, torch) for reproducibility.

    Args:
        seed: base seed applied to all generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed all CUDA devices, not just the current one.
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels and disable the autotuner, whose
    # kernel selection would otherwise vary between runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


# How many rotation angles to sample per source image. Kept small because
# every sampled angle duplicates the image in the dataset.
num_angles = 1

# Number of rotation classes; each class covers 360 / num_classes degrees.
num_classes = 120


# 数据处理相关函数
# Data handling
class RotationDataset(Dataset):
    """Rotation-captcha dataset.

    Each sample is the source image rotated by
    ``label * (360 // num_classes)`` degrees; the target is the class index.
    """

    def __init__(self, image_paths, labels, transform=None, augment=False):
        """
        Args:
            image_paths: list of image file paths.
            labels: rotation class per image (0 .. num_classes - 1).
            transform: optional transform applied after rotation.
            augment: if True, apply random color jitter before rotation
                (training only).
        """
        self.image_paths = image_paths
        self.labels = labels
        self.transform = transform
        self.augment = augment

        # Degrees represented by one class bucket.
        self.types = 360 // num_classes

        # Build the jitter transform once here instead of re-creating it on
        # every __getitem__ call.
        self.color_jitter = transforms.ColorJitter(
            brightness=0.2, contrast=0.2, saturation=0.2)

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        class_label = self.labels[idx]
        # Actual rotation angle in degrees for this class.
        label_x3 = class_label * self.types

        # Load image
        image = Image.open(img_path).convert('RGB')

        # Random brightness/contrast/saturation augmentation (training only).
        if self.augment:
            image = self.color_jitter(image)

        # Rotate by the class angle. NOTE(review): img_rotate appears to
        # return a tensor (ToTensor is commented out in the transform
        # pipeline and save_image was usable below) — confirm in img_util.
        image = img_util.img_rotate(image, label_x3)

        # Apply the resize/normalize pipeline.
        if self.transform:
            image = self.transform(image)

        # if idx % 200 == 0:
        #     torchvision.utils.save_image(image, f"{idx}_{label_x3}.jpg")

        # Angle in radians, used for regression-style error evaluation.
        angle_rad = np.radians(label_x3)

        return image, class_label, angle_rad


def _build_transform():
    """Build the shared resize + ImageNet-normalize pipeline.

    NOTE(review): ToTensor is deliberately omitted — the input is assumed to
    already be a tensor (see RotationDataset / img_util.img_rotate); confirm.
    """
    return transforms.Compose([
        transforms.Resize((224, 224)),
        # transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], inplace=True),
    ])


def create_data_transforms():
    """Return (train_transform, val_transform).

    Both pipelines are identical; they are built by a single helper so the
    definition is not duplicated.
    """
    return _build_transform(), _build_transform()


def get_image_paths_and_labels(data_dirs):
    """Collect image paths and sampled rotation-class labels.

    For every image found (non-recursively) in `data_dirs`, `num_angles`
    distinct angles are sampled from [0, 360) and mapped to class buckets of
    360 // num_classes degrees.

    Args:
        data_dirs: iterable of directories to scan for .png/.jpg/.jpeg files.

    Returns:
        (image_paths, class_labels): parallel lists; a path appears once per
        sampled angle.
    """
    image_files_list = []
    for data_dir in data_dirs:
        # Sort for a deterministic ordering — os.listdir order is arbitrary,
        # which would make the later train/val split irreproducible despite
        # seeding. Extension check is case-insensitive.
        image_files = [os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))
                       if f.lower().endswith(('.png', '.jpg', '.jpeg'))]

        image_files_list.extend(image_files)

    # Degrees represented by one class bucket.
    types = 360 // num_classes

    image_paths = []
    class_labels = []
    for path in image_files_list:
        # Sample distinct angles over the full circle. range(360) fixes the
        # previous off-by-one (range(0, 359)) that made angle 359 unreachable.
        selected_angles = random.sample(range(360), num_angles)
        for angle in selected_angles:
            image_paths.append(path)  # path repeated once per sampled angle

            class_labels.append(angle // types)

    return image_paths, class_labels


def create_data_loaders(data_dirs, batch_size=32, num_workers=4):
    """Build train and validation DataLoaders from images in `data_dirs`.

    The last 2% of samples are held out for validation; the rest train.
    """
    paths, labels = get_image_paths_and_labels(data_dirs)

    # Split point: everything before it trains, everything after validates.
    n_val = int(0.02 * len(paths))
    split = len(paths) - n_val

    # Image pipelines.
    train_tf, val_tf = create_data_transforms()

    # Datasets — augmentation only on the training side.
    train_ds = RotationDataset(paths[:split], labels[:split],
                               transform=train_tf, augment=True)
    val_ds = RotationDataset(paths[split:], labels[split:],
                             transform=val_tf, augment=False)

    # Loaders — shuffle training batches only.
    train_loader = DataLoader(train_ds, batch_size=batch_size,
                              shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_ds, batch_size=batch_size,
                            shuffle=False, num_workers=num_workers)

    return train_loader, val_loader


# 模型相关函数
# Model construction
def create_model(train=False):
    """Create a RegNetY-3.2GF classifier with `num_classes` outputs.

    (The old docstring said ResNet18, but the code builds a RegNet.)

    Args:
        train: if True, initialize from ImageNet-pretrained weights;
            otherwise leave the network randomly initialized (a checkpoint
            is expected to be loaded afterwards).

    Returns:
        torchvision RegNet with its final fully-connected layer resized to
        the rotation-class count.
    """
    weights = None
    if train:
        weights = models.RegNet_Y_3_2GF_Weights.DEFAULT
    model = models.regnet_y_3_2gf(weights=weights)

    # Replace the classification head to match num_classes.
    model.fc = nn.Linear(in_features=model.fc.in_features, out_features=num_classes)

    return model


# 训练相关函数
# Training
def train_one_epoch(model, train_loader, criterion, optimizer, device):
    """Run one training pass; return (mean loss, accuracy) over the epoch."""
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    progress = tqdm(train_loader, desc='Training')
    for images, labels, _ in progress:
        images, labels = images.to(device), labels.to(device)

        # Forward pass and loss.
        logits = model(images)
        batch_loss = criterion(logits, labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        # Running statistics: loss weighted by batch size, plus accuracy.
        batch_size = labels.size(0)
        loss_sum += batch_loss.item() * batch_size
        n_seen += batch_size
        n_correct += (logits.argmax(dim=1) == labels).sum().item()

        progress.set_postfix({'loss': batch_loss.item(), 'acc': n_correct / n_seen})

    # Epoch-level metrics.
    return loss_sum / len(train_loader.dataset), n_correct / n_seen


def validate(model, val_loader, criterion, device):
    """Evaluate on the validation set; return (mean loss, accuracy)."""
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    progress = tqdm(val_loader, desc='Validation')
    with torch.no_grad():
        for images, labels, _ in progress:
            images, labels = images.to(device), labels.to(device)

            # Forward pass only — no gradients in evaluation.
            logits = model(images)
            batch_loss = criterion(logits, labels)

            # Running statistics: loss weighted by batch size, plus accuracy.
            batch_size = labels.size(0)
            loss_sum += batch_loss.item() * batch_size
            n_seen += batch_size
            n_correct += (logits.argmax(dim=1) == labels).sum().item()

            progress.set_postfix({'loss': batch_loss.item(), 'acc': n_correct / n_seen})

    # Epoch-level metrics.
    return loss_sum / len(val_loader.dataset), n_correct / n_seen


def train_model(model, train_loader, val_loader, criterion, optimizer, scheduler, device, num_epochs=10):
    """Train with per-epoch validation; checkpoint the best model by val accuracy.

    Returns:
        dict of per-epoch curves: train_loss / train_acc / val_loss / val_acc.
    """
    history = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}
    best_val_acc = 0.0

    for epoch_idx in range(num_epochs):
        print(f'Epoch {epoch_idx + 1}/{num_epochs}')

        # One training pass, then a validation pass.
        train_loss, train_acc = train_one_epoch(model, train_loader, criterion, optimizer, device)
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)

        val_loss, val_acc = validate(model, val_loader, criterion, device)
        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_acc)

        # Plateau scheduler keys off the validation loss.
        scheduler.step(val_loss)

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), 'best_rotation_model_out.pth')
            print(f'Best model saved with accuracy: {best_val_acc:.4f}')

        print(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, '
              f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}')

    return history


# 评估相关函数
# Evaluation
def evaluate_model(model, data_loader, device):
    """Measure the angular error of the classifier on `data_loader`.

    Args:
        model: trained classifier producing num_classes logits.
        data_loader: yields (inputs, class_labels, true_angles_rad).
        device: device to run inference on.

    Returns:
        dict with mean/median error (degrees), the percentage of samples
        within 5 and 10 degrees, and the raw per-sample error list.
    """
    model.eval()
    angle_errors = []

    # Degrees represented by one class bucket.
    types = 360 // num_classes

    with torch.no_grad():
        for inputs, _, angles_rad in tqdm(data_loader, desc='Evaluating'):
            inputs = inputs.to(device)

            # Predicted class per sample.
            outputs = model(inputs)
            _, predicted_classes = outputs.max(1)

            # Map predicted class back to an angle in radians.
            predicted_angles_deg = predicted_classes.float() * types
            predicted_angles_rad = np.radians(predicted_angles_deg.cpu().numpy())

            true_angles_rad = angles_rad.numpy()

            # Absolute angular error, wrapped to [0, pi] so that e.g.
            # 359° vs 0° counts as a 1° error, then converted to degrees.
            errors_rad = np.abs(predicted_angles_rad - true_angles_rad)
            errors_rad = np.minimum(errors_rad, 2 * np.pi - errors_rad)
            angle_errors.extend(np.degrees(errors_rad))

    # Convert to an array once and reuse it for every summary statistic
    # (the old code rebuilt np.array(angle_errors) per metric).
    errors = np.asarray(angle_errors)
    mean_error = errors.mean()
    median_error = np.median(errors)
    accuracy_within_5 = np.mean(errors <= 5) * 100
    accuracy_within_10 = np.mean(errors <= 10) * 100

    print(f'平均角度误差: {mean_error:.2f} 度')
    print(f'中位角度误差: {median_error:.2f} 度')
    print(f'误差在5度以内的准确率: {accuracy_within_5:.2f}%')
    print(f'误差在10度以内的准确率: {accuracy_within_10:.2f}%')

    return {
        'mean_error': mean_error,
        'median_error': median_error,
        'accuracy_within_5': accuracy_within_5,
        'accuracy_within_10': accuracy_within_10,
        'errors': angle_errors
    }




# 可视化相关函数
def plot_training_history(history):
    """绘制训练历史"""
    plt.figure(figsize=(12, 4))

    # 绘制训练和验证损失
    plt.subplot(1, 2, 1)
    plt.plot(history['train_loss'], label='Train Loss')
    plt.plot(history['val_loss'], label='Val Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss')
    plt.legend()
    plt.grid(True)

    # 绘制训练和验证准确率
    plt.subplot(1, 2, 2)
    plt.plot(history['train_acc'], label='Train Acc')
    plt.plot(history['val_acc'], label='Val Acc')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title('Training and Validation Accuracy')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig('training_results.png')


def plot_angle_errors(error_metrics):
    """Plot the angle-error histogram and save it to angle_errors.png."""
    fig = plt.figure(figsize=(10, 5))
    plt.hist(error_metrics['errors'], bins=50, alpha=0.7)
    # Mark mean and median errors on the histogram.
    plt.axvline(error_metrics['mean_error'], color='r', linestyle='dashed', linewidth=2,
                label=f'Mean: {error_metrics["mean_error"]:.2f}°')
    plt.axvline(error_metrics['median_error'], color='g', linestyle='dashed', linewidth=2,
                label=f'Median: {error_metrics["median_error"]:.2f}°')
    plt.xlabel('Angle Error (degrees)')
    plt.ylabel('Frequency')
    plt.title('Distribution of Angle Errors')
    plt.legend()
    plt.grid(True)
    plt.savefig('angle_errors.png')
    # Release the figure — without this, repeated calls leak figures in
    # matplotlib's global state.
    plt.close(fig)


def visualize_results(history, error_metrics=None):
    """Render training curves; also render error histograms when metrics given."""
    plot_training_history(history)

    # Histogram only when truthy metrics were supplied.
    if error_metrics:
        plot_angle_errors(error_metrics)


# 主函数
def main():
    # 设置参数
    data_dirs = [
        '../../dataset/baidu/input',
        # '../../dataset/baidu/output'
    ]

    batch_size = 64
    num_epochs = 10
    learning_rate = 0.001
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print(f'使用设备: {device}')
    set_seed()

    # 创建数据加载器
    print('准备数据...')
    train_loader, val_loader = create_data_loaders(data_dirs, batch_size)

    # 创建模型
    print('创建模型...')
    model = create_model(False).to(device)
    # model.load_state_dict(torch.load('best_rotation_model.pth', weights_only=True, map_location=device))

    # 定义损失函数和优化器
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2, factor=0.5)

    # 训练模型
    print('开始训练...')
    history = train_model(model, train_loader, val_loader, criterion, optimizer, scheduler, device, num_epochs)

    model.load_state_dict(torch.load('best_rotation_model_out.pth', weights_only=True, map_location=device))

    # 评估模型
    print('评估模型...')
    error_metrics = evaluate_model(model, val_loader, device)

    # 可视化结果
    visualize_results(history, error_metrics)

    print('训练和评估完成！')


# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

