import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
from utils import load_and_preprocess_data, SignalDataset
from cnn_model import get_model
import json
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl

# Configure matplotlib for Chinese text rendering.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # render CJK labels correctly
    'axes.unicode_minus': False,     # render the minus sign correctly
})

def parse_args(argv=None):
    """Parse command-line options for CNN training.

    Args:
        argv: Optional list of argument strings. ``None`` (the default) makes
            argparse read ``sys.argv[1:]``, preserving the original behavior;
            passing an explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with batch_size, epochs, lr, use_gan, use_sim,
        use_sim_real and gan_type.
    """
    parser = argparse.ArgumentParser(description='训练CNN模型')
    parser.add_argument('--batch_size', type=int, default=32, help='批次大小')
    parser.add_argument('--epochs', type=int, default=100, help='训练轮数')
    parser.add_argument('--lr', type=float, default=0.001, help='学习率')
    parser.add_argument('--use_gan', action='store_true', help='是否使用GAN生成的样本')
    parser.add_argument('--use_sim', action='store_true', help='是否使用仿真数据')
    parser.add_argument('--use_sim_real', action='store_true', help='是否使用真实仿真数据')
    parser.add_argument('--gan_type', type=str, default='vanilla', 
                       choices=['vanilla', 'wgan', 'cgan', 'wgan_gp'], help='GAN类型')
    return parser.parse_args(argv)

def _run_epoch(model, loader, criterion, device, optimizer=None):
    """Run one full pass over ``loader``.

    Trains (forward + backward + step) when ``optimizer`` is given; otherwise
    evaluates under ``torch.no_grad()``.

    Returns:
        Tuple ``(mean_loss, accuracy_percent)`` for the pass.
    """
    training = optimizer is not None
    model.train(training)
    total_loss = 0.0
    correct = 0
    total = 0
    with torch.set_grad_enabled(training):
        for inputs, labels in loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Conv layers expect (batch, channels, length); add the channel
            # dimension when the batch arrives as (batch, length).
            if inputs.dim() == 2:
                inputs = inputs.unsqueeze(1)
            if training:
                optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            if training:
                loss.backward()
                optimizer.step()
            total_loss += loss.item()
            # Predicted class = arg-max over the class dimension.
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
    return total_loss / len(loader), 100. * correct / total


def train_model(model, train_loader, val_loader, criterion, optimizer, device, args, save_dir):
    """Train ``model`` for ``args.epochs`` epochs, validating after each.

    The checkpoint with the best validation accuracy is written to
    ``save_dir/best_model.pth``.

    Args:
        model: Network to train (already on ``device``).
        train_loader: DataLoader yielding (inputs, labels) training batches.
        val_loader: DataLoader for the validation split.
        criterion: Loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: Optimizer over ``model.parameters()``.
        device: ``torch.device`` to run on.
        args: Namespace providing ``epochs``.
        save_dir: Existing directory for the best-model checkpoint.

    Returns:
        Dict with per-epoch lists: train_loss, train_acc, val_loss, val_acc.
    """
    history = {
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': []
    }
    best_val_acc = 0.0

    for epoch in range(args.epochs):
        # Training pass (optimizer given), then validation pass (no optimizer).
        train_loss, train_acc = _run_epoch(model, train_loader, criterion, device, optimizer)
        val_loss, val_acc = _run_epoch(model, val_loader, criterion, device)

        history['train_loss'].append(float(train_loss))
        history['train_acc'].append(float(train_acc))
        history['val_loss'].append(float(val_loss))
        history['val_acc'].append(float(val_acc))

        print(f'Epoch {epoch+1}/{args.epochs}:')
        print(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%')
        print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%')

        # Keep only the checkpoint with the best validation accuracy so far.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), os.path.join(save_dir, 'best_model.pth'))

    return history

def plot_training_history(history, save_dir):
    """Plot loss and accuracy curves side by side and save the figure.

    Args:
        history: Dict with per-epoch lists train_loss, val_loss, train_acc,
            val_acc (as produced by ``train_model``).
        save_dir: Directory in which ``training_history.png`` is written.
    """
    # (title, y-label, [(history key, legend label), ...]) for each subplot.
    panels = [
        ('损失曲线', '损失', [('train_loss', '训练损失'), ('val_loss', '验证损失')]),
        ('准确率曲线', '准确率 (%)', [('train_acc', '训练准确率'), ('val_acc', '验证准确率')]),
    ]

    plt.figure(figsize=(12, 4))
    for position, (title, ylabel, series) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        for key, label in series:
            plt.plot(history[key], label=label)
        # SimHei is forced per-element so CJK text renders even if the
        # global rcParams were overridden elsewhere.
        plt.title(title, fontproperties='SimHei')
        plt.xlabel('轮次', fontproperties='SimHei')
        plt.ylabel(ylabel, fontproperties='SimHei')
        plt.legend(prop={'family': 'SimHei'})

    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'training_history.png'))
    plt.close()

def _evaluate(model, loader, criterion, device):
    """Return ``(mean_loss, accuracy_percent)`` of ``model`` over ``loader``.

    Runs in eval mode with gradients disabled.
    """
    model.eval()
    total_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Conv layers expect (batch, channels, length); add the channel
            # dimension when the batch arrives as (batch, length).
            if inputs.dim() == 2:
                inputs = inputs.unsqueeze(1)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            total_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
    return total_loss / len(loader), 100. * correct / total


def main():
    """Entry point: load data, train the CNN, plot/save history, evaluate on test."""
    args = parse_args()

    # Select compute device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'使用设备: {device}')

    # Load the (optionally GAN/simulation-augmented) dataset.
    X_train, X_test, y_train, y_test = load_and_preprocess_data(
        use_gan=args.use_gan,
        gan_type=args.gan_type,
        use_sim=args.use_sim,
        use_sim_real=args.use_sim_real
    )

    # Build datasets and loaders.
    train_dataset = SignalDataset(X_train, y_train)
    test_dataset = SignalDataset(X_test, y_test)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size)

    # Output directory is keyed by the data-source flags (checked in priority
    # order: sim_real > gan > sim > real).
    if args.use_sim_real:
        save_dir = os.path.join("./diagnosis_models/cnn", "sim_real")
    elif args.use_gan:
        save_dir = os.path.join("./diagnosis_models/cnn", args.gan_type)
    elif args.use_sim:
        save_dir = os.path.join("./diagnosis_models/cnn", "sim")
    else:
        save_dir = os.path.join("./diagnosis_models/cnn", "real")

    os.makedirs(save_dir, exist_ok=True)

    # Build the model; the class count is inferred from the training labels.
    num_classes = len(np.unique(y_train))
    model = get_model(num_classes, device)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # NOTE(review): the test split doubles as the validation set here, so
    # best-model selection is tuned on the test data and the final test
    # accuracy is optimistically biased; consider a separate validation split.
    history = train_model(model, train_loader, test_loader, criterion, optimizer, device, args, save_dir)

    # Plot loss/accuracy curves.
    plot_training_history(history, save_dir)

    # Persist the per-epoch training history.
    history_path = os.path.join(save_dir, 'training_history.json')
    with open(history_path, 'w') as f:
        json.dump(history, f, indent=4)

    # Evaluate the best checkpoint on the test set. map_location keeps the
    # checkpoint loadable even if it was saved from a different device.
    model.load_state_dict(torch.load(os.path.join(save_dir, 'best_model.pth'),
                                     map_location=device))
    test_loss, test_acc = _evaluate(model, test_loader, criterion, device)

    print(f'\n测试集结果:')
    print(f'测试损失: {test_loss:.4f}')
    print(f'测试准确率: {test_acc:.2f}%')

    # Save the summary metrics.
    results = {
        'test_accuracy': test_acc,
        'test_loss': test_loss,
        'best_validation_accuracy': max(history['val_acc']),
        'final_training_accuracy': history['train_acc'][-1],
        'final_validation_accuracy': history['val_acc'][-1],
        'final_training_loss': history['train_loss'][-1],
        'final_validation_loss': history['val_loss'][-1]
    }

    with open(os.path.join(save_dir, 'evaluation_results.json'), 'w') as f:
        json.dump(results, f, indent=4)


if __name__ == '__main__':
    main()