# Import required libraries
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from data_provider.data_factory import data_provider
from utils.metrics import metric
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.loss_functions import fast_channel_reconstruction_loss
import numpy as np

def train(model, train_data, train_loader, vali_data, vali_loader, test_data, test_loader, args):
    """Train ``model`` with early stopping, then evaluate the best checkpoint.

    Runs the standard loop: optimizer/scheduler setup, per-epoch training with the
    channel-reconstruction composite loss, validation-driven early stopping, and a
    final test pass using the best saved weights.

    Args:
        model: the nn.Module to train (forward signature ``model(batch_x, batch_x_mark)``).
        train_data / vali_data / test_data: dataset objects (passed through to helpers).
        train_loader / vali_loader / test_loader: DataLoaders yielding
            ``(batch_x, batch_y, batch_x_mark, batch_y_mark)`` tuples.
        args: namespace with at least ``optimizer``, ``learning_rate``, ``scheduler``,
            ``train_epochs``, ``patience``, ``device``, ``path`` (checkpoint dir),
            and optionally ``weight_decay``, ``step_size``, ``gamma``,
            ``lambda_recon``, ``lambda_sparse``, ``lambda_dict``.

    Returns:
        The model loaded with the best (early-stopping) checkpoint weights.

    Raises:
        NotImplementedError: if ``args.optimizer`` is not 'adam' or 'adamw'.
    """
    # --- optimizer selection ---
    if args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optimizer == 'adamw':
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    else:
        raise NotImplementedError(f"unsupported optimizer: {args.optimizer!r}")

    # --- learning-rate scheduler (optional) ---
    if args.scheduler == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.train_epochs)
    elif args.scheduler == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    else:
        scheduler = None

    # Early stopper: saves the best checkpoint into args.path and flags stagnation.
    early_stopping = EarlyStopping(patience=args.patience, verbose=True)

    # Loss-term weights, with defaults when absent from args.
    lambda_recon = getattr(args, 'lambda_recon', 1.0)
    lambda_sparse = getattr(args, 'lambda_sparse', 0.1)
    lambda_dict = getattr(args, 'lambda_dict', 0.01)  # dictionary-diversity loss weight

    print(f"损失权重配置: lambda_recon={lambda_recon}, lambda_sparse={lambda_sparse}, lambda_dict={lambda_dict}")

    # --- training loop ---
    for epoch in range(args.train_epochs):
        model.train()  # re-enable train mode each epoch (validate() switches to eval)
        train_loss = []

        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
            # Move the batch to the target device.
            batch_x = batch_x.float().to(args.device)
            batch_y = batch_y.float().to(args.device)

            if batch_x_mark is not None:
                batch_x_mark = batch_x_mark.float().to(args.device)
            if batch_y_mark is not None:
                batch_y_mark = batch_y_mark.float().to(args.device)

            # Forward pass.
            outputs = model(batch_x, batch_x_mark)

            # Composite channel-reconstruction loss; the input itself is the
            # reconstruction target (autoencoder-style objective).
            total_loss, loss_dict = fast_channel_reconstruction_loss(
                model_output=outputs,
                original_channel=batch_x,
                lambda_recon=lambda_recon,
                lambda_sparse=lambda_sparse,
                lambda_dict=lambda_dict
            )

            # Backward pass and parameter update.
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            train_loss.append(total_loss.item())

            # Periodic progress logging (every 10 batches).
            if (i+1) % 10 == 0:
                print(f'Epoch: {epoch+1}, batch: {i+1}, loss: {total_loss.item():.6f}, '
                      f'recon: {loss_dict["recon_loss"].item():.6f}, '
                      f'sparsity: {loss_dict["sparsity_loss"].item():.6f}, '
                      f'diversity: {loss_dict["diversity_loss"].item():.6f}')

        # Average training loss over the epoch (plain numpy mean; no tensor needed).
        train_loss = float(np.mean(train_loss))

        # Validation pass with the same loss weights.
        vali_loss, vali_metrics = validate(model, vali_data, vali_loader, args, lambda_recon, lambda_sparse, lambda_dict)

        print(f"Epoch: {epoch+1} | Train Loss: {train_loss:.6f} Vali Loss: {vali_loss:.6f}")
        print(f"验证指标: 重构损失={vali_metrics['recon_loss']:.6f}, "
              f"多样性={vali_metrics['diversity_loss']:.6f}")

        # Step the scheduler once per epoch (after the optimizer updates).
        if scheduler is not None:
            scheduler.step()

        # Early-stopping bookkeeping; also checkpoints the best model to args.path.
        early_stopping(vali_loss, model, args.path)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    # Reload the best checkpoint. map_location keeps this working when the
    # checkpoint was saved on a different device (e.g. GPU-trained, CPU eval).
    best_model_path = os.path.join(args.path, 'checkpoint.pth')
    model.load_state_dict(torch.load(best_model_path, map_location=args.device))

    # Final evaluation on the test split.
    test_loss, test_metrics = test(model, test_data, test_loader, args, lambda_recon, lambda_sparse, lambda_dict)
    print(f"Test Loss: {test_loss:.6f}")
    for metric_name, metric_value in test_metrics.items():
        print(f"{metric_name}: {metric_value:.6f}")

    return model

def validate(model, vali_data, vali_loader, args, lambda_recon=1.0, lambda_sparse=0.1, lambda_dict=0.01):
    """Evaluate ``model`` on the validation loader without gradient tracking.

    Computes the composite channel-reconstruction loss per batch and averages
    both the total loss and every individual loss term.

    Returns:
        (avg_loss, metrics): the mean total loss (float) and a dict mapping each
        loss-term name to its mean value across validation batches.
    """
    model.eval()
    batch_losses = []
    term_history = {}  # loss-term name -> list of per-batch values

    with torch.no_grad():
        for batch_x, batch_y, batch_x_mark, batch_y_mark in vali_loader:
            # Transfer the batch to the configured device.
            batch_x = batch_x.float().to(args.device)
            batch_y = batch_y.float().to(args.device)

            if batch_x_mark is not None:
                batch_x_mark = batch_x_mark.float().to(args.device)
            if batch_y_mark is not None:
                batch_y_mark = batch_y_mark.float().to(args.device)

            # Forward pass.
            outputs = model(batch_x, batch_x_mark)

            # Composite loss; the input channel is the reconstruction target.
            loss, loss_dict = fast_channel_reconstruction_loss(
                model_output=outputs,
                original_channel=batch_x,
                lambda_recon=lambda_recon,
                lambda_sparse=lambda_sparse,
                lambda_dict=lambda_dict
            )

            batch_losses.append(loss.item())
            for name, value in loss_dict.items():
                term_history.setdefault(name, []).append(value.item())

    # Reduce per-batch values to means (same tensor-mean reduction as training).
    avg_loss = torch.tensor(batch_losses).mean().item()
    metrics = {name: torch.tensor(values).mean().item() for name, values in term_history.items()}

    return avg_loss, metrics

def test(model, test_data, test_loader, args, lambda_recon=1.0, lambda_sparse=0.1, lambda_dict=0.01):
    """Evaluate ``model`` on the test loader and compute summary metrics.

    In addition to averaging the composite loss and its individual terms, this
    computes ``probe_diversity``: the mean fraction of unique probe indices
    selected per sample (1.0 means every selected probe was distinct).

    Returns:
        (avg_loss, metrics): mean total loss (float) and a dict of mean loss
        terms plus the ``probe_diversity`` ratio.
    """
    model.eval()
    batch_losses = []
    collected_indices = []
    collected_weights = []     # kept for parity with the original collection
    collected_channels = []    # kept for parity with the original collection
    term_history = {}          # loss-term name -> list of per-batch values

    with torch.no_grad():
        for batch_x, batch_y, batch_x_mark, batch_y_mark in test_loader:
            # Transfer the batch to the configured device.
            batch_x = batch_x.float().to(args.device)
            batch_y = batch_y.float().to(args.device)

            if batch_x_mark is not None:
                batch_x_mark = batch_x_mark.float().to(args.device)
            if batch_y_mark is not None:
                batch_y_mark = batch_y_mark.float().to(args.device)

            # Forward pass.
            outputs = model(batch_x, batch_x_mark)

            # Composite loss; the input channel is the reconstruction target.
            loss, loss_dict = fast_channel_reconstruction_loss(
                model_output=outputs,
                original_channel=batch_x,
                lambda_recon=lambda_recon,
                lambda_sparse=lambda_sparse,
                lambda_dict=lambda_dict
            )

            # Accumulate raw outputs for the diversity metric below.
            collected_indices.append(outputs['probe_indices'].cpu().numpy())
            collected_weights.append(outputs['probe_weights'].cpu().numpy())
            collected_channels.append(batch_x.cpu().numpy())

            batch_losses.append(loss.item())
            for name, value in loss_dict.items():
                term_history.setdefault(name, []).append(value.item())

    # Reduce per-batch values to means (same tensor-mean reduction as training).
    avg_loss = torch.tensor(batch_losses).mean().item()
    metrics = {name: torch.tensor(values).mean().item() for name, values in term_history.items()}

    # Diversity of selected probes: unique indices per sample, normalized by the
    # number of selections per sample.
    if collected_indices:
        indices_array = np.concatenate(collected_indices, axis=0)
        unique_per_sample = [len(np.unique(row)) for row in indices_array]
        metrics['probe_diversity'] = float(np.mean(unique_per_sample) / indices_array.shape[1])

    return avg_loss, metrics