import argparse
import torch
import numpy as np
import os
import time
import random
from torch import nn, optim
import matplotlib.pyplot as plt

from models import ChannelLLM
from data_provider.data_factory import data_provider
from utils.tools import EarlyStopping, compute_metrics, visualize_weights

# 创建命令行参数
args = argparse.ArgumentParser().parse_args()

# 基本配置
args.task_name = 'probe_weight_prediction'
args.is_training = 1
args.model_id = 'DEBUG'
args.model_comment = 'DEBUG-MODE'
args.model = 'ChannelLLM'

# 数据加载参数
args.data = 'CHANNEL'
args.root_path = './dataset/channel_data/'
args.data_path = 'channel_data.npz'
args.features = 'M'
args.loader = 'channel'
args.checkpoints = './checkpoints/'

# 序列长度参数 - 减小数值以便调试
args.seq_len = 48
args.label_len = 24
args.pred_len = 12

# 模型定义参数
args.n_clusters = 20
args.n_probes = 32
args.d_model = 64
args.n_heads = 4
args.d_ff = 128
args.dropout = 0.1
args.llm_layers = 2

# 优化参数
args.num_workers = 0
args.itr = 1
args.train_epochs = 3
args.batch_size = 4
args.eval_batch_size = 4
args.patience = 3
args.learning_rate = 0.001
args.des = 'debug'
args.loss = 'mse'
args.use_amp = False
args.print_every = 1
args.optim = 'adam'

# 设置随机种子
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)

# 设置设备
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

def main():
    # 加载数据
    print("Loading data...")
    train_data, train_loader = data_provider(args, 'train')
    vali_data, vali_loader = data_provider(args, 'val')
    test_data, test_loader = data_provider(args, 'test')
    
    # 获取数据形状信息
    batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark = next(iter(train_loader))
    print(f"Input shape: {batch_x_dict['data'].shape}")
    print(f"Original data shape: {batch_x_dict['orig_data'].shape}")
    print(f"Target shape: {batch_y_dict['data'].shape}")
    print(f"Mark shape: {batch_x_mark.shape}")
    
    # 创建模型
    print("Creating model...")
    model = ChannelLLM(args).float().to(device)
    
    # 打印模型参数数量
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Total parameters: {total_params}")
    print(f"Trainable parameters: {trainable_params}")
    
    # 创建优化器和损失函数
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    criterion = nn.MSELoss()
    
    # 创建保存目录
    checkpoint_dir = f"./checkpoints/debug_{int(time.time())}/"
    os.makedirs(checkpoint_dir, exist_ok=True)
    
    early_stopping = EarlyStopping(patience=args.patience, verbose=True)
    
    # 快速训练
    print("Starting mini training...")
    for epoch in range(args.train_epochs):
        model.train()
        epoch_losses = []
        
        for i, (batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark) in enumerate(train_loader):
            optimizer.zero_grad()
            
            # 移动数据到设备
            batch_x = batch_x_dict['data'].float().to(device)
            batch_y = batch_y_dict['data'].float().to(device)
            batch_x_mark = batch_x_mark.float().to(device)
            
            # 前向传播
            outputs = model(batch_x, None, batch_x_mark, None)
            
            # ChannelLLM返回的是字典，不是transformer的原始输出
            if isinstance(outputs, dict):
                # 从返回的字典中获取损失
                if 'spatial_rmse_loss' in outputs:
                    loss = outputs['spatial_rmse_loss']
                else:
                    # 如果没有损失，使用预测的探头权重计算损失
                    pred_weights = outputs['probe_weights']  # [B, seq_len, n_probes]
                    # 这里需要根据你的具体任务调整损失计算
                    # 暂时使用一个简单的目标
                    target_weights = torch.ones_like(pred_weights) * 0.5
                    loss = criterion(pred_weights, target_weights)
            else:
                # 如果不是字典（旧版本兼容）
                pred = outputs[:, -args.pred_len:, :]
                true = batch_y[:, -args.pred_len:, :].to(device)
                loss = criterion(pred, true)
            
            # 反向传播
            loss.backward()
            optimizer.step()
            
            epoch_losses.append(loss.item())
            
            print(f"Epoch {epoch+1}, Batch {i+1}, Loss: {loss.item():.6f}")
            
            # 仅处理少量批次进行调试
            if i >= 5:
                break
        
        avg_loss = np.mean(epoch_losses)
        print(f"Epoch {epoch+1} Average Loss: {avg_loss:.6f}")
        
        # 快速验证
        model.eval()
        val_losses = []
        preds = []
        trues = []
        
        with torch.no_grad():
            for i, (batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark) in enumerate(vali_loader):
                # 移动数据到设备
                batch_x = batch_x_dict['data'].float().to(device)
                batch_y = batch_y_dict['data'].float().to(device)
                batch_x_mark = batch_x_mark.float().to(device)
                
                # 前向传播
                outputs = model(batch_x, None, batch_x_mark, None)
                
                # ChannelLLM返回的是字典，不是transformer的原始输出
                if isinstance(outputs, dict):
                    # 从返回的字典中获取损失
                    if 'spatial_rmse_loss' in outputs:
                        loss = outputs['spatial_rmse_loss']
                    else:
                        # 如果没有损失，使用预测的探头权重计算损失
                        pred_weights = outputs['probe_weights']  # [B, seq_len, n_probes]
                        # 这里需要根据你的具体任务调整损失计算
                        # 暂时使用一个简单的目标
                        target_weights = torch.ones_like(pred_weights) * 0.5
                        loss = criterion(pred_weights, target_weights)
                else:
                    # 如果不是字典（旧版本兼容）
                    pred = outputs[:, -args.pred_len:, :]
                    true = batch_y[:, -args.pred_len:, :].to(device)
                    loss = criterion(pred, true)
                
                val_losses.append(loss.item())
                
                # 真实值转换为numpy
                true = batch_y.cpu().numpy()
                
                preds.append(pred.cpu().numpy())
                trues.append(true)
                
                # 仅处理少量批次进行调试
                if i >= 3:
                    break
        
        val_loss = np.mean(val_losses)
        preds = np.concatenate(preds, axis=0)
        trues = np.concatenate(trues, axis=0)
        
        # 计算指标
        metrics = compute_metrics(preds, trues)
        print(f"Validation Loss: {val_loss:.6f}, MSE: {metrics['mse']:.6f}, MAE: {metrics['mae']:.6f}")
        
        # 早停检查
        early_stopping(val_loss, model, checkpoint_dir)
        if early_stopping.early_stop:
            print("Early stopping triggered")
            break
    
    # 加载最佳模型
    print("Loading best model...")
    model.load_state_dict(torch.load(checkpoint_dir + 'checkpoint'))
    
    # 快速测试
    print("Running mini test...")
    model.eval()
    test_preds = []
    test_trues = []
    
    with torch.no_grad():
        for i, (batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark) in enumerate(test_loader):
            # 移动数据到设备
            batch_x = batch_x_dict['data'].float().to(device)
            batch_y = batch_y_dict['data'].float().to(device)
            batch_x_mark = batch_x_mark.float().to(device)
            
            # 前向传播
            outputs = model(batch_x, None, batch_x_mark, None)
            
            # ChannelLLM返回的是字典，不是transformer的原始输出
            if isinstance(outputs, dict):
                # 从返回的字典中获取损失
                if 'spatial_rmse_loss' in outputs:
                    loss = outputs['spatial_rmse_loss']
                else:
                    # 如果没有损失，使用预测的探头权重计算损失
                    pred_weights = outputs['probe_weights']  # [B, seq_len, n_probes]
                    # 这里需要根据你的具体任务调整损失计算
                    # 暂时使用一个简单的目标
                    target_weights = torch.ones_like(pred_weights) * 0.5
                    loss = criterion(pred_weights, target_weights)
            else:
                # 如果不是字典（旧版本兼容）
                pred = outputs[:, -args.pred_len:, :]
                true = batch_y[:, -args.pred_len:, :].to(device)
                loss = criterion(pred, true)
            
            # 转换为numpy数组
            pred = pred.cpu().numpy()
            true = true.cpu().numpy()
            
            test_preds.append(pred)
            test_trues.append(true)
            
            # 仅处理少量批次进行调试
            if i >= 2:
                break
    
    test_preds = np.concatenate(test_preds, axis=0)
    test_trues = np.concatenate(test_trues, axis=0)
    
    # 计算测试指标
    test_metrics = compute_metrics(test_preds, test_trues)
    print(f"Test MSE: {test_metrics['mse']:.6f}, MAE: {test_metrics['mae']:.6f}")
    print(f"Test Relative Error: {test_metrics['rel_err']:.6f}, KL Divergence: {test_metrics['kl_div']:.6f}")
    
    # 可视化一个样本
    sample_idx = 0
    visualize_weights(
        test_preds[sample_idx], 
        test_trues[sample_idx], 
        n_probes=args.n_probes,
        save_path=f"{checkpoint_dir}/sample_visualization.png"
    )
    
    print(f"Debug run completed. Results saved to {checkpoint_dir}")
    
    # 保存一些预测结果
    np.save(f"{checkpoint_dir}/test_pred.npy", test_preds)
    np.save(f"{checkpoint_dir}/test_true.npy", test_trues)
    
    return model, test_preds, test_trues

if __name__ == "__main__":
    main() 