import os
import sys
import pickle
import random
import argparse
import torch
import torch.onnx

import matplotlib.pyplot as plt
import lib.tools as my_tools
import numpy as np

from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from loguru import logger
from lib import rltrain
from lib.rltrain import attitudePolicy
logger.add("train.log")

# NOTE: Python's GIL limits CPU utilisation with plain multi-threading.
# TODO: revisit once a free-threaded CPython build (3.13+/3.14) is adopted.
num_cores = os.cpu_count()  # total logical cores on this machine
logger.info(f"Number of CPU cores available: {num_cores}")
# Intra-op parallelism is capped at 4 threads, NOT all cores — presumably a
# deliberate choice under the GIL-bound workload; confirm before changing.
torch.set_num_threads(4)

# Pick the best available compute device once, at import time.
if torch.cuda.is_available():
    device = 'cuda'
    logger.info("Using Nvidia GPU")
elif hasattr(torch, 'xpu') and torch.xpu.is_available():
    # `torch.xpu` only exists in builds with Intel XPU support; the hasattr
    # guard prevents an AttributeError on older / CPU-only torch builds.
    device = 'xpu'
    logger.info("Using Intel XPU")
else:
    device = 'cpu'
    logger.info("Using CPU")
    
def train_attitude(yaml_name):
    """Train the attitude policy network from a YAML config file.

    Loads hyper-parameters from the YAML file named ``yaml_name`` (via
    ``my_tools.load_yaml``), builds a DataLoader over the memory-mapped
    dataset, resumes from an existing checkpoint when one is present,
    trains, then exports the model in several formats and plots the
    prediction error over the whole dataset.

    Args:
        yaml_name: base name of the YAML parameter file to load.

    Returns:
        None. Aborts early (returning None) if the config cannot be loaded.
    """
    # Load hyper-parameters; abort the run if the YAML cannot be read.
    config = my_tools.load_yaml(yaml_name)
    if config is None:
        # Training aborted.
        return

    logger.info(f"Params {format(config)} have been loaded.")
    # Unpack the individual hyper-parameters from the config file.
    cfg_batch_size = config['batch_size']
    cfg_lr = config['learning_rate']
    cfg_lr_min = config['learning_rate_min']
    cfg_t_max = config['T_max']
    cfg_num_epochs = config['num_epochs']
    cfg_tensorboard_name = config['tensorboard_name']

    # Dataset column layout:
    # ['D_value', 'omega_x', 'omega_y', 'omega_z', 'n3_x', 'n3_y', 'n3_z', 'n3_xd', 'n3_yd', 'n3_zd']
    # state is 'omega_z', 'n3_x', 'n3_y', 'n3_z', 'n3_xd', 'n3_yd', 'n3_zd'
    # output is 'omega_x', 'omega_y'
    # NOTE(review): rows are read as 12 float32 columns below, but only 10
    # are named above — confirm the meaning of the remaining two columns.

    ## .npy file layout: raw float32 records, 12 values per row.
    # Memory-map instead of loading: the dataset may be larger than RAM.
    datasetfilename = 'sorted_dataset_attitude.npy'
    file_size = os.path.getsize(datasetfilename)
    num_items = file_size // (12 * 4)  # 12 float32 values, 4 bytes each
    data_set = np.memmap(datasetfilename, dtype='float32', mode='r', shape=(num_items, 12))

    ## pickle alternative (kept for reference):
    # data_set = my_tools.load_dataset("sorted_dataset_attitude.pickle")
    # data_set = np.array(data_set)

    # Shuffling is delegated to the DataLoader (memmap stays read-only).
    # np.random.shuffle(data_set)
    logger.info(f"data_set shape is {data_set.shape}")

    # Assemble the network input: columns 3..6 plus the desired n3 direction (7..9).
    n_3d = data_set[:, 7:10]
    state = np.column_stack((data_set[:, 3:7], n_3d))
    logger.info(f"state shape is {state.shape}")

    # Assemble the network target: omega_x, omega_y.
    output = data_set[:, 1:3]
    logger.info(f"output shape is {output.shape}")

    kNetDataset = rltrain.Mydataset(state.astype(np.float32), output.astype(np.float32))

    kNetTrain_attitude = DataLoader(kNetDataset, batch_size=cfg_batch_size, shuffle=True, num_workers=0)

    attPolicy = attitudePolicy().to(device)
    ## Optimisation setup
    # MSE loss: Euclidean distance between prediction and label (L1Loss also viable).
    criterion_attitude_net = torch.nn.MSELoss(reduction='mean')
    # Adam optimiser with the configured learning rate.
    optimizer_attitude_net = torch.optim.Adam(attPolicy.parameters(), lr=cfg_lr, betas=(0.9, 0.999), weight_decay=0)
    # Cosine learning-rate schedule (no warm restarts).
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                                        optimizer=optimizer_attitude_net,
                                        T_max=cfg_t_max,
                                        eta_min=cfg_lr_min,
                                        last_epoch=-1)

    # Checkpoint path used for resuming / second-stage training.
    save_path = "attitude_net_and_optimizer_state.pth"

    # Resume from an existing checkpoint when one is present.
    try:
        # Load the saved state.
        checkpoint = torch.load(save_path, map_location=device)
        # Restore the model parameters.
        attPolicy.load_state_dict(checkpoint['model_state_dict'])
        # Restore the optimizer state.
        optimizer_attitude_net.load_state_dict(checkpoint['optimizer_state_dict'])
        # Restore the learning-rate scheduler state.
        lr_scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

        # attPolicy = torch.load('attitude_net.pt', weights_only=False)
        logger.warning(f"Model and optimizer state loaded from {save_path}")
    except FileNotFoundError:
        logger.warning("Model file not found. A new model will be trained")
    except Exception as e:
        # Any other load failure: log it and fall through with a fresh model.
        logger.error(f"An error occurred while loading the model: {e}")

    attPolicy.train()
    logger.info(f"Training mode? {attPolicy.training}")

    # Manual overrides for learning rate / weight decay (disabled):
    # for param_group in optimizer_attitude_net.param_groups:
    #     param_group['lr'] = 2e-5
    #     param_group['weight_decay'] = 0
    # lr_scheduler.eta_min = 1e-6

    # Make sure the plot output directory exists before anything writes to it.
    os.makedirs('train_result', exist_ok=True)

    writer = SummaryWriter('runs/' + cfg_tensorboard_name)
    writer.add_text('Parameter', f'batch_size={cfg_batch_size}, lr={cfg_lr}, lr_min={cfg_lr_min}, t_max={cfg_t_max}, num_epochs={cfg_num_epochs}', 1)
    ## Train only while the model is in training mode.
    if attPolicy.training:
        loss_value_list, X_list, y_list, pred_list, last_epoch_loss_value = train(kNetTrain_attitude,
                                                                                  attPolicy,
                                                                                  criterion_attitude_net,
                                                                                  optimizer_attitude_net,
                                                                                  lr_scheduler,
                                                                                  num_epochs=cfg_num_epochs,
                                                                                  tensorboard_name=cfg_tensorboard_name)
        my_tools.save_loss_curve(loss_value_list, 'train_result/attitude_training_loss.png')
        my_tools.save_last_epoch_curve(X_list, y_list, pred_list, last_epoch_loss_value, output_path='train_result/attitude_last_epoch_curve.png')
        logger.info("Training finished!")
    # Fix: this writer was never closed (train() owns a separate one).
    writer.close()

    ## Switch to evaluation mode for export and validation.
    attPolicy.eval()

    ## Save the model in several formats for different consumers.
    # 1) Checkpoint with model + optimizer + scheduler state (for resuming).
    torch.save({
        'model_state_dict': attPolicy.state_dict(),
        'optimizer_state_dict': optimizer_attitude_net.state_dict(),
        'scheduler_state_dict': lr_scheduler.state_dict(),
    }, save_path)
    # 2) Whole pickled module.
    torch.save(attPolicy, 'attitude_net.pt')
    # 3) State dict only.
    torch.save(attPolicy.state_dict(), 'attitude_net_dict.pt')
    # 4) TorchScript trace, for MATLAB import.
    example_forward_input = torch.rand(7).to(device)
    module = torch.jit.trace(attPolicy.forward, example_forward_input)
    torch.jit.save(module, "attitude_net_mt.pt")
    # 5) ONNX export.
    torch.onnx.export(attPolicy, example_forward_input, "attitude_net.onnx",
                  input_names=["input"], output_names=["output"], opset_version=18, export_params=True)

    # Sanity-check one fixed sample (index 10) against its label.
    test_state = state[10, :]
    test_state = torch.from_numpy(test_state).to(torch.float32).to(device)
    test_output = attPolicy(test_state)

    logger.info(f"model input is {test_state}")
    logger.info(f"model output is {test_output.detach()}, while the label is {output[10, :]}")

    # Evaluate the prediction error over the whole dataset, batched for speed.
    batch_size = 1024  # tune to available GPU memory
    output_diff = []

    for i in tqdm(range(0, len(state), batch_size), desc="Calculating output differences", ncols=100):
        end_idx = min(i + batch_size, len(state))
        batch_state = state[i:end_idx]

        # Convert the whole batch to a tensor and move it to the device.
        batch_state_tensor = torch.from_numpy(batch_state).to(torch.float32).to(device)

        # Batched forward pass.
        with torch.no_grad():  # no gradients needed: inference only
            batch_output = attPolicy(batch_state_tensor).cpu().numpy()

        # Per-sample prediction error.
        batch_diff = batch_output - output[i:end_idx]
        output_diff.append(batch_diff)

    # Stack all batch errors into one array.
    output_diff = np.vstack(output_diff)

    # Plot the two error components in a 2x1 grid.
    fig, axes = plt.subplots(2, 1, figsize=(10, 8))

    # First error component (blue solid line).
    axes[0].plot(output_diff[:, 0], 'b-', label='Column 1')
    axes[0].set_title('Column 1 of output_diff')
    axes[0].set_xlabel('Index')
    axes[0].set_ylabel('Value')
    axes[0].grid(True)
    axes[0].legend()

    # Second error component (red dashed line).
    axes[1].plot(output_diff[:, 1], 'r--', label='Column 2')
    axes[1].set_title('Column 2 of output_diff')
    axes[1].set_xlabel('Index')
    axes[1].set_ylabel('Value')
    axes[1].grid(True)
    axes[1].legend()

    # Avoid overlapping labels, then save.
    plt.tight_layout()
    plt.savefig('train_result/attitude_output_diff.png')
    # Fix: release the figure so repeated calls do not leak memory.
    plt.close(fig)
    # plt.show()

def test_attitude():
    """Smoke-test the attitude policy saved by train_attitude().

    Loads the pickled model from ./attitude_net.pt and runs a single
    all-zero 7-dimensional state through it, printing input and output.
    """
    # NOTE: torch.load with weights_only=False unpickles arbitrary objects —
    # only load checkpoints from trusted sources.
    # (The previous fresh attitudePolicy() instance was dead code: it was
    # immediately overwritten by this load, so it has been removed.)
    attPolicy = torch.load('./attitude_net.pt', weights_only=False)
    print("模型加载成功！")
    # Fix: evaluate in inference mode rather than the loaded training mode.
    attPolicy.eval()

    # Test 2: all-zero state.
    test_state = np.array([0, 0, 0, 0, 0, 0, 0])
    test_state = torch.from_numpy(test_state).to(torch.float32)
    with torch.no_grad():  # no gradients needed for a forward check
        test_output = attPolicy(test_state)
    print(f"test_state is {test_state}")
    print(f"test_output is {test_output}")

# Training loop
def train(dataloader, model, loss_fn, optimizer, lr_scheduler, num_epochs=2, tensorboard_name='set_custom_name'):
    """Run the supervised training loop.

    Trains ``model`` on ``dataloader`` for ``num_epochs`` epochs, stepping
    ``lr_scheduler`` once per epoch and logging losses / learning rate to
    TensorBoard under 'runs/<tensorboard_name>'.

    Args:
        dataloader: yields (X, y) batches.
        model: the network to train (moved to ``device`` by the caller).
        loss_fn: criterion applied to (prediction, label).
        optimizer: optimizer over model.parameters().
        lr_scheduler: stepped once per epoch.
        num_epochs: number of passes over the dataloader.
        tensorboard_name: run name under 'runs/'.

    Returns:
        (loss_value_list, X_list, y_list, pred_list, last_epoch_loss_value)
        where the X/y/pred lists contain batches of the final epoch only.
    """
    model.train()

    loss_value_list = []        # per-batch loss over the whole run
    X_list = []                 # inputs of the final epoch only
    y_list = []                 # labels of the final epoch only
    pred_list = []              # predictions of the final epoch only
    last_epoch_loss_value = []  # per-batch loss of the final epoch only

    writer = SummaryWriter('runs/' + tensorboard_name)
    # Model-graph logging (disabled):
    # fake_input = torch.randn(1, 7).to(device)
    # writer.add_graph(model=model, input_to_model=fake_input)

    # Fix: a monotonically increasing global step for per-batch scalars.
    # Previously the step counter reset each epoch, so later epochs
    # overwrote earlier points on the 'Loss/avg_training' curve.
    global_step = 0

    # Start training.
    for epoch in range(num_epochs):
        pbar = tqdm(dataloader, desc=f"Epoch {epoch+1}/{num_epochs}", ncols=110)

        # Running mean of this epoch's loss; stabilises as batches accumulate.
        avg_loss_rt = 0
        current_count_in_epoch = 0
        for X, y in pbar:
            X, y = X.to(device), y.to(device)

            pred = model(X)
            loss = loss_fn(pred, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_value = loss.item()
            # Track every batch loss for the whole run.
            loss_value_list.append(loss_value)
            # Incremental running mean of this epoch's loss.
            avg_loss_rt = current_count_in_epoch/(current_count_in_epoch+1)*avg_loss_rt + 1/(current_count_in_epoch+1)*loss_value
            current_count_in_epoch += 1
            global_step += 1
            if current_count_in_epoch % 200 == 0:
                # Refresh the progress bar with the epoch-average loss.
                pbar.set_postfix({'avg_loss': f'{avg_loss_rt:.10f}'})
                writer.add_scalar('Loss/avg_training', avg_loss_rt, global_step)

            # Keep the raw tensors of the last epoch only (drop to save time).
            if epoch+1 == num_epochs:
                X_list.append(X.detach().cpu())
                y_list.append(y.detach().cpu())
                pred_list.append(pred.detach().cpu())
                last_epoch_loss_value.append(loss_value)

        # Parameter / gradient histograms (disabled):
        # for name, param in model.named_parameters():
        #     writer.add_histogram(tag=name+'_data', values=param.data, global_step=epoch)
        #     if param.grad is not None:
        #         writer.add_histogram(tag=name+'_grad', values=param.grad, global_step=epoch)

        ## Per-epoch TensorBoard records.
        lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('Loss/Learning Rate', lr, epoch)  # current learning rate
        writer.add_scalar('Loss/avg_train', avg_loss_rt, epoch)  # epoch-average loss

        lr_scheduler.step()  # advance the LR schedule; comment out for a fixed rate

    writer.close()
    if device == 'cuda':
        torch.cuda.empty_cache()
    return loss_value_list, X_list, y_list, pred_list, last_epoch_loss_value

# Human-readable description of each supported CLI target.
COMMANDS = {
    "attitude": "Train attitude policy",
    "test": "Test attitude policy",
}

def get_parser():
    """Build the command-line parser for this training script."""
    cli = argparse.ArgumentParser(description="Train or test attitude policy")
    # NOTE(review): choices here ('attitude', 'pg') differ from the keys of
    # COMMANDS ('attitude', 'test') — confirm which set is intended.
    cli.add_argument('training_target', choices=['attitude', 'pg'], help='输入训练目标')
    cli.add_argument('-p', '--params', default="train", type=str, help='YAML parameters file name')
    return cli

if __name__ == "__main__":
    # Seed every RNG source so repeated runs (including checkpoint-resumed
    # training) are reproducible.
    seed = 42
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    args = get_parser().parse_args()
    # logger.info(args.training_target)
    # logger.info(args.params)

    # Dispatch on the requested target; only 'attitude' is handled here.
    # (args.params always has the default "train", so the check is a guard.)
    if args.training_target == "attitude" and args.params:
        logger.warning(f"Training attitude policy with {args.params}.yaml")
        train_attitude(args.params)