import os
import argparse
import torch
import torch.optim as optim
# import torch.nn as nn
# import torch.nn.DataParallel
from torch.utils.tensorboard import SummaryWriter
from dataLoader import DrivAerDataset
from model import phsoffNet
from datetime import datetime
import yaml
import time
import glob
import random
import numpy as np
from timm.scheduler.cosine_lr import CosineLRScheduler
import multiprocessing
# Set the multiprocessing start method to 'spawn'
multiprocessing.set_start_method('spawn', force=True)
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
# Set the multiprocessing start method to 'spawn'
# import csv
import os
import pandas as pd
import time


def set_random_seed(seed, deterministic=False):
    """Seed every RNG used during training (python, numpy, torch CPU/GPU).

    Args:
        seed: integer seed applied to all generators.
        deterministic: when True, force cuDNN into deterministic mode and
            disable its benchmark autotuner for bitwise-reproducible kernels.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

def load_config(config_path):
    """Parse the YAML configuration file at *config_path* and return it as a dict."""
    with open(config_path, 'r') as stream:
        return yaml.safe_load(stream)



##### MRE-based training; targets with |y| < 1e-4 are skipped
def train_one_epoch_old(data_loader, model, optimizer, device):
    """Train for one epoch using mean relative error (MRE) as the loss.

    Target elements whose absolute value is below 1e-4 are masked out to
    avoid division blow-up in the relative error; a batch where every
    element is masked is skipped entirely (no optimizer step).

    Args:
        data_loader: iterable of (x, y, filename, cubesize) batches.
        model: network called as ``model(x, cubesize)``.
        optimizer: optimizer stepped once per valid batch.
        device: target device for the tensors.

    Returns:
        Average relative error over all unmasked target elements
        (0.0 when every element was masked).
    """
    model.train()
    total_loss_accumulated = 0.0
    total_count = 0    # number of target elements that passed the mask
    skipped_count = 0  # number of batches with no valid target at all

    for x, y, filename, cubesize in data_loader:
        x, y, cubesize = x.to(device), y.to(device), cubesize.to(device)
        optimizer.zero_grad()
        output = model(x, cubesize)
        # Map the model output from [-1, 1] to the physical range [0.25, 0.38].
        output = (output + 1) / 2 * (0.38 - 0.25) + 0.25
        mask = (y.abs() >= 1e-4)

        if mask.sum() > 0:
            filtered_y = y[mask]
            filtered_output = output[mask]
            re = (filtered_y - filtered_output).abs() / filtered_y.abs()
            loss = re.mean()  # mean relative error (MRE)
            # Backward/step only when the batch produced a valid loss.  The
            # original called backward() unconditionally, which raised a
            # NameError when the first batch was fully masked and reused a
            # stale graph on later masked batches.
            loss.backward()
            optimizer.step()
            total_loss_accumulated += loss.item() * mask.sum().item()
            total_count += mask.sum().item()
        else:
            skipped_count += 1

    average_loss = total_loss_accumulated / total_count if total_count > 0 else 0.0
    print(f"Skipped count: {skipped_count}")
    return average_loss


### Mixed loss: batch MRE + running global max relative error
def train_one_epoch(data_loader, model, optimizer, device):
    """Train for one epoch with the mixed loss ``mre + max_re``.

    ``mre`` is the mean relative error of the current batch; ``max_re`` is
    the largest per-element relative error seen so far in the epoch.  Since
    ``max_re`` is tracked as a Python float, it is a constant offset in the
    loss and gradients flow only through ``mre``.

    Target elements with |y| < 1e-4 are masked out; a batch with no valid
    element is skipped — no backward/step.  (The original performed the
    backward unconditionally, raising a NameError on an undefined ``mre``
    whenever the first batch was fully masked.)

    Args:
        data_loader: iterable of (x, y, filename, cubesize) batches.
        model: network called as ``model(x, cubesize)``.
        optimizer: optimizer stepped once per valid batch.
        device: target device for the tensors.

    Returns:
        Tuple ``(mixloss, average_mre, max_re, mse, mae, r2)`` aggregated
        over all unmasked target elements of the epoch.
    """
    model.train()
    total_loss_accumulated = 0.0
    mse_Loss = 0.0
    mae_Loss = 0.0
    r2_sum = 0.0              # per-batch R^2 weighted by element count
    total_count = 0           # number of unmasked target elements
    skipped_count = 0         # batches with no valid target at all
    max_re = -float('inf')    # start at -inf so any observed error updates it

    for x, y, filename, cubesize in data_loader:
        x, y, cubesize = x.to(device), y.to(device), cubesize.to(device)
        optimizer.zero_grad()
        output = model(x, cubesize)
        # Map the model output from [-1, 1] to the physical range [0.25, 0.38].
        output = (output + 1) / 2 * (0.38 - 0.25) + 0.25
        mask = (y.abs() >= 1e-4)

        if mask.sum() > 0:
            filtered_y = y[mask]
            filtered_output = output[mask]
            re = (filtered_y - filtered_output).abs() / filtered_y.abs()
            mse_error = (filtered_y - filtered_output) ** 2
            mae_error = (filtered_y - filtered_output).abs()

            mre = re.mean()                        # batch mean relative error
            max_re = max(max_re, re.max().item())  # update global max_re

            n = mask.sum().item()
            total_loss_accumulated += mre.item() * n
            mse_Loss += mse_error.mean().item() * n
            mae_Loss += mae_error.mean().item() * n
            total_count += n

            # Per-batch coefficient of determination R^2.
            y_mean = filtered_y.mean()
            ss_total = ((filtered_y - y_mean) ** 2).sum()
            ss_residual = ((filtered_y - filtered_output) ** 2).sum()
            r2_sum += (1 - ss_residual / ss_total).item() * n

            # max_re is a plain float here, so only mre contributes gradients.
            loss = mre + max_re
            loss.backward()
            optimizer.step()
        else:
            skipped_count += 1

    average_loss = total_loss_accumulated / total_count if total_count > 0 else 0.0
    mse = mse_Loss / total_count if total_count > 0 else 0.0
    mae = mae_Loss / total_count if total_count > 0 else 0.0
    mixloss = average_loss + max_re  # final mixed loss reported for the epoch
    r2 = r2_sum / total_count if total_count > 0 else 0.0
    print(f"Skipped count: {skipped_count}")
    print(f"Global max_re: {max_re}")
    return mixloss, average_loss, max_re, mse, mae, r2


def test(data_loader, model, device):
    model.eval()
    total_loss_accumulated = 0.0
    mse_Loss = 0.0
    mae_Loss = 0.0
    r2_sum = 0.0  # 用于累加 R^2 值
    total_count = 0  # 统计符合条件的样本数
    skipped_count = 0  # 统计被跳过的样本数
    max_mae = 0.0  # 初始化最大的 MAE
    re = 0.0 
    max_re = 0.0  # 初始化最大的 MAE
    with torch.no_grad():
        for x, y, filename, cubesize in data_loader:
            x, y, cubesize = x.to(device), y.to(device), cubesize.to(device)       
            # y = (y-0.037) *  0.142  ### Anti-normalization
            start_time = time.time() 
            output = model(x, cubesize) 
            # output = output*0.13+0.25
            output = (output + 1) / 2 * (0.38 - 0.25) + 0.25 
            # output = output * 0.2 + 0.2  # 对输出进行缩放
            # output = (output-0.037)*0.142
            # 记录模型推理后的时间并计算时间差
            end_time = time.time()

            # 创建掩码：选择 y 的绝对值大于或等于 10^-4 的位置
            mask = (y.abs() >= 1e-4)

            if mask.sum() > 0:
                filtered_y = y[mask]
                filtered_output = output[mask]
                
                # 计算rere误差 MRE
                re = ((filtered_y - filtered_output).abs()) / (filtered_y.abs())
                mse_error = ((filtered_y - filtered_output)) ** 2
                mae_error = ((filtered_y - filtered_output).abs())

                loss1 = re.mean()  # 计算平均相对误ee
                loss2 = mse_error.mean()
                loss3 = mae_error.mean()
                
                total_loss_accumulated += loss1.item() * mask.sum().item()
                mse_Loss += loss2.item() * mask.sum().item()
                mae_Loss += loss3.item() * mask.sum().item()
                total_count += mask.sum().item()

                # 更新最大的 MAE
                # max_mae = max(max_mae, mae_error.max().item())  # 计算当前 batch 中的最大 MAE
                max_re = max(max_re, re.max().item())  # 计算当前 batch 中的最大 re

                # 计算 R^2
                y_mean = filtered_y.mean()
                ss_total = ((filtered_y - y_mean) ** 2).sum()  # 总平方和
                ss_residual = ((filtered_y - filtered_output) ** 2).sum()  # 残差平方和
                r2_batch = 1 - ss_residual / ss_total  # 计算当前 batch 的 R^2
                r2_sum += r2_batch.item() * mask.sum().item()  # 累加 R^2

            else:
                skipped_count += 1  # 统计不符合条件的样本数

    # 计算平均损失
    average_loss = total_loss_accumulated / total_count if total_count > 0 else 0.0
    mse_Loss = mse_Loss / total_count if total_count > 0 else 0.0
    mae_Loss = mae_Loss / total_count if total_count > 0 else 0.0
    # 计算总的 R^2
    r2 = r2_sum / total_count if total_count > 0 else 0.0

    return average_loss, mse_Loss, mae_Loss, max_re, r2


# def val(data_loader, model, device):
#     model.eval()
#     total_loss = 0.0
#     with torch.no_grad():
#         for x, y ,filename in data_loader:
#             x, y = x.to(device), y.to(device)
#             # output, _, _ = model(x)
#             output  = model(x)
#             # loss = ((y - output) / y).abs().mean()
#             loss = ((y - output).pow(2)).mean()
#             total_loss += loss.item()
#     average_loss = total_loss / len(data_loader)
#     return average_loss


if __name__ == "__main__":
    torch.cuda.empty_cache()
    set_random_seed(42)  # fixed seed so runs are reproducible

    # CLI: one positional argument giving the YAML config path.
    parser = argparse.ArgumentParser(description="训练和测试模型")
    parser.add_argument("config_path", type=str, help="配置文件的路径")
    # Let argparse handle bad arguments itself.  The original wrapped this in
    # try/except SystemExit, which swallowed the exit and then crashed with a
    # NameError on the undefined `args` below.
    args = parser.parse_args()

    # Load the experiment configuration.
    config = load_config(args.config_path)

    # Paths.
    train_data_path = config['paths']['train_data_path']
    test_data_path = config['paths']['test_data_path']
    train_csv_name = config['paths']['train_csv_name']
    test_csv_name = config['paths']['test_csv_name']
    test3900_csv_name = config['paths']['test3900_csv_name']
    test1600_csv_name = config['paths']['test1600_csv_name']
    log_dir = config['paths']['log_dir']
    checkpoint_path = config['paths']['checkpoint_path']

    # Model hyper-parameters.
    dim = config['model']['dim']
    heads = config['model']['heads']
    group_size = config['model']['group_size']
    num_group = config['model']['num_group']

    # Training hyper-parameters.
    batch_size = config['training']['batch_size']
    num_workers = config['training']['num_workers']
    learning_rate = config['training']['learning_rate']
    weight_decay = config['training']['weight_decay']
    epoch_number = config['training']['epoch_number']
    best_test_loss = config['training'].get('best_test_loss', float('inf'))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device-------------", device)

    # TensorBoard writer under a per-experiment, per-run directory.
    experiment_name = config.get('experiment_name', "exp_with_custom_params")
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    log_dir = f"{log_dir}{experiment_name}_{current_time}"
    writer = SummaryWriter(log_dir=log_dir)

    # Directory for model checkpoints.
    ckpt_path = os.path.join(checkpoint_path, experiment_name, current_time)
    os.makedirs(ckpt_path, exist_ok=True)

    # Datasets and loaders.
    train_dataset = DrivAerDataset(config, train_csv_name)
    test_dataset = DrivAerDataset(config, test_csv_name)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True,
                                              num_workers=num_workers)

    # Model (optionally wrapped for multi-GPU), optimizer and cosine LR
    # schedule with 5 warmup epochs.
    model = phsoffNet(dim=dim)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model.to(device)

    print('# generator parameters:', sum(param.numel() for param in model.parameters()))
    optimizer = optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    scheduler = CosineLRScheduler(optimizer,
                                  t_initial=epoch_number,
                                  lr_min=1e-7,
                                  warmup_lr_init=1e-7,
                                  warmup_t=5,
                                  cycle_limit=1,
                                  t_in_epochs=True)

    for epoch in range(epoch_number):
        start_time = time.time()
        mixloss, mre, max_re, mse, mae, r2 = train_one_epoch(train_loader, model, optimizer, device)
        start_test_time = time.time()
        test_mre_loss, test_mse_loss, test_mae_loss, test_max_mae, test_r2 = test(test_loader, model, device)

        # Log train metrics under Train/* and test metrics under Test/*.
        # The original logged train values under mislabelled Test/Val tags.
        writer.add_scalar('Loss/Train-mix', mixloss, epoch)
        writer.add_scalar('Loss/Train-mre', mre, epoch)
        writer.add_scalar('Loss/Train-maxre', max_re, epoch)
        writer.add_scalar('Loss/Train-mse', mse, epoch)
        writer.add_scalar('Loss/Train-mae', mae, epoch)
        writer.add_scalar('Loss/Test-mre', test_mre_loss, epoch)
        writer.add_scalar('Loss/Test-mse', test_mse_loss, epoch)
        writer.add_scalar('Loss/Test-mae', test_mae_loss, epoch)
        writer.add_scalar('Loss/Test-maxre', test_max_mae, epoch)
        writer.add_scalar('Loss/Test-r2', test_r2, epoch)

        print(
            f"Epoch: {epoch + 1}, Train mix Loss: {mixloss:.10f}, Test mre Loss: {test_mre_loss:.10f},Test mse Loss: {test_mse_loss:.5f},Test mae Loss: {test_mae_loss:.10f}, Test MAX-re Loss: {test_max_mae:.10f},Test R2 : {test_r2:.10f}\
            lr: {optimizer.param_groups[0]['lr']:.10f},Train Time: {start_test_time - start_time:.2f}s,Test Time: {time.time() - start_test_time:.2f}s,Total Time: {time.time() - start_time:.2f}s")

        # Checkpoint whenever the test MRE improves.
        if test_mre_loss < best_test_loss:
            best_test_loss = test_mre_loss
            save_path = os.path.join(ckpt_path, f"best_model_mixloss_{best_test_loss:.10f}.pt")
            torch.save(model.state_dict(), save_path)
            print("Model saved as best_model.pt")

        scheduler.step(epoch)

    writer.close()

    # Keep only the 5 most recent checkpoints (the original comment claimed
    # "keep the lowest two" but the code retains the five newest by mtime).
    ckpt_files = glob.glob(os.path.join(ckpt_path, '*.pt'))
    ckpt_files.sort(key=os.path.getmtime)  # oldest first
    for stale in ckpt_files[:-5]:
        os.remove(stale)

 