import os
import argparse
import torch
from dataLoader import DrivAerDataset,DrivAerDataset_stl4pre_random,DrivAerDataset_stl4pre_random_adapt,DrivAerDataset_stl4pre_random_cd
from model import phsoffNet
import yaml
import time
import numpy as np
import multiprocessing
import random
import sys
import csv
from tqdm import tqdm
import pandas as pd



# Force the 'spawn' start method for worker processes — presumably so CUDA
# can be used safely inside DataLoader workers (fork + CUDA is problematic);
# force=True overrides any method already set by an importing module.
multiprocessing.set_start_method('spawn', force=True)


def set_random_seed(seed, deterministic=False):
    """Seed every RNG (Python, NumPy, PyTorch CPU and CUDA) for reproducibility.

    Args:
        seed: integer seed applied to all random number generators.
        deterministic: when True, additionally put cuDNN into deterministic
            mode and disable its benchmark auto-tuner (slower but repeatable).
    """
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seed_fn in seeders:
        seed_fn(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


def load_config(config_path):
    """Parse the YAML configuration file at *config_path* and return it as a dict."""
    with open(config_path, 'r') as fh:
        return yaml.safe_load(fh)


### valide for 1 .stl file
def valide(data_loader, model, device, output_csv=None):
    """Predict the drag coefficient (Cd) for a single-geometry loader.

    Args:
        data_loader: iterable yielding ``(x, cubesize)`` tensor pairs.
        model: network called as ``model(x, cubesize)``; put in eval mode here.
        device: torch device (or device string) inputs are moved to.
        output_csv: unused; kept for signature compatibility with the other
            ``valide_*`` helpers.

    Returns:
        Tuple ``(output, ref_time, re)`` where ``output`` is the de-normalized
        prediction from the last batch (``None`` if the loader is empty),
        ``ref_time`` is the last batch's inference time in seconds, and ``re``
        is the relative error against a fixed reference Cd.
    """
    model.eval()
    # Initialize results so an empty loader returns (None, 0.0, 0.0)
    # instead of raising NameError on unbound locals.
    output = None
    ref_time = 0.0
    re = 0.0

    with torch.no_grad():
        for x, cubesize in data_loader:
            x, cubesize = x.to(device), cubesize.to(device)
            start_time = time.time()
            output = model(x, cubesize)
            # De-normalize from [-1, 1] back to the Cd range [0.25, 0.38].
            output = (output + 1) / 2 * (0.38 - 0.25) + 0.25
            # Relative error against a fixed reference Cd of 0.31977284 —
            # presumably the ground truth of the single test geometry; confirm.
            re = (output - 0.31977284) / 0.31977284
            ref_time = time.time() - start_time

    return output, ref_time, re

def valide_950st_cd_old(data_loader, model, device, output_csv=None):
    """Older evaluation variant: compute MRE/MAE/MSE/R² for Cd predictions
    over a multi-sample loader and always write per-sample results to
    *output_csv* (writ_cd == 1).

    Returns (output, mre_Loss, mae_Loss, mse_Loss, max_re, r2, ref_time):
    the last batch's predictions, the three averaged error metrics, the
    maximum per-sample relative error, R², and the last batch's inference
    time in seconds.

    NOTE(review): raises NameError on an empty loader (ref_time unbound),
    and the |y| >= 1e-4 mask is computed but never applied — see below.
    """
    model.eval()
    total_loss_accumulated = 0.0
    mse_Loss = 0.0
    mae_Loss = 0.0
    mre_Loss = 0.0
    total_count = 0  # count of samples passing the |y| >= 1e-4 mask
    skipped_count = 0  # count of skipped samples (unused — masking is disabled)
    max_re = 0.0  # running maximum relative error
    all_filenames = []  # filenames of every sample
    all_y = []  # ground-truth values
    all_output = []  # predicted values
    all_absolute_error = []  # per-sample relative errors (despite the name)

    with torch.no_grad():
        for x, cubesize,y,filename in data_loader:
            x, cubesize,y = x.to(device), cubesize.to(device), y.to(device)
            start_time = time.time()
            # Forward pass.
            output = model(x, cubesize)
            # output = output*0.13+0.25 ### de-normalization (old formula)
            output = (output + 1) / 2 * (0.38 - 0.25) + 0.25  # map [-1, 1] -> [0.25, 0.38]
            over_time = time.time()
            ref_time = over_time-start_time
            # Mask: positions where |y| >= 1e-4.
            # NOTE(review): the mask only weights the running sums below; it is
            # NOT applied to y/output, so every sample enters the error terms.
            mask = (y.abs() >= 1e-4)

            # if mask.sum() > 0:
            filtered_y = y
            filtered_output = output
            # Per-sample relative error (MRE).
            relative_error = ((filtered_y - filtered_output).abs())/(filtered_y.abs())
            mse_error = ((filtered_y - filtered_output))**2
            mae_error = ((filtered_y - filtered_output).abs())
            loss1 = relative_error.mean()   # mean relative error
            loss2 = mse_error.mean()        # mean squared error
            loss3 = mae_error.mean()        # mean absolute error
            total_loss_accumulated += loss1.item() * mask.sum().item()
            mre_Loss += loss1.item() * mask.sum().item()
            mse_Loss += loss2.item() * mask.sum().item()
            mae_Loss += loss3.item() * mask.sum().item()
            total_count += mask.sum().item()

            # Track the running maximum relative error.
            # max_mae = max(max_mae, mae_error.max().item())  # max MAE in the current batch
            max_re = max(max_re, relative_error.max().item())  # max relative error in the current batch
            # Collect values for R² and the per-sample error report.
            all_filenames.extend(filename)  # filenames
            all_y.extend(filtered_y.cpu().numpy())  # ground-truth values
            all_output.extend(filtered_output.cpu().numpy())  # predictions
            all_absolute_error.extend(relative_error.cpu().numpy())  # relative errors
            # else:
            #     skipped_count += 1  # samples failing the mask condition

    average_loss = total_loss_accumulated / total_count if total_count > 0 else 0.0
    mre_Loss = mre_Loss / total_count if total_count > 0 else 0.0
    mse_Loss = mse_Loss / total_count if total_count > 0 else 0.0
    mae_Loss = mae_Loss / total_count if total_count > 0 else 0.0
    

    # Coefficient of determination (R²) over all collected samples.
    y = np.array(all_y)
    output = np.array(all_output)
    ss_res = np.sum((y - output) ** 2)  # Residual sum of squares
    ss_tot = np.sum((y - np.mean(y)) ** 2)  # Total sum of squares
    r2 = 1 - (ss_res / ss_tot)  # R-squared

    # Print R²
    # print(f"R²: {r2:.4f}")
    writ_cd = 1
    # Dump per-sample results to a CSV file.
    if writ_cd == 1:
        results_df = pd.DataFrame({
            "Filename": all_filenames,
            "True Value (y)": all_y,
            "Predicted Value": all_output,
            "Absolute Error": all_absolute_error
        })
        results_df.to_csv(output_csv, index=False)

    return output, mre_Loss,mae_Loss, mse_Loss,max_re ,r2, ref_time  # return metrics, including R²

def valide_950st_cd(data_loader, model, device, output_csv=None):
    """Evaluate Cd predictions over a multi-sample loader.

    Args:
        data_loader: iterable of ``(x, cubesize, y, filename)`` batches.
        model: network called as ``model(x, cubesize)``; put in eval mode here.
        device: torch device (or device string) batch tensors are moved to.
        output_csv: target path for the per-sample CSV report (currently
            disabled — see the ``writ_cd`` flag below).

    Returns:
        ``(predictions, mre, mae, mse, max_re, r2, ref_time)`` — all collected
        predictions as a numpy array, the three averaged error metrics, the
        maximum per-sample relative error, R², and the last batch's inference
        time in seconds.
    """
    model.eval()

    # Running sums, each weighted by the number of samples with |y| >= 1e-4.
    sum_mre = 0.0
    sum_mse = 0.0
    sum_mae = 0.0
    sample_count = 0
    worst_re = 0.0  # largest per-sample relative error seen so far

    names = []      # filenames of every sample
    truths = []     # ground-truth Cd values
    preds = []      # predicted Cd values
    abs_errs = []   # per-sample absolute errors
    rel_errs = []   # per-sample relative errors

    with torch.no_grad():
        for x, cubesize, y, filename in data_loader:
            x = x.to(device)
            cubesize = cubesize.to(device)
            y = y.to(device)

            t0 = time.time()
            pred = model(x, cubesize)
            # De-normalize from [-1, 1] back to the Cd range [0.25, 0.38].
            pred = (pred + 1) / 2 * (0.38 - 0.25) + 0.25
            ref_time = time.time() - t0

            # Number of targets with |y| >= 1e-4. NOTE: used only as a weight;
            # the error terms below are computed over the whole batch.
            weight = (y.abs() >= 1e-4).sum().item()

            rel = ((y - pred).abs()) / (y.abs())
            sq = ((y - pred)) ** 2
            ab = ((y - pred).abs())

            sum_mre += rel.mean().item() * weight
            sum_mse += sq.mean().item() * weight
            sum_mae += ab.mean().item() * weight
            sample_count += weight

            worst_re = max(worst_re, rel.max().item())

            names.extend(filename)
            truths.extend(y.cpu().numpy())
            preds.extend(pred.cpu().numpy())
            abs_errs.extend(ab.cpu().numpy())
            rel_errs.extend(rel.cpu().numpy())

    # Coefficient of determination (R²) over all collected samples.
    y_all = np.array(truths)
    out_all = np.array(preds)
    ss_res = np.sum((y_all - out_all) ** 2)
    ss_tot = np.sum((y_all - np.mean(y_all)) ** 2)
    r2 = 1 - (ss_res / ss_tot)

    # CSV export is disabled in this variant (flip writ_cd to 1 to enable).
    writ_cd = 0
    if writ_cd == 1:
        report = pd.DataFrame({
            "Filename": names,
            "True Value (y)": truths,
            "Predicted Value": preds,
            "Absolute Error": abs_errs
        })
        report.to_csv(output_csv, index=False)

    if sample_count > 0:
        mre = sum_mre / sample_count
        mse = sum_mse / sample_count
        mae = sum_mae / sample_count
    else:
        mre = 0.0
        mse = 0.0
        mae = 0.0

    return out_all, mre, mae, mse, worst_re, r2, ref_time


if __name__ == "__main__":
    torch.cuda.empty_cache()
    set_random_seed(42)

    # Build the CLI parser (the help strings are user-facing runtime text
    # and are intentionally left as-is).
    parser = argparse.ArgumentParser(description="测试模型")
    parser.add_argument("config_path", type=str, help="配置文件的路径")
    parser.add_argument("model_path", type=str, help="训练好的模型权重路径")
    
    # Parse command-line arguments.
    # args = parser.parse_args()
    print("Arguments received:", sys.argv)
    args = parser.parse_args()
    # Load settings from the YAML configuration file.
    config = load_config(args.config_path)

    # Pull individual settings out of the config dict.
    filepath = config['paths']['plt_data_path']
    batch_size = config['testing']['batch_size']
    num_workers = config['testing']['num_workers']
    dim = config['model']['dim']
    heads = config['model']['heads']
    group_size = config['model']['group_size']
    num_group = config['model']['num_group']
    # output_dir = "./testout-npy/"
    output_dir = config['paths']['output_path']
    test900_csv_name = config['paths']['test_csv_name']
    test1600_csv_name = config['paths']['test1600_csv_name']
    # csv_path = config['paths']['norm_data_csv_path'] 

    # Use the GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device:", device)

    # # Load the 900/1600-sample test datasets (disabled).
    # test900_dataset = DrivAerDataset(config, test900_csv_name)
    # test1600_dataset = DrivAerDataset(config, test1600_csv_name)
    # # test_dataset = npy1PointCloudDataset(filepath)
    # test900_loader = torch.utils.data.DataLoader(test900_dataset, batch_size=batch_size, num_workers=num_workers)
    # test1600_loader = torch.utils.data.DataLoader(test1600_dataset, batch_size=batch_size, num_workers=num_workers)

    ### Dataset for the single-STL-file test.
    # data_dir_1stl = "/home/bingxing2/home/scx8ajl/ldz/Dirve/data/stl800point"
    data_dir_1stl = "/home/bingxing2/home/scx8ajl/ldz/Dirve/data/new-general-stl"

    stlfile_dataset = DrivAerDataset_stl4pre_random(data_dir_1stl) ## uniform sampling
    # stlfile_dataset = DrivAerDataset_stl4pre_random_adapt(data_dir) ### adaptive sampling
    stlfile_loader_1stl = torch.utils.data.DataLoader(stlfile_dataset, batch_size=batch_size, num_workers=num_workers)
    
    ### Dataset for the multi-STL (562-file) test.
    # data_dir_36 = "/home/bingxing2/home/scx8ajl/ldz/Dirve/data/DrivAer_test36"
    data_dir_36 = "/home/bingxing2/home/scx8ajl/ldz/Dirve/data/562-stl-test"
    
    stlfile_dataset_36 = DrivAerDataset_stl4pre_random_cd(data_dir_36,config) ## uniform sampling
    stlfile_loader_36 = torch.utils.data.DataLoader(stlfile_dataset_36, batch_size=16, num_workers=8)


    # Instantiate the model.
    model = phsoffNet(dim=dim)
    # model = MLPM()
    # if torch.cuda.device_count() > 1:
    #     model = torch.nn.DataParallel(model)
    model.to(device)

    # Load the trained weights.
    state_dict = torch.load(args.model_path)
    if torch.cuda.device_count() > 0:
        # If model was trained with DataParallel, adjust the state_dict keys
        # by stripping the 'module.' prefix.
        new_state_dict = {}
        for k, v in state_dict.items():
            if k.startswith('module.'):
                new_state_dict[k[7:]] = v
            else:
                new_state_dict[k] = v
        model.load_state_dict(new_state_dict, strict=False)
    else:
        model.load_state_dict(state_dict)
   
    
    # Run the evaluation.
    start_time = time.time()
    ## 11.25
    # test3900_result="./test900_result.csv"
    # test1600_result="./test1600_result.csv"
    # test900_mre_loss,test900_mse_loss,test900_mae_loss,test900_max_mae,test3900_r2 = test1(test900_loader, model, device,test3900_result)
    # print(f"mre: {test900_mre_loss:.6f} , mse: {test900_mse_loss:.6f} ,mae: {test900_mae_loss:.6f} ,r2: {test3900_r2:.6f} ")

    # middle_time = time.time()
    # test1600_mre_loss,test1600_mse_loss,test1600_mae_loss,test1600_max_mae,test1600_r2 = test1(test1600_loader, model, device,test1600_result)
    # print(f"Test3900 mre Loss: {test3900_mre_loss:.10f}, Test3900 mse Loss: {test3900_mse_loss:.10f},Test3900 mae Loss: {test3900_mae_loss:.5f},Test3900 R2: {test3900_max_mae:.5f},Test3900 r2: {test3900_r2:.5f},\
    #     test3900 Time: {middle_time - start_time:.2f}s,\
    #     Test1600 mre Loss: {test1600_mre_loss:.10f}, Test1600 mse Loss: {test1600_mse_loss:.10f},Test1600 mae Loss: {test1600_mae_loss:.5f},Test1600 max-mae Loss: {test1600_max_mae:.5f}, Test1600 r2: {test1600_r2:.5f},\
    #         test1600 Time: {time.time() - middle_time:.2f}s")
    
    
    
    # test1="./test1"
    # ## single-STL-file test (disabled)
    # cd, ref_time,re = valide(stlfile_loader_1stl , model, device,test1)
    # print(f"pre cd: {cd.item():.6f} , ref time : {ref_time:.6f}, re : {re.item():.6f}")
    
    # Record the start time.
    start_time = time.time()
    test36="./test562.csv"
    # Evaluate on the 562-STL test set.
    output, mre_Loss, mae_Loss, mse_Loss, max_re, r2, ref_time = valide_950st_cd(stlfile_loader_36, model, device, test36)

    # Record the end time.
    end_time = time.time()

    # Compute the elapsed wall-clock time.
    elapsed_time = end_time - start_time

    # # Print the timing result.
    # print(f"Execution time of valide_950st_cd: {elapsed_time:.6f} seconds")
    print(f"mre: {mre_Loss:.6f} , mae: {mae_Loss:.6f} , mse: {mse_Loss:.6f} , max re: {max_re:.6f} , r2: {r2:.6f} , ref time: {ref_time:.6f}")
