from torch.utils.data import Dataset, DataLoader
import torch
import os
import json
import h5py
import random
import numpy as np
from cadlib.macro import *  # provides EOS_VEC and related macro definitions

def normalize_data(sv_data):
    """Z-score the ``area`` and ``vol`` fields of *sv_data*.

    Uses fixed dataset-wide [mean, std] statistics. Returns a new dict;
    the input mapping is not modified.
    """
    area_mu, area_sigma = 2.3211679274979176, 2.1119031633358403
    vol_mu, vol_sigma = 0.1655415327248157, 0.28387722447379904
    normalized_area = (sv_data["area"] - area_mu) / area_sigma
    normalized_vol = (sv_data["vol"] - vol_mu) / vol_sigma
    return {"area": normalized_area, "vol": normalized_vol}

def _seed_worker(worker_id):
    """Give each DataLoader worker a distinct, epoch-varying NumPy seed.

    Derives the seed from torch's per-worker initial seed so that
    numpy-based randomness differs across workers and across epochs.
    """
    np.random.seed((torch.initial_seed() + worker_id) % 2**32)


def get_dataloader(phase, config, shuffle=None):
    """Build a DataLoader over CADDataset for the given split.

    Args:
        phase: split name, e.g. "train"/"validation"/"test" (a key of the
            split json).
        config: object providing data_root, augment, batch_size,
            num_workers, max_n_loops, max_n_curves, max_total_len.
        shuffle: optional override; when None, shuffles only for "train".

    Returns:
        torch.utils.data.DataLoader yielding batches collated by
        custom_collate_fn.
    """
    is_shuffle = phase == 'train' if shuffle is None else shuffle
    dataset = CADDataset(phase, config)
    # BUG FIX: the original passed ``np.random.seed()`` — the function was
    # CALLED at construction time (seeding the parent process once) and its
    # None return became worker_init_fn, so workers were never re-seeded.
    # Pass a proper callable instead.
    dataloader = DataLoader(dataset, batch_size=config.batch_size, shuffle=is_shuffle,
                            num_workers=config.num_workers,
                            worker_init_fn=_seed_worker, collate_fn=custom_collate_fn)
    return dataloader


class CADDataset(Dataset):
    """CAD command-sequence dataset backed by per-model h5 files.

    Each item is a dict with:
        command:    (max_total_len,) long tensor of command tokens
        args:       (max_total_len, N_ARGS) long tensor of command arguments
        conditions: (2,) float tensor of z-scored [area, vol]
        id:         the data id string
    """

    def __init__(self, phase, config):
        """Load the split's id list and surface/volume data, then filter.

        Args:
            phase: split name; must be a key of both json files.
            config: object providing data_root, augment, max_n_loops,
                max_n_curves, max_total_len.
        """
        super(CADDataset, self).__init__()
        self.raw_data = os.path.join(config.data_root, "cad_vec")  # h5 data root
        self.phase = phase
        self.aug = config.augment
        self.path = os.path.join(config.data_root, "train_val_test_split.json")
        self.sv_path = os.path.join(config.data_root, "sv_data.json")
        with open(self.path, "r") as fp:
            self.all_data = json.load(fp)[phase]
        with open(self.sv_path, "r") as fp:
            self.all_sv_data = json.load(fp)[phase]

        print(f"Loading {len(self.all_data)} data entries. Filtering in progress...")
        # Drop entries whose recorded area or volume is non-positive.
        self.all_data = [data_id for data_id in self.all_data if self.is_valid_data(data_id)]
        print(f"{len(self.all_data)} data entries remaining.")

        # Dataset-wide [mean, std] used for z-scoring; precomputed once via
        # _get_normalization_param and hard-coded for reproducibility.
        self.normalization_param = {'area': [2.3211679274979176, 2.1119031633358403],
                                    'vol': [0.1655415327248157, 0.28387722447379904]}

        self.max_n_loops = config.max_n_loops  # Number of paths (N_P)
        self.max_n_curves = config.max_n_curves  # Number of commands (N_C)
        self.max_total_len = config.max_total_len
        self.size = 256

    def _get_normalization_param(self):
        """Compute [mean, std] of area and vol over the (filtered) split."""
        area_list = []
        vol_list = []

        for data_id in self.all_data:
            area_list.append(self.all_sv_data[data_id]["area"])
            vol_list.append(self.all_sv_data[data_id]["vol"])

        area_array = np.array(area_list)
        vol_array = np.array(vol_list)

        area_mu, area_sigma = area_array.mean(), area_array.std()
        vol_mu, vol_sigma = vol_array.mean(), vol_array.std()

        return {
            "area": [area_mu, area_sigma],
            "vol": [vol_mu, vol_sigma]
        }

    def is_valid_data(self, data_id):
        """Return True when both area and vol of *data_id* are positive."""
        return not (self.all_sv_data[data_id]["area"] <= 0 or self.all_sv_data[data_id]["vol"] <= 0)

    def normalize_data(self, sv_data):
        """Z-score ``area`` and ``vol`` with the dataset statistics."""
        return {
            "area": (sv_data["area"] - self.normalization_param["area"][0]) / self.normalization_param["area"][1],
            "vol": (sv_data["vol"] - self.normalization_param["vol"][0]) / self.normalization_param["vol"][1]
        }

    def get_data_by_id(self, data_id):
        """Fetch one item by its id string instead of its index."""
        idx = self.all_data.index(data_id)
        return self.__getitem__(idx)

    def __getitem__(self, index):
        data_id = self.all_data[index].strip()  # strip any trailing newline
        h5_path = os.path.join(self.raw_data, data_id + ".h5")
        sv_data = self.normalize_data(self.all_sv_data[data_id])
        conditions = torch.tensor([sv_data["area"], sv_data["vol"]], dtype=torch.float32)
        try:
            with h5py.File(h5_path, "r") as fp:
                cad_vec = fp["vec"][:]  # (len, 1 + N_ARGS)
        except Exception as e:
            print(f"Error loading {h5_path}: {e}")
            # BUG FIX: the original fallback omitted the "conditions" key,
            # which crashed custom_collate_fn; return a complete (empty) item.
            return {"command": torch.tensor([], dtype=torch.long),
                    "args": torch.tensor([], dtype=torch.long),
                    "conditions": conditions, "id": data_id}

        # BUG FIX: truncate sequences longer than max_total_len; otherwise
        # pad_len goes negative and the EOS repeat below raises.
        cad_vec = cad_vec[:self.max_total_len]
        pad_len = self.max_total_len - cad_vec.shape[0]
        cad_vec = np.concatenate([cad_vec, EOS_VEC[np.newaxis].repeat(pad_len, axis=0)], axis=0)

        command = torch.tensor(cad_vec[:, 0], dtype=torch.long)
        args = torch.tensor(cad_vec[:, 1:], dtype=torch.long)
        return {"command": command, "args": args, "conditions": conditions, "id": data_id}

    def __len__(self):
        return len(self.all_data)


def custom_collate_fn(batch):
    """Collate dataset items into padded batch tensors.

    Pads every ``command`` with zeros to the longest command length in the
    batch, and every ``args`` to (max_args_len, 16). A one-dimensional
    ``args`` entry (e.g. the empty error-fallback item) is promoted to 2-D
    with its values placed in column 0.

    Returns:
        dict with 'command' (batch, max_command_len), 'args'
        (batch, max_args_len, 16), 'conditions' (batch, 2) and 'id' (list).

    Raises:
        ValueError: if an ``args`` entry has an unsupported shape.
    """
    n_arg_cols = 16  # width of the args vector (N_ARGS in cadlib.macro)

    commands = [item['command'] for item in batch]
    args = [item['args'] for item in batch]
    conditions = [item['conditions'] for item in batch]

    # Longest sequence lengths in this batch.
    max_command_len = max(command.size(0) for command in commands)
    max_args_len = max(arg.size(0) for arg in args)

    # Zero-pad command tensors.
    padded_commands = [
        torch.cat([command, torch.zeros(max_command_len - command.size(0), dtype=torch.long)])
        for command in commands
    ]

    # Zero-pad args tensors.
    padded_args = []
    for arg in args:
        if arg.dim() == 1:  # 1-D tensor, e.g. shape [60]
            # Pad to (max_args_len, n_arg_cols); values go into column 0,
            # the remaining columns stay 0 and can be ignored downstream.
            padded_arg = torch.zeros(max_args_len, n_arg_cols, dtype=torch.long)
            padded_arg[:arg.size(0), :1] = arg.view(-1, 1)
            padded_args.append(padded_arg)
        elif arg.dim() == 2 and arg.size(1) == n_arg_cols:  # 2-D, e.g. [60, 16]
            padded_arg = torch.cat([arg, torch.zeros(max_args_len - arg.size(0), n_arg_cols, dtype=torch.long)])
            padded_args.append(padded_arg)
        else:
            # BUG FIX: the original printed and silently SKIPPED the item,
            # leaving padded_args shorter than commands/conditions and
            # producing a misaligned batch. Fail loudly instead.
            raise ValueError(f"Unexpected arg shape: {arg.shape}")

    return {
        'command': torch.stack(padded_commands),  # (batch_size, max_command_len)
        'args': torch.stack(padded_args),         # (batch_size, max_args_len, 16)
        'conditions': torch.stack(conditions),
        'id': [item['id'] for item in batch]
    }

