import torch
import numpy as np
from tqdm import tqdm
    

class GrainDatasetOnGPU():
    """Holds an entire grain dataset resident on one device and serves batches.

    All six tensors are moved to ``device`` once at construction time, so
    batching is pure indexing with no host/device transfer per step.
    """

    def __init__(self, data, batch_size, device='cuda'):
        """Move the dataset to ``device`` and precompute batching metadata.

        Args:
            data: sequence of six tensors, row-aligned:
                (index, lv, gv, bv, av1, av2).
            batch_size: number of items per batch.
            device: target device for all tensors (default ``'cuda'``,
                preserving the previous hard-coded behavior).
        """
        self.device = torch.device(device)

        # Move all data onto the target device once, up front.
        self.list_index = data[0].to(self.device)
        self.list_lv = data[1].to(self.device)
        self.list_gv = data[2].to(self.device)
        self.list_bv = data[3].to(self.device)
        self.list_av1 = data[4].to(self.device)
        self.list_av2 = data[5].to(self.device)

        # Feature sizes: x is the concatenation width of (lv, gv, av1);
        # bv and av2 are conditioning vectors.
        self.data_item_num = self.list_av2.shape[0]
        self.x_size = self.list_lv.shape[1] + self.list_gv.shape[1] + self.list_av1.shape[1]
        self.b_condition_size = self.list_bv.shape[1]
        self.a_condition_size = self.list_av2.shape[1]

        # Scalar std of lv, used for DDPM data scaling.
        self.lv_std = self.list_lv.std().item()

        # Dataset length and number of batches (ceiling division).
        self.length = len(self.list_index)
        self.batch_size = batch_size
        self.num_batches = (self.length + batch_size - 1) // batch_size

    def get_indices_random(self):
        """Return a shuffled permutation of all item indices on the device.

        Note: permutation is generated on CPU then moved, so the RNG stream
        matches the previous `.cuda()` behavior for the default device.
        """
        return torch.randperm(self.length).to(self.device)

    def get_indices(self):
        """Return all item indices in their natural order on the device."""
        return torch.arange(self.length).to(self.device)

    def get_batch(self, batch_index, indices):
        """Return the ``batch_index``-th batch selected through ``indices``.

        Args:
            batch_index: 0-based batch number in [0, num_batches).
            indices: 1-D index tensor from get_indices()/get_indices_random().

        Returns:
            Tuple (index, lv, gv, bv, av1, av2) sliced to this batch; the
            final batch may be smaller than ``batch_size``.
        """
        start = batch_index * self.batch_size
        end = min(start + self.batch_size, self.length)  # clamp to dataset end
        batch_indices = indices[start:end]
        return (
            self.list_index[batch_indices],
            self.list_lv[batch_indices],
            self.list_gv[batch_indices],
            self.list_bv[batch_indices],
            self.list_av1[batch_indices],
            self.list_av2[batch_indices]
        )

def load_data_tensor(data_path: str, data_file_num: int, data_index_start: int = 0):
    """Load grain data from consecutive ``.npz`` files into stacked tensors.

    Reads ``{data_path}/{i}.npz`` for i in
    [data_index_start, data_index_start + data_file_num). Each file carries
    per-file vectors ``lv`` and ``gv`` plus per-item arrays ``bvs``, ``avs1``
    and ``avs2``; the per-file vectors are replicated once per item so all
    six outputs stay row-aligned.

    Args:
        data_path: directory containing the numbered ``.npz`` files.
        data_file_num: how many consecutive files to load.
        data_index_start: index of the first file (default 0).

    Returns:
        Tuple of six tensors (index, lv, gv, bv, av1, av2). ``index`` holds
        each item's 0-based file offset; the other five are float32.
    """
    list_index = []
    list_lv = []
    list_gv = []
    list_bv = []
    list_av1 = []
    list_av2 = []

    for i in tqdm(range(data_index_start, data_index_start + data_file_num), desc='loading data', colour='green', dynamic_ncols=True):
        npz_path = f'{data_path}/{i}'
        # np.load on an .npz returns an NpzFile that keeps the archive's file
        # handle open; the context manager closes it deterministically
        # (previously the handles leaked, one per file).
        with np.load(npz_path + ".npz") as npz_file:
            lv = npz_file["lv"]
            gv = npz_file["gv"]
            bvs = npz_file["bvs"]
            avs1 = npz_file["avs1"]
            avs2 = npz_file["avs2"]
        # Indexing an NpzFile loads the array fully, so all five arrays stay
        # valid after the archive is closed.
        item_num_per_file = avs2.shape[0]
        for j in range(item_num_per_file):
            list_index.append(i - data_index_start)
            list_lv.append(lv)   # replicate per-file vector per item
            list_gv.append(gv)
            list_bv.append(bvs[j])
            list_av1.append(avs1[j])
            list_av2.append(avs2[j])

    # Stack the numpy lists and convert to PyTorch tensors.
    list_index = torch.tensor(np.stack(list_index, axis=0))
    list_lv = torch.tensor(
        np.stack(list_lv, axis=0), dtype=torch.float32)
    list_gv = torch.tensor(
        np.stack(list_gv, axis=0), dtype=torch.float32)
    list_bv = torch.tensor(
        np.stack(list_bv, axis=0), dtype=torch.float32)
    list_av1 = torch.tensor(
        np.stack(list_av1, axis=0), dtype=torch.float32)
    list_av2 = torch.tensor(
        np.stack(list_av2, axis=0), dtype=torch.float32)

    return list_index, list_lv, list_gv, list_bv, list_av1, list_av2