import sys
import numpy as np
import torch
from Bio import SeqIO
import torch
import os
from torch.utils.data import Dataset, DataLoader
import torch
from typing import Dict, List

root_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root_dir)
from dataset import construct_data_single


class RNADatasetV2(Dataset):
    """Dataset of RNA backbone structures loaded from per-sample files.

    Expects ``data_path`` to contain two subdirectories:

    - ``coords/<pdb_id>.npy`` — float array of shape (L, 7, 3): one row per
      residue, one (x, y, z) triple per backbone atom in ``ATOM_NAMES`` order.
    - ``seqs/<pdb_id>.fasta`` — the matching sequence (read only when
      ``is_train`` is True).

    Samples are cached in memory after their first access.
    """

    # Order of atoms along axis 1 of each coords array.
    ATOM_NAMES = ("P", "O5'", "C5'", "C4'", "C3'", "O3'", "N")

    def __init__(self, data_path, is_train=True):
        super().__init__()
        self.npy_dir = os.path.join(data_path, "coords")
        # Only .npy files define samples; splitext is safer than a blind
        # `[:-4]` slice against stray files with other extensions.
        self.name_list = [
            os.path.splitext(fname)[0]
            for fname in os.listdir(self.npy_dir)
            if fname.endswith(".npy")
        ]
        self.seq_dir = os.path.join(data_path, "seqs")
        self.cache = {}          # idx -> loaded feature dict
        self.is_train = is_train

    def __len__(self):
        return len(self.name_list)

    def get_pdb_id(self, idx):
        """Return the sample identifier (file stem) for ``idx``."""
        return self.name_list[idx]

    def merge_coords_file_path(self, pdb_id):
        """Path of the coordinates file for ``pdb_id``."""
        return os.path.join(self.npy_dir, pdb_id + '.npy')

    def load_feature(self, pdb_id):
        """Load the (L, 7, 3) coords array and split it per atom name."""
        coords = np.load(self.merge_coords_file_path(pdb_id))
        return {
            "name": pdb_id,
            "coords": {
                atom: coords[:, i, :] for i, atom in enumerate(self.ATOM_NAMES)
            },
        }

    def read_fasta_biopython(self, file_path):
        """Parse a FASTA file into an ``{id: sequence}`` dict."""
        return {
            record.id: str(record.seq)
            for record in SeqIO.parse(file_path, "fasta")
        }

    def load_seq(self, pdb_id):
        """Return the first sequence found in the sample's FASTA file."""
        fasta_path = os.path.join(self.seq_dir, pdb_id + ".fasta")
        return next(iter(self.read_fasta_biopython(fasta_path).values()))

    def first_load(self, idx):
        """Load a sample from disk (coords always; sequence only in train mode)."""
        pdb_id = self.get_pdb_id(idx)
        feature = self.load_feature(pdb_id)
        feature["seq"] = self.load_seq(pdb_id) if self.is_train else None
        return feature

    def __getitem__(self, idx):
        try:
            return self.cache[idx]
        except KeyError:
            data = self.first_load(idx)
            self.cache[idx] = data
            return data

    def __iter__(self):
        for idx in range(len(self)):
            yield self[idx]

    def get_lengths_by_indices(self, indices):
        """Return the residue count (first-axis length) for each index.

        Memory-maps each .npy file, so only the header is read — the
        coordinate data itself is never loaded. This replaces the previous
        use of the private ``np.lib.format._read_array_header`` API.
        """
        lengths = []
        for idx in indices:
            path = self.merge_coords_file_path(self.get_pdb_id(idx))
            lengths.append(np.load(path, mmap_mode="r").shape[0])
        return lengths
    
class RNADatasetV3(RNADatasetV2):
    """RNADatasetV2 variant that converts raw features into graph data.

    Each item is the output of ``construct_data_single`` built from the
    (P, C4', N) backbone coordinates, augmented with a random latent
    ``z_t`` matching the node scalar features' shape.
    """

    def __init__(self, data_path, is_train=True):
        super().__init__(data_path, is_train)

    def __getitem__(self, idx):
        data = super().__getitem__(idx)
        coords, seq, name = self.data_adapter(data)
        struct_data = construct_data_single(coords, seq)
        # Attach the random latent to the returned item. The original code
        # computed this tensor but dropped it, so consumers reading
        # data['z_t'] (e.g. the __main__ smoke test) raised KeyError.
        struct_data['z_t'] = torch.randn(struct_data['node_s'].shape)
        return struct_data

    @staticmethod
    def data_adapter(data):
        """Flatten a raw feature dict to ``(coords, seq, name)``.

        ``coords`` is an (L, 3, 3) array stacking the P, C4' and N atom
        positions per residue along axis 1.
        """
        atom_coords = data["coords"]
        coords = np.stack(
            (atom_coords["P"], atom_coords["C4'"], atom_coords["N"]), axis=1
        )
        return coords, data["seq"], data["name"]

def collate_fn(batch: List[Dict[str, torch.Tensor]]):
    """Collate a list of sample dicts into one dict of lists.

    Keys are taken from the first sample; no padding or stacking is
    performed — each value is simply the list of per-sample entries.
    """
    return {key: [sample[key] for sample in batch] for key in batch[0].keys()}



def graph_collate_fn(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    """Collate graph-structured samples into a batch.

    Supported fields:
    - seq / mask: 1D tensors, zero-padded to the batch max length
    - node_s / node_v: node scalar/vector features, zero-padded along dim 0
    - edge_s / edge_v: edge scalar/vector features, zero-padded along dim 0
    - edge_index: 2D edge indices — kept as a list (per-graph node offsets
      are not applied here; the caller must handle merging)
    - any other field: zero-padded along dim 0 and stacked

    All padded outputs have shape ``(batch, max_len, *feature_dims)`` and
    keep the dtype of the input tensors (the previous implementation
    silently cast integer tensors to float32 via ``torch.zeros``).
    """

    def _pad_stack(samples):
        # Zero-pad each tensor along dim 0 to the batch max, then stack.
        max_len = max(s.size(0) for s in samples)
        padded = torch.zeros(
            len(samples), max_len, *samples[0].shape[1:],
            dtype=samples[0].dtype,
        )
        for i, s in enumerate(samples):
            padded[i, :s.size(0)] = s
        return padded

    collated_batch = {}
    for key in batch[0].keys():
        samples = [d[key] for d in batch]
        if key == 'edge_index':
            # Edge indices would need per-graph node offsets to be merged
            # into one graph; leave them as a list for the caller.
            collated_batch[key] = samples
        else:
            # seq/mask, node/edge features, and any other tensor field all
            # use the same padding scheme (the original had this code
            # duplicated in three identical branches).
            collated_batch[key] = _pad_stack(samples)
    return collated_batch



if __name__ == "__main__":
    # Smoke test: iterate the whole dataset and print each latent's shape.
    # NOTE(review): the data path is machine-specific — this only runs where
    # that directory exists.
    dataset = RNADatasetV3(
        data_path="/data/slz/sais_medicine/saisdata",
        is_train=True
    )
    for data in dataset:
        # NOTE(review): RNADatasetV3.__getitem__ as written assigns its
        # random tensor to a local `zt` and never stores it on the returned
        # item, so this 'z_t' lookup looks like it would raise KeyError —
        # confirm intent.
        print(data['z_t'].shape)