import functools
import math
import sys
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from Bio.PDB.PDBParser import PDBParser
from Bio import SeqIO
import torch
import torch_geometric
import torch_cluster
import os
import gc
import pickle
import pdb
from torch.utils.data import Dataset

# Make sibling modules (e.g. ``dataset``) importable regardless of the
# current working directory by adding this file's directory to sys.path.
root_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root_dir)
from dataset import construct_data_single


class RNADatasetV2(Dataset):
    """RNA structure dataset backed by per-structure ``.npy`` coordinate files.

    Expects ``data_path`` to contain:
      - ``coords/<pdb_id>.npy``: arrays indexed as (L, 7, 3) — per-residue 3D
        coordinates for the atoms P, O5', C5', C4', C3', O3', N (in that order,
        as consumed by :meth:`load_feature`).
      - ``seqs/<pdb_id>.fasta``: the nucleotide sequence (read only when
        ``is_train`` is True).

    Loaded items are cached in memory so repeated epochs avoid disk I/O.
    NOTE(review): the cache is unbounded — it grows to the full dataset size.
    """

    def __init__(self, data_path, is_train=True):
        super().__init__()
        self.npy_dir = os.path.join(data_path, "coords")
        # Strip the ".npy" extension to obtain the pdb ids.
        self.name_list = [os.path.splitext(f)[0] for f in os.listdir(self.npy_dir)]
        self.seq_dir = data_path + "/seqs/"
        self.cache = {}  # idx -> feature dict
        self.is_train = is_train

    def __len__(self):
        return len(self.name_list)

    def get_pdb_id(self, idx):
        """Return the pdb id (coordinate-file stem) for dataset index ``idx``."""
        return self.name_list[idx]

    def merge_coords_file_path(self, pdb_id):
        """Return the path of the coordinate array for ``pdb_id``."""
        return os.path.join(self.npy_dir, pdb_id + '.npy')

    def load_feature(self, pdb_id):
        """Load the (L, 7, 3) coordinate array and split it per atom type."""
        coords = np.load(self.merge_coords_file_path(pdb_id))
        return {
            "name": pdb_id,
            "coords": {
                "P": coords[:, 0, :],
                "O5'": coords[:, 1, :],
                "C5'": coords[:, 2, :],
                "C4'": coords[:, 3, :],
                "C3'": coords[:, 4, :],
                "O3'": coords[:, 5, :],
                "N": coords[:, 6, :],
            }
        }

    def read_fasta_biopython(self, file_path):
        """Parse a FASTA file into an ``{record_id: sequence}`` dict."""
        return {record.id: str(record.seq)
                for record in SeqIO.parse(file_path, "fasta")}

    def load_seq(self, pdb_id):
        """Return the first sequence stored in ``seqs/<pdb_id>.fasta``."""
        fasta_path = os.path.join(self.seq_dir, pdb_id + ".fasta")
        return next(iter(self.read_fasta_biopython(fasta_path).values()))

    def first_load(self, idx):
        """Load coordinates (and, when training, the sequence) from disk."""
        pdb_id = self.get_pdb_id(idx)
        feature = self.load_feature(pdb_id)
        # At inference time the ground-truth sequence is unavailable.
        feature["seq"] = self.load_seq(pdb_id) if self.is_train else None
        return feature

    def __getitem__(self, idx):
        if idx not in self.cache:
            self.cache[idx] = self.first_load(idx)
        return self.cache[idx]

    def __iter__(self):
        for idx in range(len(self)):
            yield self[idx]

    def get_lengths_by_indices(self, indices):
        """Return the residue count (first array dimension) for each index.

        ``mmap_mode='r'`` makes NumPy parse only the ``.npy`` header and
        memory-map the data, so nothing is read into RAM. This replaces the
        previous use of the private ``np.lib.format._read_array_header``,
        which is not a stable API across NumPy versions.
        """
        lengths = []
        for idx in indices:
            file_path = self.merge_coords_file_path(self.get_pdb_id(idx))
            lengths.append(np.load(file_path, mmap_mode='r').shape[0])
        return lengths
    
class RNADatasetV3(RNADatasetV2):
    """RNADatasetV2 variant that yields graph-ready structure data.

    Each item is the output of ``construct_data_single`` built from the
    P / C4' / N backbone coordinates, augmented with a random ``z_t``
    tensor shaped like the node scalar features.

    The redundant ``__init__`` that only forwarded its arguments to
    ``super().__init__`` was removed; construction behavior is unchanged.
    """

    def __getitem__(self, idx):
        raw = super().__getitem__(idx)
        coords, seq, _name = self.data_adapter(raw)
        struct_data = construct_data_single(coords, seq)
        # Random latent with the same shape as the node scalar features.
        struct_data['z_t'] = torch.randn(struct_data['node_s'].shape)
        return struct_data

    @staticmethod
    def data_adapter(data):
        """Convert a V2 feature dict into a ``(coords, seq, name)`` triple.

        ``coords`` is stacked as (L, 3, 3) over the atoms P, C4', N.
        """
        c = data["coords"]
        stacked = np.concatenate(
            (
                c["P"].reshape(-1, 1, 3),
                c["C4'"].reshape(-1, 1, 3),
                c["N"].reshape(-1, 1, 3),
            ),
            axis=1,
        )
        return stacked, data["seq"], data["name"]
    
if __name__ == "__main__":
    # Smoke test: build the dataset and report each sample's latent shape.
    ds = RNADatasetV3(
        data_path="/data/slz/sais_medicine/saisdata",
        is_train=True,
    )
    for sample in ds:
        print(sample['z_t'].shape)
