import os

import numpy as np
import torch
from Bio import SeqIO
from torch.utils.data import Dataset

# Nucleotide vocabulary: bidirectional mapping between RNA bases and indices.
tag_list = list("AUCG")
idx2tag = dict(enumerate(tag_list))
tag2idx = {base: index for index, base in enumerate(tag_list)}

def read_fasta_biopython(file_path):
    """Parse a FASTA file and return a dict mapping record id -> sequence string."""
    return {record.id: str(record.seq) for record in SeqIO.parse(file_path, "fasta")}

class RNAPredictDataset(Dataset):
    """Dataset of RNA backbone coordinates stored as per-structure ``.npy`` files.

    Expects ``data_path/coords/<pdb_id>.npy`` arrays of shape ``(L, 6, 3)``:
    one row per residue, six backbone atoms (P, O5', C5', C4', C3', O3'),
    xyz coordinates.  Loaded samples are memoized in ``self.cache``.
    """

    def __init__(self, data_path):
        super().__init__()
        # Directory holding one <pdb_id>.npy coordinate array per structure.
        self.npy_dir = os.path.join(data_path, "coords")
        # Strip the ".npy" extension to recover the structure identifiers.
        self.name_list = [fname[:-4] for fname in os.listdir(self.npy_dir)]
        # idx -> feature dict; populated lazily by __getitem__ (unbounded cache).
        self.cache = {}

    def __len__(self):
        return len(self.name_list)

    def get_pdb_id(self, idx):
        """Return the structure identifier for dataset index ``idx``."""
        return self.name_list[idx]

    def merge_coords_file_path(self, pdb_id):
        """Build the path of the coordinate file for ``pdb_id``."""
        return os.path.join(self.npy_dir, pdb_id + '.npy')

    def get_lengths_by_indices(self, indices):
        """Return the residue count (first array dimension) for each index.

        Uses a memory-mapped load, so only the .npy header is parsed and the
        coordinate data is never read into memory.  This replaces the previous
        use of ``np.lib.format._read_array_header``, a private numpy API that
        is not stable across numpy versions.
        """
        lengths = []
        for idx in indices:
            file_path = self.merge_coords_file_path(self.get_pdb_id(idx))
            lengths.append(np.load(file_path, mmap_mode='r').shape[0])
        return lengths

    def load_feature(self, pdb_id):
        """Load the (L, 6, 3) coordinate array and split it per backbone atom."""
        coords = np.load(self.merge_coords_file_path(pdb_id))

        feature = {
            "name": pdb_id,
            "coords": {
                "P": coords[:, 0, :],
                "O5'": coords[:, 1, :],
                "C5'": coords[:, 2, :],
                "C4'": coords[:, 3, :],
                "C3'": coords[:, 4, :],
                "O3'": coords[:, 5, :],
            }
        }

        return feature

    def first_load(self, idx):
        """Load the feature dict for ``idx`` from disk (no caching)."""
        pdb_id = self.get_pdb_id(idx)
        return self.load_feature(pdb_id)

    def __iter__(self):
        for idx in range(len(self)):
            yield self.__getitem__(idx)

    def __getitem__(self, idx):
        # Memoize: each sample is read from disk at most once.
        if idx in self.cache:
            return self.cache[idx]
        data = self.first_load(idx)
        self.cache[idx] = data
        return data

class RNATrainDataset(RNAPredictDataset):
    """Prediction dataset augmented with the ground-truth sequence label.

    Expects one FASTA file per structure under ``data_path/seqs/``.
    """

    def __init__(self, data_path):
        super().__init__(data_path)
        self.seq_dir = data_path + "/seqs/"

    def load_seq(self, pdb_id):
        """Return the sequence of the first record in the structure's FASTA file."""
        records = read_fasta_biopython(self.seq_dir + pdb_id + ".fasta")
        return list(records.values())[0]

    def first_load(self, idx):
        """Load coordinates as in the base class, plus the sequence label."""
        pdb_id = self.get_pdb_id(idx)
        feature = self.load_feature(pdb_id)
        feature["seq"] = self.load_seq(pdb_id)
        return feature


def featurize(batch, has_label=True):
    """Collate a list of feature dicts into padded, masked batch tensors.

    Parameters
    ----------
    batch : list of dict
        Each item has ``"name"``, ``"coords"`` (dict mapping the six backbone
        atom names to ``(L, 3)`` arrays, possibly containing NaNs), and —
        when ``has_label`` — ``"seq"`` (string over the ``tag2idx`` alphabet).
    has_label : bool
        When True, additionally return the label tensor ``S``.

    Returns
    -------
    X : torch.FloatTensor, shape (B, L_max, 6, 3)
        Coordinates with fully-finite residues compacted to the front of each
        sample and all remaining NaN padding replaced by zeros.
    mask : torch.FloatTensor, shape (B, L_max)
        1.0 where a residue has fully finite coordinates, else 0.0.
    lengths : np.ndarray of int32, shape (B,)
        Original (pre-compaction) residue counts per sample.
    names : list of str
    S : torch.LongTensor (only when ``has_label``)
        Flat concatenation of per-sample sequence indices.
    """
    atoms = ["P", "O5'", "C5'", "C4'", "C3'", "O3'"]
    B = len(batch)
    lengths = np.array([b["coords"]["P"].shape[0] for b in batch], dtype=np.int32)
    # Reuse lengths instead of recomputing the per-sample shapes a second time.
    L_max = int(lengths.max())
    X = np.zeros([B, L_max, 6, 3])
    S = []
    names = []

    # Build the batch
    for i, b in enumerate(batch):
        # (L, 6, 3): NaNs inside real residues become 0, the tail pad is NaN
        # so that the mask below can tell padding apart from data.
        x = np.stack([np.nan_to_num(b['coords'][c], nan=0.0) for c in atoms], 1)
        l = b["coords"]["P"].shape[0]
        X[i] = np.pad(x, [[0, L_max - l], [0, 0], [0, 0]], 'constant',
                      constant_values=(np.nan,))
        if has_label:
            S.extend([tag2idx[a] for a in b['seq']])
        names.append(b['name'])

    # A residue is valid only if all 6 atoms have finite coordinates.
    mask = np.isfinite(np.sum(X, (2, 3))).astype(np.float32)
    numbers = np.sum(mask, axis=1).astype(np.int32)
    # Compact valid residues to the front of each sample; re-pad the tail with NaN.
    X_new = np.zeros_like(X) + np.nan
    for i, n in enumerate(numbers):
        X_new[i, :n, ::] = X[i][mask[i] == 1]

    X = X_new
    isnan = np.isnan(X)
    mask = np.isfinite(np.sum(X, (2, 3))).astype(np.float32)
    X[isnan] = 0.
    # Conversion
    X = torch.from_numpy(X).to(dtype=torch.float32)
    mask = torch.from_numpy(mask).to(dtype=torch.float32)
    if has_label:
        # NOTE(review): S is a flat concatenation over the whole batch and is
        # NOT compacted like X — it still covers residues the mask dropped.
        # Callers must align it with `mask`/`lengths` themselves; confirm this
        # is intentional.
        S = torch.as_tensor(S, dtype=torch.long)
        return X, mask, lengths, names, S
    return X, mask, lengths, names


if __name__ == '__main__':
    # Smoke test: load and print the first training sample.
    dataset = RNATrainDataset("./saisdata")
    print(dataset[0])
