# data.py (final revised version, using the migrated utils)

import os
import sys
import argparse
import warnings
import math
import numpy as np
import scipy.spatial as spa
from scipy.special import softmax
import mindspore.mindrecord as mr
from Bio.PDB import PDBParser, ShrakeRupley
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from tqdm import tqdm

# Import helper functions from the newly created MindSpore version of utils
from src.Dataset.dataset_utils_ms import safe_index, one_hot_res, log, dihedral, NormalizeProtein, allowable_amino_acids, one_letter

# Suppress all warnings globally — Bio.PDB parsing of CATH domains emits many
# PDBConstructionWarning messages. NOTE(review): this also hides unrelated
# warnings; consider narrowing to PDBConstructionWarning (imported above).
warnings.filterwarnings("ignore")

class ProteinProcessor:
    """Convert raw PDB protein files into per-split MindRecord graph files.

    Each protein becomes one residue-level graph record:
      - ``x``: node features (one-hot amino acid identity, optionally SASA,
        B-factor and dihedral sin/cos columns),
      - ``pos``: C-alpha coordinates,
      - ``edge_index`` / ``edge_attr``: k-NN graph over C-alpha atoms with
        sequence-separation, RBF-distance, contact and orientation features,
      - ``mu_r_norm``: 5-scale neighborhood anisotropy descriptor per node,
      - ``seq``: one-letter amino-acid string.
    """

    def __init__(self, args):
        """Store paths and hyper-parameters; derive feature dimensions.

        Args:
            args: parsed CLI namespace providing ``protein_dataset``,
                ``c_alpha_max_neighbors``, ``use_sasa``, ``use_bfactor``
                and ``use_dihedral``.
        """
        self.args = args
        self.root = self.args.protein_dataset
        self.raw_dir = os.path.join(self.root, 'raw', 'dompdb')
        self.output_dir = os.path.join(self.root, 'mindrecord')
        os.makedirs(self.output_dir, exist_ok=True)
        self.biopython_parser = PDBParser()
        self.sr = ShrakeRupley(probe_radius=1.4, n_points=100)

        # Hyper-parameters reused from the original pipeline.
        self.c_alpha_max_neighbors = args.c_alpha_max_neighbors
        self.cutoff = 30.0       # Angstrom radius for candidate neighbors
        self.seq_dist_cut = 64   # sequence-separation feature is clipped here
        self.use_sasa = args.use_sasa
        self.use_bfactor = args.use_bfactor
        self.use_dihedral = args.use_dihedral
        self.use_omega = False
        self.use_angle = False

        # Node feature width depends on which optional features are enabled.
        self.feature_dim = 20  # one-hot over the 20 standard amino acids
        if self.use_sasa:
            self.feature_dim += 1
        if self.use_bfactor:
            self.feature_dim += 1
        if self.use_dihedral:
            # sin/cos of two backbone angles (4 dims), three with omega (6)
            self.feature_dim += 6 if self.use_omega else 4

        # seq-separation one-hot + 15 RBF bins + contact flag + 4 orientation 3-vectors
        self.edge_feature_dim = self.seq_dist_cut + 1 + 15 + 1 + 12

    def run(self):
        """Deterministically shuffle raw files, split, and write each split.

        NOTE(review): assumes the raw directory holds more than 1000 files;
        with fewer, the train slice would be empty — confirm against the
        CATH40 dataset size.
        """
        all_files = sorted(os.listdir(self.raw_dir))
        np.random.seed(42)  # fixed seed so splits are reproducible
        np.random.shuffle(all_files)
        num_test, num_val = 500, 500
        splits = {
            'train': all_files[:-(num_val + num_test)],
            'val': all_files[-(num_val + num_test):-num_test],
            'test': all_files[-num_test:],
        }
        for split_name, filenames in splits.items():
            self._create_mindrecord_file(filenames, split_name)

    def _create_mindrecord_file(self, filenames, split_name):
        """Process ``filenames`` and write them into ``<split_name>.mindrecord``.

        Proteins that fail to parse or featurize are skipped (with a message
        on stderr) so one bad file does not abort a whole split.
        """
        output_file = os.path.join(self.output_dir, f"{split_name}.mindrecord")
        if os.path.exists(output_file):
            print(f"MindRecord file {output_file} already exists. Skipping.")
            return

        schema = {
            "x": {"type": "float32", "shape": [-1, self.feature_dim]},
            "pos": {"type": "float32", "shape": [-1, 3]},
            "edge_index": {"type": "int32", "shape": [2, -1]},
            "edge_attr": {"type": "float32", "shape": [-1, self.edge_feature_dim]},
            "mu_r_norm": {"type": "float32", "shape": [-1, 5]},
            "seq": {"type": "string"},
        }
        writer = mr.FileWriter(file_name=output_file, shard_num=1)
        writer.add_schema(schema, f"{split_name}_schema")

        processed_data_list = []
        for filename in tqdm(filenames, desc=f"Processing {split_name} set"):
            protein_path = os.path.join(self.raw_dir, filename)
            try:
                graph_dict = self._process_single_protein(protein_path)
                if graph_dict:
                    processed_data_list.append(graph_dict)
            except Exception as e:
                # Best-effort: skip the bad protein but leave a trace instead
                # of silently swallowing the error (the old silent `pass`
                # hid a broadcasting bug that failed on *every* protein).
                print(f"Skipping {filename}: {e}", file=sys.stderr)

        if processed_data_list:
            writer.write_raw_data(processed_data_list)
        writer.commit()
        print(f"Successfully created MindRecord file: {output_file}")

    def _process_single_protein(self, protein_path):
        """Parse one PDB file and return its graph dict, or None on failure."""
        rec, c_alpha_coords, n_coords, c_coords, seq = self._get_receptor_inference(protein_path)
        if rec is False:
            return None
        return self._get_calpha_graph_ms(rec, c_alpha_coords, n_coords, c_coords, seq)

    def _get_calpha_graph_ms(self, rec, c_alpha_coords, n_coords, c_coords, seq):
        """Build the full graph dictionary for one protein.

        Returns None for degenerate inputs (fewer than two residues, or a
        sequence containing non-standard amino acids).
        """
        num_res = len(c_alpha_coords)
        if num_res <= 1:
            return None

        # 1. Node features, all indexed over the same residue list so every
        #    feature column stays aligned with the one-hot block.
        node_features = self._get_node_feature_matrix(rec, seq, n_coords, c_coords, c_alpha_coords)
        if node_features is None:
            return None  # skip proteins with non-standard residues

        # 2. k-NN graph over C-alpha coordinates (framework-independent).
        distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)
        src_list, dst_list, dist_list = [], [], []
        for i in range(num_res):
            # Self-distance is 0 < cutoff, so i is always present and removable.
            dst = list(np.where(distances[i, :] < self.cutoff)[0])
            dst.remove(i)
            if self.c_alpha_max_neighbors is not None and len(dst) > self.c_alpha_max_neighbors:
                dst = list(np.argsort(distances[i, :]))[1:self.c_alpha_max_neighbors + 1]
            if not dst:
                # Guarantee at least one edge per node (its nearest neighbor).
                dst = list(np.argsort(distances[i, :]))[1:2]
            src_list.extend([i] * len(dst))
            dst_list.extend(dst)
            dist_list.extend(list(distances[i, dst]))

        # 3. Multi-scale neighborhood statistics.
        mean_norm_list = self._calculate_mu_r_norm(c_alpha_coords, distances, src_list, dst_list)

        # 4. Edge features.
        edge_features = self._get_full_edge_features(c_alpha_coords, n_coords, c_coords, src_list, dst_list, dist_list)

        # 5. Assemble the record dict for the MindRecord writer.
        return {
            "x": node_features.astype(np.float32),
            "pos": c_alpha_coords.astype(np.float32),
            "edge_index": np.array([src_list, dst_list], dtype=np.int32),
            "edge_attr": edge_features.astype(np.float32),
            "mu_r_norm": np.array(mean_norm_list).astype(np.float32),
            "seq": "".join([one_letter.get(s, 'X') for s in seq]),
        }

    def _get_node_feature_matrix(self, rec, seq, n_coords, c_coords, c_alpha_coords):
        """Build the (num_res, feature_dim) node feature matrix.

        Every optional feature is computed over the same ``num_res`` residues,
        which is what keeps the feature dimensions consistent.
        Returns None when ``seq`` contains a residue outside the 20 standard
        amino acids (faithful to the original logic).
        """
        num_res = len(c_alpha_coords)

        # 1. One-hot amino-acid identity from the 3-letter residue names.
        res_feat_list = []
        for res_name in seq:
            residx = safe_index(allowable_amino_acids, res_name)
            one_hot_vec = one_hot_res(residx, num_residue_type=20)
            if one_hot_vec is None:
                return None
            res_feat_list.append(one_hot_vec)
        features_list = [np.stack(res_feat_list)]

        # 2. Optional SASA and B-factor columns, also of length num_res.
        valid_residues = list(rec.get_list()[0].get_residues())
        if self.use_sasa:
            sasa_features = np.zeros(num_res)
            try:
                self.sr.compute(rec, level="R")
                sasa_ok = True
            except Exception:  # SASA can fail on malformed structures; fall back to zeros
                sasa_ok = False
            for i in range(num_res):
                sasa_features[i] = valid_residues[i].sasa if sasa_ok else 0.0
            features_list.append(sasa_features.reshape(-1, 1))
        if self.use_bfactor:
            bfactor_features = np.zeros(num_res)
            for i in range(num_res):
                bfactor_features[i] = valid_residues[i]['CA'].bfactor if 'CA' in valid_residues[i] else 0.0
            features_list.append(bfactor_features.reshape(-1, 1))

        # 3. Optional dihedral-angle features.
        if self.use_dihedral:
            scalar_feature, _ = self._get_dihedral_features(n_coords, c_coords, c_alpha_coords)
            features_list.append(scalar_feature)

        # 4. Concatenate. NOTE: the original pipeline normalized features in
        # process(); instantiate NormalizeProtein here if that is needed.
        return np.concatenate(features_list, axis=1)

    def _get_dihedral_features(self, n_coords, c_coords, c_alpha_coords):
        """Compute backbone dihedral angles (reuses the get_node_features logic).

        Returns (features, None): raw angles when ``use_angle`` is set,
        otherwise their sin/cos expansion. The last residue's angles stay 0
        because each angle needs atoms from residue i+1.
        """
        num_res = n_coords.shape[0]
        num_angle_type = 3 if self.use_omega else 2
        angles = np.zeros((num_res, num_angle_type))
        for i in range(num_res - 1):
            angles[i, 0] = dihedral(c_coords[i], n_coords[i], c_alpha_coords[i], c_coords[i+1])
            angles[i, 1] = dihedral(n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i+1])
            if self.use_omega:
                angles[i, 2] = dihedral(c_alpha_coords[i], c_coords[i], n_coords[i + 1], c_alpha_coords[i + 1])

        if self.use_angle:
            return angles, None

        # Encode each angle as (sin, cos) so the representation is continuous
        # across the -pi/pi wrap-around.
        node_scalar_features = np.zeros((num_res, num_angle_type * 2))
        for i in range(num_angle_type):
            node_scalar_features[:, 2 * i] = np.sin(angles[:, i])
            node_scalar_features[:, 2 * i + 1] = np.cos(angles[:, i])
        return node_scalar_features, None

    def _get_full_edge_features(self, pos, n_coords, c_coords, src_list, dst_list, dist_list):
        """Build (num_edges, edge_feature_dim) edge attributes.

        Columns: seq-separation one-hot (seq_dist_cut+1) | 15 RBF distance
        bins | contact flag (d <= 8A) | 12 orientation features (src backbone
        frame expressed in the dst backbone frame).
        """
        # 1. Sequence-separation and distance features.
        seq_edge = np.abs(np.array(src_list) - np.array(dst_list)).reshape(-1, 1)
        seq_edge = np.clip(seq_edge, 0, self.seq_dist_cut)
        seq_edge_one_hot = np.eye(self.seq_dist_cut + 1)[seq_edge.flatten().astype(int)]
        dist_arr = np.array(dist_list)
        # BUGFIX: dist_arr must be a column vector; broadcasting the original
        # (E,) array against the (1, 15) scale row raised ValueError for any
        # edge count other than 1 or 15 (every protein failed silently).
        dist_rbf = np.exp(-((dist_arr.reshape(-1, 1) / 4) ** 2) / (1.5 ** np.arange(15).reshape(1, -1)))
        contact_sig = (dist_arr <= 8.0).astype(np.float32).reshape(-1, 1)
        non_orient_feat = np.concatenate([seq_edge_one_hot, dist_rbf, contact_sig], axis=1)

        # 2. Orientation features: per-residue local frame (n, u, v).
        # NOTE(review): assumes N/C atoms never coincide with CA (norms > 0);
        # _get_receptor_inference only keeps residues with all three atoms.
        n_i_feat = (n_coords - pos) / np.linalg.norm(n_coords - pos, axis=1, keepdims=True)
        u_i_feat = (c_coords - pos) / np.linalg.norm(c_coords - pos, axis=1, keepdims=True)
        v_i_feat = np.cross(n_i_feat, u_i_feat)

        orient_feat_list = []
        for i in range(len(src_list)):
            src, dst = src_list[i], dst_list[i]
            # Express the relative position and the src frame axes in the
            # dst residue's local basis.
            basis_matrix = np.stack((n_i_feat[dst], u_i_feat[dst], v_i_feat[dst]), axis=0)
            p_ij = np.matmul(basis_matrix, pos[src] - pos[dst])
            q_ij = np.matmul(basis_matrix, n_i_feat[src])
            k_ij = np.matmul(basis_matrix, u_i_feat[src])
            t_ij = np.matmul(basis_matrix, v_i_feat[src])
            orient_feat_list.append(np.concatenate((p_ij, q_ij, k_ij, t_ij), axis=0))

        return np.concatenate([non_orient_feat, np.array(orient_feat_list)], axis=1)

    def _calculate_mu_r_norm(self, pos, distances, src_list, dst_list):
        """Compute the 5-scale mu/r-norm descriptor for every node.

        For each node i and scale sigma in {1, 2, 5, 10, 30}, neighbor
        weights are softmax(-d^2 / sigma); the descriptor is
        ||sum_j w_j (x_i - x_j)|| / sum_j w_j ||x_i - x_j||.
        Nodes without outgoing edges get a zero vector.
        """
        # Group destinations by source once (O(E)) instead of rescanning the
        # whole edge list for every node (the original was O(N*E)).
        neighbors = {}
        for src_node, dst_node in zip(src_list, dst_list):
            neighbors.setdefault(src_node, []).append(dst_node)

        sigmas = np.array([1., 2., 5., 10., 30.]).reshape(-1, 1)
        mean_norm_list = []
        for i in range(len(pos)):
            dst_nodes = neighbors.get(i)
            if not dst_nodes:
                mean_norm_list.append(np.zeros(5))
                continue
            weights = softmax(-distances[i, dst_nodes].reshape((1, -1)) ** 2 / sigmas, axis=1)
            diff_vecs = pos[i] - pos[dst_nodes]
            mean_vec = weights.dot(diff_vecs)  # (5, 3)
            denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1))
            mean_norm_list.append(np.linalg.norm(mean_vec, axis=1) / (denominator + 1e-9))
        return mean_norm_list

    def _get_receptor_inference(self, rec_path):
        """Parse a PDB file and extract backbone data from its first chain.

        Returns (structure, c_alpha, n_coords, c_coords, seq) as numpy
        arrays / a 3-letter residue-name list, or (False, None, None, None,
        None) when parsing fails or no residue has a full N/CA/C backbone.
        Side effect: residues lacking a full backbone are detached from the
        first chain so downstream SASA/B-factor lookups stay index-aligned.
        """
        try:
            structure = self.biopython_parser.get_structure('random_id', rec_path)[0]
        except Exception:
            return False, None, None, None, None

        chain = next(structure.get_chains())
        # Keep only residues with a complete backbone (N, CA, C atoms).
        valid_residues = [res for res in chain if 'CA' in res and 'N' in res and 'C' in res]
        if not valid_residues:
            return False, None, None, None, None

        c_alpha, n_coords, c_coords, seq = [], [], [], []
        for res in valid_residues:
            c_alpha.append(res['CA'].get_coord())
            n_coords.append(res['N'].get_coord())
            c_coords.append(res['C'].get_coord())
            seq.append(res.get_resname())

        # Remove incomplete residues from the chain itself so that later
        # iteration over the structure matches the extracted arrays 1:1.
        all_res_ids = {res.get_id() for res in chain}
        valid_res_ids = {res.get_id() for res in valid_residues}
        for res_id in all_res_ids:
            if res_id not in valid_res_ids:
                chain.detach_child(res_id)

        return structure, np.array(c_alpha), np.array(n_coords), np.array(c_coords), seq


if __name__ == '__main__':
    # Command-line entry point: builds the CATH dataset when --build_cath is set.
    cli = argparse.ArgumentParser(description="MindSpore Protein Data Preprocessing")
    cli.add_argument('--protein_dataset', type=str, default='data/cath40_k10')
    cli.add_argument('--c_alpha_max_neighbors', type=int, default=10)
    # All remaining options are plain boolean switches, off by default.
    for flag in ('--build_cath', '--use_sasa', '--use_bfactor', '--use_dihedral'):
        cli.add_argument(flag, action='store_true', default=False)
    args = cli.parse_args()

    if not args.build_cath:
        print("Please specify a build flag, e.g., --build_cath")
    else:
        print("Starting CATH dataset preprocessing for MindSpore (Faithful Migration)...")
        processor = ProteinProcessor(args)
        processor.run()
        print("Preprocessing finished.")