#!/usr/bin/env python3

"""Example script for running inference with enhanced NPU support."""

import dataclasses
import itertools
import logging
import os
import pathlib
import sys
import time
import warnings
from typing import Optional
from collections import defaultdict

import hydra
import numpy as np
import omegaconf
import pandas as pd

import matplotlib.pyplot as plt
import requests
import torch
import torch_npu  # 导入NPU支持库

# Force NPU:0 and tune the Ascend runtime environment.
# NOTE(review): torch is already imported above; CUDA_VISIBLE_DEVICES set here
# only takes effect because CUDA initializes lazily — confirm this ordering.
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # disable CUDA
os.environ["ASCEND_DEVICE_ID"] = "0"  # pin NPU device id 0
os.environ["ASCEND_GLOBAL_LOG_LEVEL"] = "3"  # lower Ascend log verbosity
os.environ["ASCEND_GLOBAL_EVENT_ENABLE"] = "0"  # disable Ascend event recording

# Pick the graph backend ('dgl' or 'pyg') from the environment, default 'dgl'.
backend = os.environ.get('GRAPH_BACKEND', 'dgl').lower()
if backend not in ['dgl', 'pyg']:
    print(f"警告: 无效的图后端值 '{backend}'，使用默认值 'dgl'")
    backend = 'dgl'

# Propagate the chosen backend to both libraries that read it.
os.environ['SE3_TRANSFORMER_GRAPH_BACKEND'] = backend
os.environ['RFDIFFUSION_GRAPH_BACKEND'] = backend
print(f"当前使用的图后端: {backend}")


import re
import os
import pickle
import sys
from pathlib import Path

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import torch
from omegaconf import OmegaConf
import hydra
import logging
from rfdiffusion.util import writepdb_multi, writepdb
from rfdiffusion.inference import utils as iu
from hydra.core.hydra_config import HydraConfig
import numpy as np
import random
import glob
from hydra.core.config_store import ConfigStore


# Lightweight wall-clock profiler accumulating per-module timings.
class PerformanceProfiler:
    """Accumulate elapsed time and call counts per named code section.

    Usage: call ``start(name)`` before a section and ``end()`` after it;
    ``report()`` prints a table sorted by total time. All methods are
    no-ops until ``enabled`` is set to True.
    """

    def __init__(self):
        self.timings = defaultdict(float)  # section name -> total seconds
        self.counts = defaultdict(int)     # section name -> number of calls
        self.current_module = None         # section currently being timed
        self.start_time = None             # perf_counter stamp of start()
        self.enabled = False               # profiling is off by default

    def start(self, module_name):
        """Begin timing `module_name` (no-op while profiling is disabled)."""
        if not self.enabled:
            return
        self.current_module = module_name
        # perf_counter is monotonic and higher-resolution than time.time,
        # so intervals are immune to system clock adjustments.
        self.start_time = time.perf_counter()

    def end(self):
        """Stop timing the current section and accumulate the elapsed time."""
        if not self.enabled or self.current_module is None:
            return
        elapsed = time.perf_counter() - self.start_time
        self.timings[self.current_module] += elapsed
        self.counts[self.current_module] += 1
        self.current_module = None

    def report(self):
        """Print a per-section timing table, sorted by total time descending."""
        if not self.enabled:
            return

        total_time = sum(self.timings.values())
        print("\n===== 性能分析报告 =====")
        print(f"总执行时间: {total_time:.4f} 秒")

        # Sort sections by accumulated time, largest first.
        sorted_modules = sorted(self.timings.items(), key=lambda x: x[1], reverse=True)

        print(f"{'模块名称':<30} {'执行时间(秒)':<15} {'占比(%)':<10} {'调用次数':<10} {'平均时间(秒)':<15}")
        print("-" * 80)

        for module, timing in sorted_modules:
            # Guard against division by zero when nothing was timed.
            percentage = (timing / total_time) * 100 if total_time > 0 else 0.0
            count = self.counts[module]
            avg_time = timing / count if count > 0 else 0
            print(f"{module:<30} {timing:<15.4f} {percentage:<10.2f} {count:<10} {avg_time:<15.4f}")

        print("=" * 80)


# Global profiler instance shared by the whole script (enabled in main()).
profiler = PerformanceProfiler()


# NPU device initialization (pinned to npu:0).
def setup_npu():
    """Initialize NPU device 0; return True on success, False otherwise.

    Returns False (CPU fallback) when torch_npu is missing, no NPU is
    available, or device selection fails for any reason.
    """
    try:
        if not hasattr(torch, 'npu') or not torch.npu.is_available():
            raise RuntimeError("NPU设备不可用或torch_npu模块缺失")

        torch.npu.set_device(0)
        print("已初始化NPU设备 (npu:0)")
        return True
    except Exception as e:
        # Broad catch is deliberate: any failure here means "run on CPU".
        print(f"初始化NPU时出错: {e}")
        print("将使用CPU模式")
        return False

# Probe NPU availability once at import time and pick the global device.
IS_NPU_AVAILABLE = setup_npu()
DEVICE = torch.device("npu:0") if IS_NPU_AVAILABLE else torch.device("cpu")
print(f"当前运行设备: {DEVICE}")


def make_deterministic(seed=0):
    """Seed every RNG in use (torch, numpy, random, plus NPU when present)."""
    for seeder in (torch.manual_seed, np.random.seed, random.seed):
        seeder(seed)
    if IS_NPU_AVAILABLE:
        torch.npu.manual_seed_all(seed)


# Unified helper for moving (possibly nested) data onto the target device.
def to_device(data, strict=True):
    """Recursively move every tensor inside `data` onto the global DEVICE.

    Supports bare tensors, lists/tuples and dicts; any other object is
    returned unchanged.

    Args:
        data: tensor, list, tuple, dict, or arbitrary object.
        strict: when True, log each tensor that actually gets migrated.
    """
    if isinstance(data, dict):
        return {key: to_device(value, strict) for key, value in data.items()}
    if isinstance(data, (list, tuple)):
        # Rebuild with the same container type so tuples stay tuples.
        return type(data)(to_device(element, strict) for element in data)
    if not isinstance(data, torch.Tensor):
        return data
    if data.device == DEVICE:
        return data
    if strict:
        print(f"迁移张量从 {data.device} 到 {DEVICE}")
    return data.to(DEVICE)


# Device-consistency checker.
def check_device一致性(data, target_device=DEVICE):
    """Recursively verify that every tensor in `data` lives on `target_device`."""
    if isinstance(data, torch.Tensor):
        return data.device == target_device
    children = None
    if isinstance(data, (list, tuple)):
        children = data
    elif isinstance(data, dict):
        children = data.values()
    if children is not None:
        for child in children:
            if not check_device一致性(child, target_device):
                return False
    # Non-container, non-tensor values are trivially consistent.
    return True


# Debug helper: report tensor metadata and dump a CPU copy to disk.
def debug_tensors(tensor_dict, step=None, prefix="dgl_debug"):
    """Pickle a detached CPU/numpy copy of `tensor_dict` for offline debugging.

    Warns about tensors that are not on the expected global DEVICE, and
    prints banner messages only when RFDIFFUSION_LOG_LEVEL=DEBUG.

    Args:
        tensor_dict: mapping of name -> tensor / sequence of tensors / other.
        step: optional step label appended to the output file name.
        prefix: base name for the output file under debug_outputs/.
    """
    debug = os.environ.get('RFDIFFUSION_LOG_LEVEL') == 'DEBUG'
    if debug:
        print("\n==== DGL调试信息 ====")
    os.makedirs("debug_outputs", exist_ok=True)

    filename = f"debug_outputs/{prefix}"
    if step is not None:
        filename += f"_step{step}"
    filename += ".pkl"

    # Warn about tensors that sit on the wrong device.
    for name, tensor in tensor_dict.items():
        if isinstance(tensor, torch.Tensor) and tensor.device != DEVICE:
            print(f"警告: {name} 在 {tensor.device}，应为 {DEVICE}")

    # Detach and move everything to CPU/numpy so it pickles safely;
    # non-tensor entries are reported and skipped.
    save_data = {}
    for name, tensor in tensor_dict.items():
        if isinstance(tensor, torch.Tensor):
            save_data[name] = tensor.detach().cpu().numpy()
        elif isinstance(tensor, (list, tuple)) and all(isinstance(t, torch.Tensor) for t in tensor):
            save_data[name] = [t.detach().cpu().numpy() for t in tensor]
        else:
            print(f"{name}: {type(tensor)} (非张量类型)")

    with open(filename, 'wb') as f:
        pickle.dump(save_data, f)
    if debug:
        # Fixed: previously printed a literal placeholder instead of the path.
        print(f"调试数据已保存至 {filename}")
        print("=====================\n")


@hydra.main(version_base=None, config_path="../config/inference", config_name="base")
def main(conf: HydraConfig) -> None:
    """Run the reverse-diffusion inference loop and write outputs per design.

    Driven entirely by the hydra config: selects a sampler, generates
    `num_designs` designs starting at `design_startnum`, and for each design
    writes a .pdb file, a .trb metadata pickle, and (optionally) trajectory
    PDBs under a traj/ subdirectory.
    """
    log = logging.getLogger(__name__)
    if conf.inference.deterministic:
        make_deterministic()
    
    # Enable the global profiler when the config asks for it.
    if hasattr(conf.inference, 'profile') and conf.inference.profile:
        profiler.enabled = True
        print("性能分析已启用")

    # Log which device inference runs on.
    device_str = "NPU (npu:0)" if IS_NPU_AVAILABLE else "CPU"
    log.info(f"使用 {device_str} 运行推理")

    # Build the sampler and move its model onto the target device.
    profiler.start("初始化采样器")
    sampler = iu.sampler_selector(conf)
    sampler.model = to_device(sampler.model)
    log.info(f"模型已加载至 {device_str}")
    profiler.end()

    # Resolve the starting design index.
    design_startnum = sampler.inf_conf.design_startnum
    # Treat None / empty string / unparsable values as 0.
    if design_startnum is None or design_startnum == '':
        design_startnum = 0
    else:
        try:
            design_startnum = int(design_startnum)
        except (ValueError, TypeError):
            design_startnum = 0
    
    # -1 means "continue after the highest design number already on disk".
    if design_startnum == -1:
        existing = glob.glob(sampler.inf_conf.output_prefix + "*.pdb")
        indices = [-1]
        for pdb_path in existing:
            match = re.match(r".*_(\d+)\.pdb$", pdb_path)
            if match:
                indices.append(int(match.group(1)))
        design_startnum = max(indices) + 1

    # Resolve num_designs, defaulting to 1 on empty/invalid values.
    num_designs = sampler.inf_conf.num_designs
    if num_designs is None or num_designs == '':
        num_designs = 1
    else:
        try:
            num_designs = int(num_designs)
        except (ValueError, TypeError):
            num_designs = 1

    for i_des in range(design_startnum, design_startnum + num_designs):
        # Re-seed per design so each design is individually reproducible.
        if conf.inference.deterministic:
            make_deterministic(i_des)

        start_time = time.time()
        out_prefix = f"{sampler.inf_conf.output_prefix}_{i_des}"
        log.info(f"生成设计: {out_prefix}")
        
        # Cautious mode: skip designs whose output file already exists.
        if sampler.inf_conf.cautious and os.path.exists(out_prefix + ".pdb"):
            log.info(f"(谨慎模式) 跳过设计，因 {out_prefix}.pdb 已存在")
            continue

        # Sample initial coordinates/sequence and move them to the device.
        profiler.start("初始化数据")
        x_init, seq_init = sampler.sample_init()
        x_init = to_device(x_init)
        seq_init = to_device(seq_init)
        debug_tensors({"x_init": x_init, "seq_init": seq_init}, prefix="init_data")
        profiler.end()

        # Per-timestep result stacks, appended in reverse-time order.
        denoised_xyz_stack = []
        px0_xyz_stack = []
        seq_stack = []
        plddt_stack = []

        x_t = torch.clone(x_init)
        seq_t = torch.clone(seq_init)

        # Reverse diffusion: iterate t from t_step_input down to final_step.
        for t in range(int(sampler.t_step_input), sampler.inf_conf.final_step - 1, -1):
            debug_tensors({"t": torch.tensor(t, device=DEVICE), "x_t": x_t, "seq_t": seq_t}, step=f"{t}_before")

            # Run one diffusion step.
            profiler.start("预处理输入")
            # (input preprocessing would go here; currently timed as empty)
            profiler.end()
            
            profiler.start("模型前向传播")
            px0, x_t, seq_t, plddt = sampler.sample_step(
                t=t, x_t=x_t, seq_init=seq_t, final_step=sampler.inf_conf.final_step
            )
            profiler.end()
            
            profiler.start("后处理结果")
            # Force all step results back onto the target device.
            px0 = to_device(px0)
            x_t = to_device(x_t)
            seq_t = to_device(seq_t)
            plddt = to_device(plddt)
            assert check_device一致性([px0, x_t, seq_t, plddt]), "扩散结果设备不一致"

            # Print x_t's shape for reference.
            print(f"x_t shape at timestep {t}: {x_t.shape}")
            
            # Keep x_t consistent with px0 in the atom dimension ([L,14,3]) to
            # avoid shape mismatches during later alignment. The old logic that
            # padded x_t from [L,14,3] to [L,27,3] here broke the atom-count
            # assertion in utils.align_to_xt_motif.

            px0_xyz_stack.append(px0)
            denoised_xyz_stack.append(x_t)
            seq_stack.append(seq_t)
            plddt_stack.append(plddt[0])  # drop the singleton batch dimension
            debug_tensors({"px0": px0, "x_t": x_t, "plddt": plddt}, step=f"{t}_after")
            profiler.end()

        # Assemble the per-timestep results into stacked tensors.
        profiler.start("处理结果张量")
        # Check shape consistency across the collected tensors.
        print(f"检查结果张量形状一致性...")
        
        # Ensure every tensor in the stack shares the same atom dimension.
        if len(denoised_xyz_stack) > 0:
            # Inspect the atom dimension (dim 1) of each collected tensor.
            shapes = [x.shape[1] for x in denoised_xyz_stack]
            print(f"denoised_xyz_stack shapes: {shapes}")
            
            # Normalize every tensor to [L, 14, 3] when shapes disagree.
            for i in range(len(denoised_xyz_stack)):
                if denoised_xyz_stack[i].shape[1] != 14:
                    print(f"修正第 {i} 个张量，从形状 {denoised_xyz_stack[i].shape} 到 [L, 14, 3]")
                    # Keep only the first 14 atoms.
                    if denoised_xyz_stack[i].shape[1] > 14:
                        denoised_xyz_stack[i] = denoised_xyz_stack[i][:, :14, :]
                    # Pad with NaN when there are fewer than 14 atoms.
                    elif denoised_xyz_stack[i].shape[1] < 14:
                        L = denoised_xyz_stack[i].shape[0]
                        pad_size = 14 - denoised_xyz_stack[i].shape[1]
                        pad = torch.full((L, pad_size, 3), float('nan'), device=denoised_xyz_stack[i].device)
                        denoised_xyz_stack[i] = torch.cat([denoised_xyz_stack[i], pad], dim=1)
        
        denoised_xyz_stack = to_device(denoised_xyz_stack)
        px0_xyz_stack = to_device(px0_xyz_stack)
        # Stack and flip so index 0 is the final (most denoised) step.
        denoised_xyz_stack = torch.stack(denoised_xyz_stack)
        denoised_xyz_stack = torch.flip(denoised_xyz_stack, [0])
        px0_xyz_stack = torch.stack(px0_xyz_stack)
        px0_xyz_stack = torch.flip(px0_xyz_stack, [0])
        plddt_stack = torch.stack(plddt_stack)
        debug_tensors({
            "denoised_xyz": denoised_xyz_stack,
            "px0_xyz": px0_xyz_stack,
            "plddt": plddt_stack
        }, prefix="final_results")
        profiler.end()

        # Save outputs (data must be moved to CPU first).
        profiler.start("保存结果")
        os.makedirs(os.path.dirname(out_prefix), exist_ok=True)
        final_seq = seq_stack[-1].cpu()

        # Build the final sequence on CPU.
        # NOTE(review): the seq_stack[-1] value above is immediately
        # overwritten by this seq_init-derived sequence — confirm intended.
        final_seq = torch.where(
            torch.argmax(seq_init.cpu(), dim=-1) == 21, 7, torch.argmax(seq_init.cpu(), dim=-1)
        )  # 7 = glycine
        bfacts = torch.ones_like(final_seq.squeeze())
        # Zero the b-factor at positions that were masked (class 21) in seq_init.
        bfacts[torch.where(torch.argmax(seq_init.cpu(), dim=-1) == 21, True, False)] = 0

        # Write the PDB (NPU tensors moved to CPU first).
        writepdb(
            f"{out_prefix}.pdb",
            denoised_xyz_stack[0, :, :4].cpu(),
            final_seq,
            sampler.binderlen,
            chain_idx=sampler.chain_idx,
            bfacts=bfacts
        )

        # Save run metadata alongside the structure.
        trb = {
            "config": OmegaConf.to_container(sampler._conf, resolve=True),
            "plddt": plddt_stack.cpu().numpy(),
            "device": device_str,
            "time": time.time() - start_time
        }
        if hasattr(sampler, "contig_map"):
            trb.update(sampler.contig_map.get_mappings())
        with open(f"{out_prefix}.trb", "wb") as f:
            pickle.dump(trb, f)

        # Optionally write the full denoising trajectory.
        if sampler.inf_conf.write_trajectory:
            traj_prefix = os.path.join(os.path.dirname(out_prefix), "traj", os.path.basename(out_prefix))
            os.makedirs(os.path.dirname(traj_prefix), exist_ok=True)
            
            writepdb_multi(
                f"{traj_prefix}_Xt-1_traj.pdb",
                denoised_xyz_stack.cpu(),
                bfacts,
                final_seq.squeeze(),
                use_hydrogens=False,
                backbone_only=False,
                chain_ids=sampler.chain_idx
            )
            
            writepdb_multi(
                f"{traj_prefix}_pX0_traj.pdb",
                px0_xyz_stack.cpu(),
                bfacts,
                final_seq.squeeze(),
                use_hydrogens=False,
                backbone_only=False,
                chain_ids=sampler.chain_idx
            )
        profiler.end()

        log.info(f"设计完成，耗时: {(time.time() - start_time) / 60:.2f} 分钟")
    
    # Emit the profiling report (no-op unless profiling was enabled).
    profiler.report()


# Script entry point: hydra parses CLI overrides and invokes main().
if __name__ == "__main__":
    main()