#!/usr/bin/env python3

"""Example script for running inference."""

import dataclasses
import itertools
import logging
import os
import pathlib
import sys
import warnings
from typing import Optional

import hydra
import numpy as np
import omegaconf
import pandas as pd


import matplotlib.pyplot as plt
import requests
import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu

# Select the graph backend BEFORE any graph-library module is imported.
# A single env var, GRAPH_BACKEND, controls both components; default "dgl".
# (The original re-imported `os` here; it is already imported at the top of
# the file, so the duplicate import has been removed.)
backend = os.environ.get('GRAPH_BACKEND', 'dgl').lower()

# Fall back to the default when the requested backend is unknown.
if backend not in ('dgl', 'pyg'):
    print(f"警告: 无效的图后端值 '{backend}'，使用默认值 'dgl'")
    backend = 'dgl'

# Keep both components consistent by setting both backend variables.
os.environ['SE3_TRANSFORMER_GRAPH_BACKEND'] = backend
os.environ['RFDIFFUSION_GRAPH_BACKEND'] = backend

# Log the active backend for debugging.
print(f"当前使用的图后端: {backend}")


import re
import os, time, pickle
import sys
from pathlib import Path

# Add the project root (the parent of this script's directory) to the Python
# path so `rfdiffusion` imports resolve when running this file as a script.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import torch
from omegaconf import OmegaConf
import hydra
import logging
from rfdiffusion.util import writepdb_multi, writepdb
from rfdiffusion.inference import utils as iu
from hydra.core.hydra_config import HydraConfig
import numpy as np
import random
import glob
from hydra.core.config_store import ConfigStore


def make_deterministic(seed=0):
    """Seed torch, NumPy, and the stdlib RNG so a run is reproducible."""
    for seed_fn in (torch.manual_seed, np.random.seed, random.seed):
        seed_fn(seed)


# 添加调试函数，用于打印和保存关键张量数据
# Debug helper: print and pickle key tensors for cross-environment comparison.
def debug_tensors(tensor_dict, step=None, prefix="dgl_debug"):
    """Print tensor metadata and pickle tensor values for later comparison.

    Every ``torch.Tensor`` (or non-empty list/tuple of tensors) in
    *tensor_dict* is detached, moved to CPU, converted to NumPy, and saved to
    ``debug_outputs/<prefix>[_step<step>].pkl``. Other values are printed but
    not saved.

    Args:
        tensor_dict: mapping of name -> tensor, sequence of tensors, or other.
        step: optional step label appended to the output filename.
        prefix: filename prefix inside the ``debug_outputs`` directory.
    """
    print("\n==== DGL调试信息 ====")
    # Make sure the debug output directory exists.
    os.makedirs("debug_outputs", exist_ok=True)

    # Build the output filename.
    filename = f"debug_outputs/{prefix}"
    if step is not None:
        filename += f"_step{step}"
    filename += ".pkl"

    # Collect the data to pickle.
    save_data = {}

    for name, tensor in tensor_dict.items():
        if isinstance(tensor, torch.Tensor):
            print(f"{name}: shape={tensor.shape}, dtype={tensor.dtype}, device={tensor.device}")
            # Detach and convert to NumPy so the pickle loads without torch/NPU.
            save_data[name] = tensor.detach().cpu().numpy()
        elif isinstance(tensor, (list, tuple)) and len(tensor) > 0 and isinstance(tensor[0], torch.Tensor):
            shapes = [t.shape for t in tensor]
            print(f"{name}: list of tensors with shapes {shapes}")
            # Save every tensor in the sequence.
            save_data[name] = [t.detach().cpu().numpy() for t in tensor]
        else:
            print(f"{name}: {type(tensor)} (not a tensor or tensor list)")

    # Persist the collected data.
    with open(filename, 'wb') as f:
        pickle.dump(save_data, f)

    # BUG FIX: the original printed the literal text "(unknown)" instead of
    # interpolating the actual output path.
    print(f"调试数据已保存到 {filename}")
    print("=====================\n")


@hydra.main(version_base=None, config_path="../config/inference", config_name="base")
def main(conf: HydraConfig) -> None:
    """Sample `num_designs` backbones by reverse diffusion and write outputs.

    For each design index this writes the final backbone as
    ``<output_prefix>_<i>.pdb``, a ``.trb`` metadata pickle (config, pLDDT
    trace, device, runtime), optional trajectory PDBs under ``traj/``, and
    pickled debug tensors under ``debug_outputs/``.

    NOTE(review): despite the annotation, Hydra passes an omegaconf
    DictConfig here, not a HydraConfig -- confirm before relying on
    HydraConfig-specific attributes.
    """
    log = logging.getLogger(__name__)
    if conf.inference.deterministic:
        make_deterministic()

    # Check for available GPU and print result of check
    if torch.cuda.is_available():
        device_name = torch.cuda.get_device_name(torch.cuda.current_device())
        log.info(f"Found GPU with device_name {device_name}. Will run RFdiffusion on {device_name}")
    else:
        log.info("////////////////////////////////////////////////")
        log.info("///// NO GPU DETECTED! Falling back to CPU /////")
        log.info("////////////////////////////////////////////////")

    # Initialize sampler and target/contig.
    sampler = iu.sampler_selector(conf)

    # Loop over number of designs to sample.
    design_startnum = sampler.inf_conf.design_startnum
    if sampler.inf_conf.design_startnum == -1:
        # Auto-numbering: resume after the highest existing "<prefix>*_<N>.pdb".
        existing = glob.glob(sampler.inf_conf.output_prefix + "*.pdb")
        indices = [-1]
        for e in existing:
            print(e)
            # NOTE(review): non-raw pattern -- "\d" raises a DeprecationWarning
            # on newer Pythons; should be r".*_(\d+)\.pdb$".
            m = re.match(".*_(\d+)\.pdb$", e)
            print(m)
            if not m:
                continue
            m = m.groups()[0]
            indices.append(int(m))
        design_startnum = max(indices) + 1

    for i_des in range(design_startnum, design_startnum + sampler.inf_conf.num_designs):
        if conf.inference.deterministic:
            # Re-seed per design index so each design is independently reproducible.
            make_deterministic(i_des)

        start_time = time.time()
        out_prefix = f"{sampler.inf_conf.output_prefix}_{i_des}"
        log.info(f"Making design {out_prefix}")
        if sampler.inf_conf.cautious and os.path.exists(out_prefix + ".pdb"):
            log.info(
                f"(cautious mode) Skipping this design because {out_prefix}.pdb already exists."
            )
            continue

        x_init, seq_init = sampler.sample_init()

        # Dump the initial coordinates/sequence for cross-environment comparison.
        debug_tensors({
            "x_init": x_init,
            "seq_init": seq_init,
        }, prefix="dgl_init")

        denoised_xyz_stack = []
        px0_xyz_stack = []
        seq_stack = []
        plddt_stack = []

        x_t = torch.clone(x_init)
        seq_t = torch.clone(seq_init)

        # Loop over number of reverse diffusion time steps.
        for t in range(int(sampler.t_step_input), sampler.inf_conf.final_step - 1, -1):
            # Dump the inputs of this step before sampling.
            debug_tensors({
                "t": torch.tensor(t),
                "x_t_before": x_t,
                "seq_t_before": seq_t,
            }, step=f"{t}_before")

            px0, x_t, seq_t, plddt = sampler.sample_step(
                t=t, x_t=x_t, seq_init=seq_t, final_step=sampler.inf_conf.final_step
            )

            # Dump the outputs of this step after sampling.
            debug_tensors({
                "t": torch.tensor(t),
                "px0": px0,
                "x_t_after": x_t,
                "seq_t_after": seq_t,
                "plddt": plddt,
            }, step=f"{t}_after")

            px0_xyz_stack.append(px0)
            denoised_xyz_stack.append(x_t)
            seq_stack.append(seq_t)
            plddt_stack.append(plddt[0])  # remove singleton leading dimension

        # Flip order for better visualization in pymol
        denoised_xyz_stack = torch.stack(denoised_xyz_stack)
        denoised_xyz_stack = torch.flip(
            denoised_xyz_stack,
            [
                0,
            ],
        )
        px0_xyz_stack = torch.stack(px0_xyz_stack)
        px0_xyz_stack = torch.flip(
            px0_xyz_stack,
            [
                0,
            ],
        )

        # For logging -- don't flip
        plddt_stack = torch.stack(plddt_stack)

        # Dump the final stacked trajectories for cross-environment comparison.
        debug_tensors({
            "denoised_xyz_stack": denoised_xyz_stack,
            "px0_xyz_stack": px0_xyz_stack,
            "seq_stack": seq_stack,
            "plddt_stack": plddt_stack,
        }, prefix="dgl_final")

        # Save outputs
        os.makedirs(os.path.dirname(out_prefix), exist_ok=True)
        final_seq = seq_stack[-1]

        # Output glycines, except for motif region
        # NOTE(review): this immediately overwrites final_seq using seq_init
        # rather than seq_stack[-1]; presumably token 21 marks diffused
        # positions -- confirm against the upstream RFdiffusion script.
        final_seq = torch.where(
            torch.argmax(seq_init, dim=-1) == 21, 7, torch.argmax(seq_init, dim=-1)
        )  # 7 is glycine

        bfacts = torch.ones_like(final_seq.squeeze())
        # make bfact=0 for diffused coordinates
        bfacts[torch.where(torch.argmax(seq_init, dim=-1) == 21, True, False)] = 0
        # pX0 last step
        out = f"{out_prefix}.pdb"

        # Now don't output sidechains
        writepdb(
            out,
            denoised_xyz_stack[0, :, :4],
            final_seq,
            sampler.binderlen,
            chain_idx=sampler.chain_idx,
            bfacts=bfacts,
        )

        # run metadata
        trb = dict(
            config=OmegaConf.to_container(sampler._conf, resolve=True),
            plddt=plddt_stack.cpu().numpy(),
            device=torch.cuda.get_device_name(torch.cuda.current_device())
            if torch.cuda.is_available()
            else "CPU",
            time=time.time() - start_time,
        )
        if hasattr(sampler, "contig_map"):
            for key, value in sampler.contig_map.get_mappings().items():
                trb[key] = value
        with open(f"{out_prefix}.trb", "wb") as f_out:
            pickle.dump(trb, f_out)

        if sampler.inf_conf.write_trajectory:
            # trajectory pdbs
            traj_prefix = (
                os.path.dirname(out_prefix) + "/traj/" + os.path.basename(out_prefix)
            )
            os.makedirs(os.path.dirname(traj_prefix), exist_ok=True)

            out = f"{traj_prefix}_Xt-1_traj.pdb"
            writepdb_multi(
                out,
                denoised_xyz_stack,
                bfacts,
                final_seq.squeeze(),
                use_hydrogens=False,
                backbone_only=False,
                chain_ids=sampler.chain_idx,
            )

            out = f"{traj_prefix}_pX0_traj.pdb"
            writepdb_multi(
                out,
                px0_xyz_stack,
                bfacts,
                final_seq.squeeze(),
                use_hydrogens=False,
                backbone_only=False,
                chain_ids=sampler.chain_idx,
            )

        log.info(f"Finished design in {(time.time()-start_time)/60:.2f} minutes")


# Script entry point: Hydra parses CLI overrides and invokes main().
if __name__ == "__main__":
    main()
