"""Script for running inference and sampling.
Sample command:
> python scripts/run_inference.py
"""
import os
import shutil
import time
import tree
import hydra
import torch
import logging
import GPUtil
import pickle
import random
import sys
# Make the project root (current working directory) importable so the
# local packages below (analysis, data, experiments, ...) resolve when the
# script is launched from the repository root. Leftover debug prints of
# sys.path were removed.
sys.path.append(os.getcwd())
import numpy as np
import pandas as pd
from datetime import datetime
from typing import Optional
import  analysis.utils as au
from analysis import metrics
import data.utils as du
from typing import Dict
import esm
from experiments import train_se3_diffusion
from omegaconf import DictConfig, OmegaConf
from biotite.sequence.io import fasta
from openfold.utils import rigid_utils
from openfold.np import residue_constants 
from ParsePDB import read_model
from spin_cgnn import SPIN_CGNN


# Canonical 20-residue amino-acid alphabet; list order defines the integer
# encoding used throughout this script (A=0, C=1, ..., Y=19).
ALPHABET = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
            'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
# One-letter -> three-letter amino-acid code.
AA1_to_AA3 = {'A': 'ALA', 'C': 'CYS', 'D': 'ASP', 'E': 'GLU', 'F': 'PHE',
              'G': 'GLY', 'H': 'HIS', 'I': 'ILE', 'K': 'LYS', 'L': 'LEU',
              'M': 'MET', 'N': 'ASN', 'P': 'PRO', 'Q': 'GLN', 'R': 'ARG',
              'S': 'SER', 'T': 'THR', 'V': 'VAL', 'W': 'TRP', 'Y': 'TYR'}
# Derive the reverse map instead of hand-maintaining a second literal, so the
# two mappings can never drift out of sync.
AA3_to_AA1 = {aa3: aa1 for aa1, aa3 in AA1_to_AA3.items()}
# Bidirectional map in a single dict: letter -> index AND index -> letter.
# enumerate() replaces the former O(n^2) ALPHABET.index() call per iteration.
AA_INDEX = {}
for idx, aa in enumerate(ALPHABET):
    AA_INDEX[aa] = idx
    AA_INDEX[idx] = aa


def aa_encode(seq):
    """Encode a one-letter amino-acid sequence as a 1-D tensor of indices.

    Args:
        seq: iterable of one-letter amino-acid codes (e.g. a string).

    Returns:
        torch.Tensor of integer indices per the ALPHABET encoding.
    """
    return torch.tensor([AA_INDEX[aa] for aa in seq])
    
def get_motif(pdb_path, idx_motif, chain_name, atoms=('N', 'CA', 'C'), scale=1, recenter=True):
    '''
    Extract a motif (backbone coordinates and sequence) from a PDB model.

    Args:
        pdb_path: path of the PDB file to read.
        idx_motif: residue numbers (source-PDB numbering) forming the motif.
        chain_name: chain id(s); residues are taken from chains whose name is
            contained in this value.
        atoms: backbone atom names kept per residue. Default changed from a
            mutable list to a tuple (shared-mutable-default pitfall);
            membership tests behave identically.
        scale: interpolation weight between the motif's own centroid (0) and
            the hard-coded chain center below (1).
        recenter: if True, translate coordinates toward the interpolated
            center.

    Returns:
        Dict with:
            'coord': [M, n_atoms, 3] tensor of atom coordinates,
            'aa': [M] integer amino-acid indices (ALPHABET encoding),
            'idx_motif_src': source residue numbers as a tensor.
    '''
    model = read_model(pdb_path)
    aa = []
    coord = []
    for chain in model:
        if chain.name in chain_name:
            for residue in chain:
                if residue.num in idx_motif:
                    aa.append(AA_INDEX[AA3_to_AA1[residue.name]])
                    # NOTE(review): atom order follows the file, not the
                    # `atoms` argument — this assumes N/CA/C appear in that
                    # order in the PDB, and torch.tensor() below relies on
                    # the Atom objects being coordinate-like. TODO confirm
                    # against ParsePDB.read_model.
                    coord.append([atom for atom in residue if atom.name in atoms])
    aa = torch.tensor(aa)
    idx_motif = torch.tensor(idx_motif)
    coord = torch.tensor(coord)
    if recenter:
        # Blend the motif centroid with a fixed per-atom chain center;
        # scale=1 moves the motif entirely onto the hard-coded center.
        motif_center = coord.mean(dim=0)
        chain_center = torch.tensor([[136.2531,   1.3359,  12.4172],
                                     [136.2039,   1.3719,  12.4993],
                                     [136.1492,   1.3986,  12.5889]])
        new_center = motif_center * (1-scale) + chain_center * scale
        coord -= new_center.unsqueeze(0)
    return {'coord': coord, 'aa': aa, 'idx_motif_src': idx_motif}

class Sampler:
    """Motif-scaffolding diffusion sampler.

    Samples protein backbones of random length around a fixed motif,
    filters them with cheap backbone-geometry metrics, then runs
    SPIN-CGNN sequence design and ESMFold self-consistency on samples
    that pass the filter.
    """

    def __init__(
            self,
            conf: DictConfig,
            conf_overrides: Dict = None
        ):
        """Initialize sampler.

        Args:
            conf: inference config.
            conf_overrides: Dict of fields to override with new values.
        """
        self._log = logging.getLogger(__name__)

        # Disable struct mode so new fields can be merged into the config.
        OmegaConf.set_struct(conf, False)

        # Prepare configs.
        self._conf = conf
        self._infer_conf = conf.inference
        self._diff_conf = self._infer_conf.diffusion
        self._sample_conf = self._infer_conf.samples

        # NOTE(review): this generator is seeded but the sampling below uses
        # the global np.random / random modules, so runs are not reproducible
        # from this seed alone — confirm intended.
        self._rng = np.random.default_rng(self._infer_conf.seed)
        self.spin_cgnn = SPIN_CGNN()

        # Set model hub directory for ESMFold.
        torch.hub.set_dir(self._infer_conf.pt_hub_dir)

        # Set up accelerator.
        if torch.cuda.is_available():
            if self._infer_conf.gpu_id is None:
                # Pick the GPU with the most free memory. Keep the IDs as a
                # list of ints: the previous ''.join(ids)[0] approach broke
                # for device IDs >= 10 and raised IndexError when GPUtil
                # reported no available GPU.
                available_gpus = GPUtil.getAvailable(order='memory', limit=8)
                if available_gpus:
                    self.device = f'cuda:{available_gpus[0]}'
                else:
                    # CUDA exists but GPUtil found none "available": fall
                    # back to the default device rather than crashing.
                    self.device = 'cuda:0'
            else:
                self.device = f'cuda:{self._infer_conf.gpu_id}'
        else:
            self.device = 'cpu'
        self._log.info(f'Using device: {self.device}')

        # Set up output directories.
        self._weights_path = self._infer_conf.weights_path
        output_dir = self._infer_conf.output_dir
        if self._infer_conf.name is None:
            dt_string = datetime.now().strftime("%dD_%mM_%YY_%Hh_%Mm_%Ss")
        else:
            dt_string = self._infer_conf.name
        self._output_dir = os.path.join(output_dir, dt_string)
        os.makedirs(self._output_dir, exist_ok=True)
        self._log.info(f'Saving results to {self._output_dir}')
        self._pmpnn_dir = self._infer_conf.pmpnn_dir

        # Persist the effective config next to the outputs for provenance.
        config_path = os.path.join(self._output_dir, 'inference_conf.yaml')
        with open(config_path, 'w') as f:
            OmegaConf.save(config=self._conf, f=f)
        self._log.info(f'Saving inference config to {config_path}')

        # Load models and experiment.
        self._load_ckpt(conf_overrides)
        self._folding_model = esm.pretrained.esmfold_v1().eval()
        self._folding_model = self._folding_model.to(self.device)

    def _load_ckpt(self, conf_overrides):
        """Loads in model checkpoint and builds the experiment.

        Args:
            conf_overrides: optional dict merged over the checkpoint config.
        """
        self._log.info(f'Loading weights from {self._weights_path}')

        # Read checkpoint and create experiment.
        weights_pkl = du.read_pkl(
            self._weights_path, use_torch=True,
            map_location=self.device)

        # Merge base experiment config with checkpoint config.
        self._conf.model = OmegaConf.merge(
            self._conf.model, weights_pkl['conf'].model)
        if conf_overrides is not None:
            self._conf = OmegaConf.merge(self._conf, conf_overrides)

        # Prepare model: no checkpointing / warm start during inference.
        self._conf.experiment.ckpt_dir = None
        self._conf.experiment.warm_start = None
        self.exp = train_se3_diffusion.Experiment(
            conf=self._conf)
        self.model = self.exp.model

        # Remove DataParallel/DDP 'module.' prefix if it exists.
        model_weights = weights_pkl['model']
        model_weights = {
            k.replace('module.', ''): v for k, v in model_weights.items()}
        self.model.load_state_dict(model_weights)
        self.model = self.model.to(self.device)
        self.model.eval()
        self.diffuser = self.exp.diffuser

    def motif_shifting_schedule(self, sample_length, motif, step, start=0, end=None):
        """Enumerate motif placements shifted along the chain.

        Args:
            sample_length: total length of the sampled chain.
            motif: dict from get_motif(); 'idx_motif_src' is used.
            step: shift increment between consecutive placements.
            start: first shift offset.
            end: exclusive last shift offset; derived from the chain length
                when None.

        Returns:
            List of index tensors, one placement per shift.
        """
        # Motif indices relative to the motif's own first residue.
        idx_motif_rel = motif['idx_motif_src'] - motif['idx_motif_src'].min()

        if end is None:
            # Last shift that still keeps the whole motif inside the chain.
            end = sample_length - idx_motif_rel.max()
        else:
            # NOTE(review): 228 is an unexplained magic offset — presumably
            # reserves a fixed tail segment; confirm and name it.
            sample_length = sample_length - 228
            end = min(end, sample_length - idx_motif_rel.max())
            print("end: ", end)

        idx_set = []
        for shift in range(start, end, step):
            idx_set.append(idx_motif_rel + shift)
        print("idx_set:", idx_set)
        return idx_set

    def generate_residue_positions(self, sample_length, motif):
        """Draw random chain positions for the motif residues.

        Args:
            sample_length: total length of the sampled chain.
            motif: motif dict (currently unused; kept for interface parity
                with motif_shifting_schedule).

        Returns:
            Tensor of 4 distinct random positions in [0, sample_length).
            NOTE(review): positions are unsorted and the count is hard-coded
            to 4 — must match the number of motif residues; confirm.
        """
        idx_set = random.sample(range(sample_length), 4)
        print("idx_set:", idx_set)
        return torch.tensor(idx_set)

    def run_sampling(self):
        """Runs the full sampling loop.

        All outputs are written to
            {output_dir}/{date_time}/length_{L}/sample_{i}
        where {output_dir} is created at initialization.
        """
        motif = get_motif(
            pdb_path=self._sample_conf.motif.pdb_path,
            idx_motif=self._sample_conf.motif.idx_motif,
            chain_name=self._sample_conf.motif.chain_name,
            scale=self._sample_conf.motif.scale
            )
        # Rigid frames (quaternion + translation, tensor-7 form) for the
        # motif backbone, built from N/CA/C coordinates.
        motif['tensor7'] = rigid_utils.rigids_from_ref(
            motif['coord'][:, 0], motif['coord'][:, 1], motif['coord'][:, 2]
        ).to_tensor_7()

        for model_i in range(self._sample_conf.total_samples):
            # Draw a random total length for this sample.
            sample_length = np.random.randint(
                self._sample_conf.min_length,
                self._sample_conf.max_length + 1,
            )
            # One directory per length, shared by samples of that length.
            length_dir = os.path.join(
                self._output_dir, f'length_{sample_length}')
            os.makedirs(length_dir, exist_ok=True)
            self._log.info(f'Sampling length {sample_length}: {length_dir}')

            # Choose where the motif residues land in the new chain.
            idx_motif = self.generate_residue_positions(sample_length, motif)
            # Per-sample subdirectory avoids file collisions across samples.
            sample_dir = os.path.join(length_dir, f'sample_{model_i}')
            os.makedirs(sample_dir, exist_ok=True)

            # 1 = diffused residue, 0 = fixed motif residue.
            diffuse_mask = np.ones(sample_length)
            diffuse_mask[idx_motif] = 0.
            sample_output = self.sample(sample_length, motif, idx_motif)
            self.save_sample(
                idx_motif,
                sample_output['prot_traj'],
                diffuse_mask,
                output_dir=sample_dir
            )
            metrics_dict = self.eval_diffusion(
                sample_output['prot_traj'],
                output_dir=sample_dir
            )
            # Only run the expensive self-consistency pipeline on samples
            # that pass basic CA-geometry checks (bond deviation, validity,
            # steric clashes).
            if metrics_dict['ca_ca_bond_dev'] <= 0.30 and metrics_dict['ca_ca_valid_percent'] >= 0.80 and \
                metrics_dict['ca_steric_clash_percent'] <= 0.0 and metrics_dict['num_ca_steric_clashes'] <= 0:

                # NOTE(review): 'sample_1.pdb' presumably matches the file
                # name emitted by au.write_prot_to_pdb in save_sample —
                # confirm. (Plain string: the former f-string had no
                # placeholder.)
                sample_pdb_path = os.path.join(sample_dir, 'sample_1.pdb')
                sc_output_dir = os.path.join(sample_dir, 'self_consistency')
                os.makedirs(sc_output_dir, exist_ok=True)
                shutil.copy(sample_pdb_path, os.path.join(sc_output_dir, os.path.basename(sample_pdb_path)))
                # Run SPIN-CGNN + ESMFold self-consistency.
                self.run_self_consistency(
                    decoy_pdb_dir=sc_output_dir,
                    sample_pdb='sample_1.pdb',
                    reference_pdb_path=sc_output_dir,
                    motif_mask=idx_motif
                )
                # Log the sample index (previously logged the None return
                # value of run_self_consistency).
                self._log.info(f'Done sample {model_i}: {sample_dir}')

    def save_sample(
            self,
            idx_motif,
            bb_prot_traj: np.ndarray,
            diffuse_mask: np.ndarray,
            output_dir: str,
        ):
        """Writes the final sampled structure to a PDB file.

        Args:
            idx_motif: chain positions of the motif residues (forwarded to
                the PDB writer unchanged).
            bb_prot_traj: [T, N, 37, 3] atom37 sampled diffusion states.
                T is number of time steps. First time step is t=eps,
                i.e. bb_prot_traj[0] is the final sample after reverse
                diffusion. N is number of residues.
            diffuse_mask: [N] which residues are diffused (1) vs fixed (0).
            output_dir: where to save the sample.
        """

        # Write sample.
        sample_path = os.path.join(output_dir, 'sample')

        # Use b-factors to specify which residues are diffused
        # (100 = diffused, 0 = fixed motif), replicated over all 37 atoms.
        b_factors = np.tile((diffuse_mask * 100)[:, None], (1, 37))

        _ = au.write_prot_to_pdb(
            idx_motif,
            bb_prot_traj[0],
            sample_path,
            b_factors=b_factors
        )

    def eval_diffusion(
        self,
        bb_prot_traj: np.ndarray,
        output_dir: str
        ):
        """Computes cheap CA-geometry metrics for the final sampled structure.

        Args:
            bb_prot_traj: [T, N, 37, 3] trajectory; only bb_prot_traj[0]
                (the final sample) is evaluated.
            output_dir: unused; kept for interface compatibility.

        Returns:
            Dict with 'ca_ca_bond_dev', 'ca_ca_valid_percent',
            'ca_steric_clash_percent' and 'num_ca_steric_clashes'.
        """

        atom37_pos = bb_prot_traj[0]
        CA_IDX = residue_constants.atom_order['CA']
        # An atom is present if any coordinate is non-zero; a residue is
        # present if any of its atoms is.
        atom37_mask = np.any(atom37_pos, axis=-1)
        bb_mask = np.any(atom37_mask, axis=-1)
        ca_pos = atom37_pos[..., CA_IDX, :][bb_mask.astype(bool)]
        ca_ca_bond_dev, ca_ca_valid_percent = metrics.ca_ca_distance(ca_pos)
        num_ca_steric_clashes, ca_steric_clash_percent = metrics.ca_ca_clashes(ca_pos)

        metrics_dict = {
            'ca_ca_bond_dev': ca_ca_bond_dev,
            'ca_ca_valid_percent': ca_ca_valid_percent,
            'ca_steric_clash_percent': ca_steric_clash_percent,
            'num_ca_steric_clashes': num_ca_steric_clashes,
        }

        return metrics_dict

    def run_self_consistency(
        self,
        decoy_pdb_dir: str,
        sample_pdb,
        reference_pdb_path: str,
        motif_mask: Optional[np.ndarray] = None):

        """Run self-consistency on designed proteins against reference protein.

        Args:
            decoy_pdb_dir: directory where designed protein files are stored.
            sample_pdb: file name of the designed structure inside the
                directories above.
            reference_pdb_path: directory containing the reference protein.
            motif_mask: Optional positions of the motif residues.

        Returns:
            None. Side effects:
            Writes SPIN-CGNN outputs to decoy_pdb_dir/seqs
            Writes ESMFold outputs to decoy_pdb_dir/esmf
            Writes results in decoy_pdb_dir/sc_results.csv
        """

        # Run SPIN-CGNN inverse folding to design sequences for the backbone.
        cgnn_dir = os.path.join(decoy_pdb_dir, 'seqs')
        os.makedirs(cgnn_dir, exist_ok=True)
        reference_pdb = os.path.join(reference_pdb_path, sample_pdb)
        cgnn_fasta_path = os.path.join(
            cgnn_dir,
            os.path.basename(reference_pdb).replace('.pdb', '.fa')
        )

        self.spin_cgnn.sample(
            pdb_path=os.path.join(decoy_pdb_dir, sample_pdb),
            fa_path=cgnn_fasta_path,
            num_sample=self._sample_conf.seq_per_sample,
            temperature=1e-1,
            motif_mask=motif_mask
            )

        # Run ESMFold on each designed sequence and collect metrics.
        cgnn_results = {
            'tm_score': [],
            'sample_path': [],
            'header': [],
            'sequence': [],
            'rmsd': [],
        }

        if motif_mask is not None:
            # Only calculate motif RMSD if mask is specified.
            cgnn_results['motif_rmsd'] = []

        esmf_dir = os.path.join(decoy_pdb_dir, 'esmf')
        os.makedirs(esmf_dir, exist_ok=True)

        fasta_seqs = fasta.FastaFile.read(cgnn_fasta_path)
        sample_feats = du.parse_pdb_feats('sample', reference_pdb)

        for i, (header, string) in enumerate(fasta_seqs.items()):
            # Fold the designed sequence with ESMFold.
            esmf_sample_path = os.path.join(esmf_dir, f'esmfold_{i}.pdb')
            _ = self.run_folding(string, esmf_sample_path)
            esmf_feats = du.parse_pdb_feats('folded_sample', esmf_sample_path)
            sample_seq = du.aatype_to_seq(sample_feats['aatype'])

            # Calculate scTM of ESMFold output against the reference backbone.
            # NOTE(review): sample_seq is passed for both structures —
            # presumably intentional because both share length; confirm.
            _, tm_score = metrics.calc_tm_score(
                sample_feats['bb_positions'], esmf_feats['bb_positions'],
                sample_seq, sample_seq)
            rmsd = metrics.calc_aligned_rmsd(
                sample_feats['bb_positions'], esmf_feats['bb_positions'])
            if motif_mask is not None:
                sample_motif = sample_feats['bb_positions'][motif_mask]
                of_motif = esmf_feats['bb_positions'][motif_mask]
                motif_rmsd = metrics.calc_aligned_rmsd(
                    sample_motif, of_motif)
                cgnn_results['motif_rmsd'].append(motif_rmsd)
            cgnn_results['rmsd'].append(rmsd)
            cgnn_results['tm_score'].append(tm_score)
            cgnn_results['sample_path'].append(esmf_sample_path)
            cgnn_results['header'].append(header)
            cgnn_results['sequence'].append(string)

        # Save results to CSV.
        csv_path = os.path.join(decoy_pdb_dir, 'sc_results.csv')
        cgnn_results = pd.DataFrame(cgnn_results)
        cgnn_results.to_csv(csv_path)

    def run_folding(self, sequence, save_path):
        """Run ESMFold on a sequence and write the predicted PDB.

        Args:
            sequence: amino-acid sequence string.
            save_path: output path for the predicted PDB text.

        Returns:
            The PDB string produced by ESMFold.
        """
        with torch.no_grad():
            output = self._folding_model.infer_pdb(sequence)
        with open(save_path, "w") as f:
            f.write(output)
        return output

    def sample(self, sample_length: int, motif, idx_motif):
        """Sample a backbone of the given length with the motif held fixed.

        Args:
            sample_length: length to sample.
            motif: dict from get_motif(); 'tensor7' and 'aa' are used.
            idx_motif: chain positions where the motif residues are planted.

        Returns:
            Sample outputs (see train_se3_diffusion.inference_fn) with the
            batch dimension stripped.
        """
        res_mask = np.ones(sample_length)
        fixed_mask = np.zeros_like(res_mask)

        # Initialize noise rigids for every residue from the reference
        # distribution (tensor-7 form).
        ref_sample = self.diffuser.sample_ref(
            n_samples=sample_length,
            as_tensor_7=True,
        )

        # Plant the motif frames and mask those positions out of diffusion.
        ref_sample['rigids_t'][idx_motif] = motif['tensor7']
        fixed_mask[idx_motif] = 1.

        res_idx = torch.arange(1, sample_length + 1)

        init_feats = {
            'res_mask': res_mask,
            'seq_idx': res_idx,
            'fixed_mask': fixed_mask,
            'torsion_angles_sin_cos': np.zeros((sample_length, 7, 2)),
            'sc_ca_t': np.zeros((sample_length, 3)),
            **ref_sample,
        }

        # Fixed amino-acid types: 20 (unknown/mask token) everywhere except
        # the motif positions, which carry the motif's real residues.
        init_feats['aatype'] = torch.ones(sample_length).long() * 20
        init_feats['aatype'][fixed_mask == 1] = motif['aa']

        # Add batch dimension and move to the compute device.
        init_feats = tree.map_structure(
            lambda x: x if torch.is_tensor(x) else torch.tensor(x), init_feats)
        init_feats = tree.map_structure(
            lambda x: x[None].to(self.device), init_feats)

        # Run reverse diffusion.
        sample_out = self.exp.inference_fn(
            init_feats,
            num_t=self._diff_conf.num_t,
            min_t=self._diff_conf.min_t,
            aux_traj=True,
            noise_scale=self._diff_conf.noise_scale,
        )

        # Strip the batch dimension from every output tensor.
        return tree.map_structure(lambda x: x[:, 0], sample_out)

@hydra.main(version_base=None, config_path="../config", config_name="inference")
def run(conf: DictConfig) -> None:
    """Hydra entry point: build a Sampler from the config and run sampling."""
    print('Starting inference')
    t0 = time.time()
    sampler = Sampler(conf)
    sampler.run_sampling()
    print(f'Finished in {time.time() - t0:.2f}s')


if __name__ == '__main__':
    run()
