# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import pathlib
import string
import torch
import os
import sys
sys.path.append(os.getcwd())
sys.path.append("..")
import json
import esm
import itertools
import warnings
import shutil
import torch.nn.functional as F
import pandas as pd
import numpy as np
from scipy.stats import spearmanr
from Bio import SeqIO
from esm.inverse_folding.util import CoordBatchConverter
from esm import pretrained, MSATransformer
from tqdm import tqdm
from typing import List, Tuple
from src.utils import read_fasta, full_sequence, scan_max_mutant, mutant_filter

# Silence noisy third-party deprecation/user warnings (Bio, torch, pandas)
# so the per-protein progress output below stays readable.
warnings.filterwarnings("ignore")


def remove_insertions(sequence: str) -> str:
    """Remove alignment insertions from *sequence*.

    Needed to load aligned sequences in an MSA: the a3m format marks
    insertion states with lowercase residues and the characters '.'/'*',
    all of which must be stripped so every row shares the reference
    coordinate system.
    """
    # Single C-level pass: the third maketrans argument lists characters
    # to delete (all lowercase letters plus '.' and '*').
    return sequence.translate(str.maketrans("", "", string.ascii_lowercase + ".*"))


def read_msa(filename: str, nseq: int) -> List[Tuple[str, str]]:
    """Read the first *nseq* sequences of an MSA file, stripping insertions.

    The input file must be in a3m format (parsed here with the SeqIO fasta
    reader) so that remove_insertions restores reference coordinates.

    Returns a list of (description, cleaned_sequence) pairs.
    """
    records = itertools.islice(SeqIO.parse(filename, "fasta"), nseq)
    return [(rec.description, remove_insertions(str(rec.seq))) for rec in records]



def label_row(rows, sequence, token_probs, alphabet, offset_idx, use_sum=False):
    """Score a mutant entry from per-position log-probabilities.

    Args:
        rows: mutation string such as "A123C"; multi-mutants are joined by
            ";" (or ":" when present). The literal "wt" scores 0.
        sequence: wild-type sequence used to validate each mutation.
        token_probs: tensor of log-probabilities indexed as
            [0, position + 1, token] (the +1 skips the BOS token).
        alphabet: tokenizer providing get_idx(residue) -> vocabulary index.
        offset_idx: 1-based offset of the first residue in *sequence*.
        use_sum: if True return the sum over sub-mutations, else the mean.

    Returns:
        float: summed or averaged log-ratio log p(mt) - log p(wt).
    """
    scores = []
    # Multi-mutants default to ";" as separator; some datasets use ":".
    sep = ":" if ":" in rows else ";"
    for mutation in rows.split(sep):
        if mutation.lower() == "wt":
            scores.append(0)
            continue
        wt, idx, mt = mutation[0], int(mutation[1:-1]) - offset_idx, mutation[-1]
        # Bug fix: the old message blamed `mt`; the check compares the
        # claimed wild-type residue against the actual sequence residue.
        assert sequence[idx] == wt, f"The pos {idx} `{wt}` does not match the sequence `{sequence[idx]}`"
        wt_encoded, mt_encoded = alphabet.get_idx(wt), alphabet.get_idx(mt)
        # add 1 for BOS
        score = token_probs[0, 1 + idx, mt_encoded] - token_probs[0, 1 + idx, wt_encoded]
        scores.append(score.item())

    return sum(scores) if use_sum else sum(scores) / len(scores)


def compute_pppl(row, sequence, model, alphabet, offset_idx):
    """Pseudo-perplexity score of a single mutant.

    Applies the mutation in *row* ("AiB" form) to *sequence*, then masks one
    position at a time and sums the model's log-probability of the residue
    actually present there.

    Returns:
        float: sum of per-position masked log-probabilities (higher = more
        plausible sequence under the model).
    """
    wt, idx, mt = row[0], int(row[1:-1]) - offset_idx, row[-1]
    assert sequence[idx] == wt, "The listed wildtype does not match the provided sequence"

    # Substitute the mutant residue into the wild-type sequence.
    mutated = sequence[:idx] + mt + sequence[(idx + 1):]

    # Tokenize the single mutated sequence (adds BOS/EOS around residues).
    batch_converter = alphabet.get_batch_converter()
    _, _, batch_tokens = batch_converter([("protein1", mutated)])

    # NOTE(review): range(1, len-1) leaves the final residue positions
    # unscored; this mirrors the upstream ESM variant-prediction script —
    # confirm it is intended.
    total = 0
    for pos in range(1, len(mutated) - 1):
        masked = batch_tokens.clone()
        masked[0, pos] = alphabet.mask_idx
        with torch.no_grad():
            log_probs = torch.log_softmax(model(masked.cuda())["logits"], dim=-1)
        total += log_probs[0, pos, alphabet.get_idx(mutated[pos])].item()
    return total




def compute_spearmanr(df, model_location):
    """Spearman correlation between experimental and predicted scores.

    Correlates the "score" column against the *model_location* column of
    *df*; returns 0 when the correlation is undefined (e.g. a constant
    column yields NaN).
    """
    rho = spearmanr(df["score"], df[model_location]).correlation
    return 0 if np.isnan(rho) else rho



def predict(args, model_location, model, alphabet):
    """Score every mutant in ``args.dms_input`` with a single ESM model.

    Dispatches on substrings of ``model_location``:
      * "if"  -> ESM inverse folding: per-mutant full-sequence log-likelihood
                 conditioned on backbone coordinates.
      * "msa" -> MSA Transformer: masked-marginal scores over the query row
                 of an MSA.
      * else  -> plain sequence model: wt-marginals, masked-marginals, or
                 pseudo-ppl scoring.

    Sequences longer than the 1022-residue context of pre-ESM2 models are
    cropped to a window around the mutated positions via ``scan_max_mutant``.

    Args:
        args: parsed CLI namespace; reads dms_input, sequence, pdb_path,
            chain, msa_path, msa_samples, scoring_strategy, mutation_col,
            esmif_cache and use_sum.
        model_location: model name string; its substrings pick the path.
        model: loaded ESM model, assumed to already be on the GPU.
        alphabet: tokenizer/alphabet matching ``model``.

    Returns:
        pd.DataFrame: the DMS table with a new ``model_location`` column of
        per-mutant scores.
    """
    # Load the deep mutational scan table (TSV, one mutant per row).
    df = pd.read_table(args.dms_input)

    batch_converter = alphabet.get_batch_converter()

    is_truncate = False
    offset = 1  # 1-based mutation numbering by default

    # Pre-ESM2 models are limited to 1024 tokens (BOS + 1022 residues + EOS):
    # crop the sequence and the mutant table to a window covering the mutants.
    if not "esm2" in model_location and len(args.sequence) >= 1022:
        df_truncate, sequence_truncate, offset = scan_max_mutant(df, args.sequence)
        is_truncate = True

    if "if" in model_location:
        if_score = []
        mutants = df["mutant"]
        seq = args.sequence
        coords, pdb_seq = esm.inverse_folding.util.load_coords(args.pdb_path, args.chain)
        # The structure must describe exactly the sequence being scored.
        assert seq == pdb_seq, "The fasta sequence does not match the pdb sequence"

        if is_truncate:
            mutants = df_truncate["mutant"]
            seq = sequence_truncate
            # Crop backbone coordinates to the same 1022-residue window
            # (offset is 1-based, hence the -1).
            coords = coords[offset-1:offset+1021]

        # Batch-convert once so the coordinate tensors can be reused.
        batch_converter = CoordBatchConverter(alphabet)
        batch = [(coords, None, seq)]
        coords_, confidence, strs, tokens, padding_mask = batch_converter(batch)
        prev_output_tokens = tokens[:, :-1]
        if args.esmif_cache:
            # Cache mode: run the decoder once on the wild-type tokens and
            # reuse these logits for every mutant (fast approximation).
            logits, _ = model.forward(
                coords_.cuda(),
                padding_mask.cuda(),
                confidence.cuda(),
                prev_output_tokens.cuda()
            )

        for m in tqdm(mutants):
            # Materialize the full mutated sequence for this mutant string.
            m_seq = full_sequence(seq, m, offset)

            batch = [(coords, None, m_seq)]
            coords_, confidence, strs, tokens, padding_mask = batch_converter(batch)

            if not args.esmif_cache:
                # Exact mode: re-run the decoder for each mutant.
                logits, _ = model.forward(
                    coords_.cuda(),
                    padding_mask.cuda(),
                    confidence.cuda(),
                    prev_output_tokens.cuda()
                )

            target = tokens[:, 1:]
            target_padding_mask = (target == alphabet.padding_idx).cuda()
            # Average cross-entropy over non-padding positions; the score is
            # the (negative) mean NLL of the full mutant sequence.
            loss = F.cross_entropy(logits.cuda(), target.cuda(), reduction='none')
            avgloss = torch.sum(loss * ~target_padding_mask, dim=-1) / torch.sum(~target_padding_mask, dim=-1)
            ll_fullseq = -avgloss.detach().cpu().numpy().item()
            if_score.append(ll_fullseq)

        if is_truncate:
            df_truncate[model_location] = if_score
            # Rows outside the truncated window keep NaN in the output.
            df[model_location] = df_truncate[model_location]
        else:
            df[model_location] = if_score
        return df

    elif "msa" in model_location:
        assert args.msa_path, "MSA Transformer requires an MSA"
        data = [read_msa(args.msa_path, args.msa_samples)]
        if is_truncate:
            # Crop every MSA row to the same window as the query sequence.
            data_truncate = [[(d[0], d[1][offset-1:offset+1021]) for d in data[0]]]
            data = data_truncate

        assert (
                args.scoring_strategy == "masked-marginals"
        ), "MSA Transformer only supports masked marginal strategy"

        batch_labels, batch_strs, batch_tokens = batch_converter(data)

        # Mask each column of the first (query) row in turn and collect the
        # log-probabilities the model assigns at that column.
        all_token_probs = []
        for i in tqdm(range(batch_tokens.size(2))):
            batch_tokens_masked = batch_tokens.clone()
            batch_tokens_masked[0, 0, i] = alphabet.mask_idx  # mask out first sequence
            with torch.no_grad():
                token_probs = torch.log_softmax(
                    model(batch_tokens_masked.cuda())["logits"], dim=-1
                )
            all_token_probs.append(token_probs[:, 0, i])  # vocab size
        token_probs = torch.cat(all_token_probs, dim=0).unsqueeze(0)
    else:
        data = [
            ("protein1", args.sequence),
        ]
        if is_truncate:
            data = [
                ("protein1", sequence_truncate),
            ]
        batch_labels, batch_strs, batch_tokens = batch_converter(data)

        if args.scoring_strategy == "wt-marginals":
            # Single forward pass on the (possibly truncated) wild type.
            with torch.no_grad():
                token_probs = torch.log_softmax(model(batch_tokens.cuda())["logits"], dim=-1)
        elif args.scoring_strategy == "masked-marginals":
            # One forward pass per position, masking that position only.
            all_token_probs = []
            for i in tqdm(range(batch_tokens.size(1))):
                batch_tokens_masked = batch_tokens.clone()
                batch_tokens_masked[0, i] = alphabet.mask_idx
                with torch.no_grad():
                    token_probs = torch.log_softmax(
                        model(batch_tokens_masked.cuda())["logits"], dim=-1
                    )
                all_token_probs.append(token_probs[:, i])  # vocab size
            token_probs = torch.cat(all_token_probs, dim=0).unsqueeze(0)
        elif args.scoring_strategy == "pseudo-ppl":
            # NOTE(review): this path ignores truncation and always scores
            # against the full args.sequence — confirm intended.
            tqdm.pandas()
            df[model_location] = df.progress_apply(
                lambda row: compute_pppl(
                    row[args.mutation_col], args.sequence, model, alphabet, 1
                ),
                axis=1,
            )
            # Bug fix: pseudo-ppl has already written its scores; falling
            # through to the marginal labelling below raised NameError
            # because token_probs is never assigned on this path.
            return df

    # Convert the per-position log-probabilities into per-mutant scores.
    if is_truncate:
        df[model_location] = df_truncate.apply(
            lambda row: label_row(
                row[args.mutation_col],
                sequence_truncate,
                token_probs, alphabet,
                offset, args.use_sum
            ),
            axis=1,
        )
    else:
        df[model_location] = df.apply(
            lambda row: label_row(
                row[args.mutation_col],
                args.sequence,
                token_probs, alphabet, offset,
                args.use_sum
            ),
            axis=1,
        )

    return df



def create_parser():
    """Build the CLI argument parser for the mutation-scoring script."""
    parser = argparse.ArgumentParser(
        description="Label a deep mutational scan with predictions from an ensemble of ESM-1v models."  # noqa
    )

    # Core inputs/outputs.
    parser.add_argument(
        "--model_location",
        type=str,
        nargs="+",
        help="PyTorch model file OR name of pretrained model to download (see README for models)",
    )
    parser.add_argument("--fasta", type=str, default=None, help="fasta file")
    parser.add_argument(
        "--dms_input",
        type=pathlib.Path,
        help="TSV file containing the deep mutational scan",
    )
    parser.add_argument(
        "--mutation_col",
        type=str,
        default="mutant",
        help="column in the deep mutational scan labeling the mutation as 'AiB'",
    )
    parser.add_argument(
        "--dms_output",
        type=pathlib.Path,
        help="Save file containing the deep mutational scan along with predictions",
    )
    parser.add_argument(
        "--scoring_strategy",
        type=str,
        default="wt-marginals",
        choices=["wt-marginals", "pseudo-ppl", "masked-marginals"],
        help="",
    )

    # Model-specific inputs.
    parser.add_argument(
        "--pdb_path",
        type=pathlib.Path,
        help="path to pdb in pdb format (required for inverse folding)",
    )
    parser.add_argument("--chain", type=str, default="A", help="A")
    parser.add_argument(
        "--msa_path",
        type=pathlib.Path,
        default=None,
        help="path to MSA in a3m format (required for MSA Transformer)",
    )
    parser.add_argument(
        "--msa_samples",
        type=int,
        default=200,
        help="number of sequences to select from the start of the MSA",
    )

    # Custom options for dataset-wide evaluation.
    parser.add_argument("--sequence", type=str, default=None, help="input sequence for single muation predict")
    parser.add_argument("--dataset_input", type=str, default=None, help="input dms dataset directory")
    parser.add_argument("--esmif_cache", action="store_true", help="Use cached to inference")
    parser.add_argument("--use_ef", action="store_true", help="use esmfold pdb")
    parser.add_argument("--use_sum", action="store_true", help="use sum of log prob")

    parser.add_argument("--nogpu", action="store_true", help="Do not use GPU even if available")
    return parser


def main(args):
    """Run every requested model over a dataset directory or one DMS file.

    In dataset mode (``--dataset_input``) each protein subdirectory must
    contain ``<name>.fasta``, optionally ``<name>.a3m`` (MSA models) and
    ``<name>.pdb`` / ``<name>.ef.pdb`` (inverse-folding models), plus an
    ``experiments/`` folder of mutant TSVs; predictions are written into a
    sibling ``predictions/`` folder. In single-protein mode scores are
    appended as columns to ``--dms_output``.
    """
    protein_names = []
    # Housekeeping entries that may appear in directory listings.
    exclude_files_or_dirs = ["file.txt", ".ipynb_checkpoints"]

    for model_locate in args.model_location:

        model, alphabet = pretrained.load_model_and_alphabet(model_locate)
        model.eval()
        if torch.cuda.is_available() and not args.nogpu:
            model = model.cuda()
            print(f"Transferred {model_locate} to GPU.")

        # Evaluate a whole dataset directory.
        if args.dataset_input:
            protein_names = sorted(os.listdir(args.dataset_input))
            if protein_names == []:
                raise ValueError("No protein found in dataset input path!")

            # Drop housekeeping files/dirs from the protein list.
            for item in exclude_files_or_dirs:
                if item in protein_names:
                    protein_names.remove(item)

            for idx, name in enumerate(protein_names):
                print("-"*60)
                print(f"Processing 【{name}】. Current {idx+1}/{len(protein_names)}...")

                cur_dir = os.path.join(args.dataset_input, name)
                # Wild-type sequence for this protein.
                fasta_path = os.path.join(cur_dir, name+".fasta")
                if os.path.exists(fasta_path):
                    args.sequence = read_fasta(fasta_path, "seq")

                # Bug fix: the substring test was "mas", which never matches
                # MSA model names (e.g. esm_msa1b_...), so args.msa_path was
                # never set in dataset mode.
                if "msa" in model_locate:
                    msa_path = os.path.join(cur_dir, name+".a3m")
                    assert os.path.exists(msa_path), f"{msa_path} not exists!"
                    args.msa_path = msa_path

                if "if" in model_locate:
                    # Use the ESMFold-predicted structure when requested.
                    pdb_suffix = ".ef.pdb" if args.use_ef else ".pdb"
                    pdb_path = os.path.join(cur_dir, name + pdb_suffix)
                    assert os.path.exists(pdb_path), f"{pdb_path} not exists!"
                    args.pdb_path = pdb_path

                # All mutation files: [xxx.tsv]
                mutant_files = os.listdir(os.path.join(cur_dir, "experiments"))
                for item in exclude_files_or_dirs:
                    if item in mutant_files:
                        mutant_files.remove(item)
                if mutant_files == []:
                    print("Experiment tsv not exists!")
                    continue

                # Create the prediction directory if needed.
                prediction_path = os.path.join(cur_dir, "predictions")
                if not os.path.exists(prediction_path):
                    os.mkdir(prediction_path)

                for file in mutant_files:
                    mutant_file = os.path.join(cur_dir, "experiments", file)
                    args.dms_input = mutant_file

                    if "if" in model_locate and args.use_ef:
                        out_path = os.path.join(prediction_path, f"{file[:-4]}.{model_locate}.ef.tsv")
                    else:
                        out_path = os.path.join(prediction_path, f"{file[:-4]}.{model_locate}.tsv")

                    if args.use_sum:
                        out_path = out_path.replace(".tsv", ".sum.tsv")

                    # Skip work already on disk.
                    if os.path.exists(out_path):
                        print(f"{out_path} exists, skipping!")
                        continue

                    result = pd.read_table(mutant_file)
                    result["score"] = predict(args, model_locate, model, alphabet)[model_locate]
                    result.to_csv(out_path, sep="\t", index=False)
                    print(f"【{args.dms_input}】 Done!")

        # Evaluate a single protein mutation file.
        else:
            if args.fasta:
                args.sequence = read_fasta(args.fasta, "seq")
            else:
                raise ValueError("No squence info! please assign `sequence` or `fasta`")
            if os.path.exists(args.dms_output):
                # Bug fix: the output is written tab-separated below, so it
                # must be re-read as TSV (pd.read_csv defaulted to commas).
                result = pd.read_csv(args.dms_output, sep="\t")
                # Bug fix: only score the model loaded in this iteration; the
                # old code re-looped over all model names and called predict()
                # without the required model/alphabet arguments (TypeError).
                if model_locate not in result.columns:
                    result[model_locate] = predict(args, model_locate, model, alphabet)[model_locate]
            else:
                result = predict(args, model_locate, model, alphabet)
            result.to_csv(args.dms_output, sep="\t", index=False)
            


if __name__ == "__main__":
    # CLI entry point: parse the command line and run the evaluation loop.
    main(create_parser().parse_args())