# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import pathlib
import string

import torch

from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, MSATransformer
import pandas as pd
from tqdm import tqdm
from Bio import SeqIO
import itertools
from typing import List, Tuple
import numpy as np
import scipy.stats as stats
import gc

@torch.no_grad()
def dist_model(model, gpus, half=True):
    """Shard the transformer layers of ``model`` round-robin across ``gpus`` CUDA devices.

    The embedding and LM-head modules stay on the first device; only the
    entries of ``model.layers`` are distributed.

    Args:
        model: an ESM model exposing an iterable ``model.layers``.
        gpus: number of CUDA devices to spread the layers over.
        half: if True, cast the model to fp16 first to halve per-device memory.

    Returns:
        ``(model, devices, layer_device)`` where ``devices`` is the list of
        ``torch.device`` objects used and ``layer_device`` maps each layer
        index to the device that layer now lives on.

    Raises:
        ValueError: if ``gpus`` exceeds the number of physically available GPUs.
    """
    available = torch.cuda.device_count()
    if gpus > available:
        # Explicit raise instead of `assert`: input validation must survive -O.
        raise ValueError(f"Requested {gpus} GPUs but only {available} are available")
    model.eval()
    if half:
        model.half()
    devices = [torch.device(f"cuda:{i}") for i in range(gpus)]
    model = model.to(devices[0])
    torch.cuda.empty_cache()
    # Assign layers to devices round-robin and remember the placement so the
    # custom forward pass (dist_model_forward) can move activations along.
    layer_device = {}
    for idx, layer in enumerate(model.layers):
        device = devices[idx % gpus]
        layer.to(device)
        layer_device[idx] = device
        torch.cuda.empty_cache()
    return model, devices, layer_device


# -------------------------------


# ------- re-implement forward method of the model
@torch.no_grad()
def dist_model_forward(model, tokens, devices, layer_device):
    """Forward pass for an MSA-Transformer-style model whose layers were
    distributed over several devices by ``dist_model``.

    Mirrors the model's own forward, but moves the activation tensor to each
    layer's device before calling it.

    Args:
        model: model previously sharded by ``dist_model``.
        tokens: LongTensor of shape (batch, num_alignments, seqlen).
        devices: device list from ``dist_model``; embeddings, final layer norm
            and LM head are assumed to live on ``devices[0]``.
        layer_device: dict mapping layer index -> device holding that layer.

    Returns:
        dict with a single key ``"logits"``: LM-head output of shape
        (batch, num_alignments, seqlen, vocab).
    """
    assert tokens.ndim == 3
    batch_size, num_alignments, seqlen = tokens.size()
    padding_mask = tokens.eq(model.padding_idx)  # B, R, C
    if not padding_mask.any():
        # No padding anywhere: skip mask handling entirely below.
        padding_mask = None

    x = model.embed_tokens(tokens)
    # Positional embedding is computed per-row, then reshaped back to B x R x C x D.
    x += model.embed_positions(tokens.view(batch_size * num_alignments, seqlen)).view(x.size())
    if model.msa_position_embedding is not None:
        if x.size(1) > 1024:
            raise RuntimeError(
                "Using model with MSA position embedding trained on maximum MSA "
                f"depth of 1024, but received {x.size(1)} alignments."
            )
        x += model.msa_position_embedding[:, :num_alignments]

    x = model.emb_layer_norm_before(x)

    x = model.dropout_module(x)

    if padding_mask is not None:
        # Zero out embeddings at padded positions.
        x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))

    # B x R x C x D -> R x C x B x D
    x = x.permute(1, 2, 0, 3)

    for layer_idx, layer in enumerate(model.layers):
        # Hop the activations to whichever device holds this layer; the
        # aggressive gc/empty_cache calls keep peak per-GPU memory down.
        x = x.to(layer_device[layer_idx])
        gc.collect()
        torch.cuda.empty_cache()
        x = layer(
            x,
            self_attn_padding_mask=padding_mask.to(layer_device[layer_idx]) if padding_mask is not None else None,
            need_head_weights=False,
        )
        gc.collect()
        torch.cuda.empty_cache()

    # Final norm and LM head live on the first device.
    x = x.to(devices[0])
    x = model.emb_layer_norm_after(x)
    x = x.permute(2, 0, 1, 3)  # R x C x B x D -> B x R x C x D

    x = model.lm_head(x)

    result = {"logits": x}
    return result
    
def calculate_spearman_correlation(X, Y):
    """Return the Spearman rank-correlation coefficient between X and Y."""
    rho, _pvalue = stats.spearmanr(X, Y)
    return rho

def calculate_spearman_correlation_p(X, Y):
    """Return the p-value of the Spearman rank correlation between X and Y."""
    _rho, pvalue = stats.spearmanr(X, Y)
    return pvalue

def remove_insertions(sequence: str) -> str:
    """Remove insertion characters from an a3m-aligned sequence.

    In a3m format, insertions relative to the query are written as lowercase
    letters, '.' or '*'; all three are dropped here in a single C-level pass.
    """
    # Three-argument maketrans: every character in the third argument maps to None.
    table = str.maketrans("", "", string.ascii_lowercase + ".*")
    return sequence.translate(table)


def read_msa(filename: str, nseq: int) -> List[Tuple[str, str]]:
    """Read the first ``nseq`` sequences from an MSA file, removing insertions.

    The input file must be in a3m format (although we use the SeqIO fasta
    parser) for remove_insertions to work properly.

    Returns a list of (description, cleaned_sequence) tuples.
    """
    records = itertools.islice(SeqIO.parse(filename, "fasta"), nseq)
    return [(rec.description, remove_insertions(str(rec.seq))) for rec in records]


def create_parser():
    """Build the CLI argument parser for the variant-scoring script.

    Returns:
        argparse.ArgumentParser configured with model, input/output and
        scoring-strategy options.
    """
    parser = argparse.ArgumentParser(
        description="Label a deep mutational scan with predictions from an ensemble of ESM-1v models."  # noqa
    )

    # fmt: off
    parser.add_argument(
        "--model-location",
        type=str,
        help="PyTorch model file OR name of pretrained model to download (see README for models)",
        nargs="+",
    )
    parser.add_argument(
        "--sequence",
        type=str,
        help="Base sequence to which mutations were applied",
    )
    parser.add_argument(
        "--read",
        type=str,
        default=None,
        help="read fasta file",
    )
    parser.add_argument(
        "--dms-input",
        type=pathlib.Path,
        help="CSV file containing the deep mutational scan",
    )
    parser.add_argument(
        "--mutation-col",
        type=str,
        default="mutant",
        help="column in the deep mutational scan labeling the mutation as 'AiB'"
    )
    parser.add_argument(
        "--dms-output",
        type=pathlib.Path,
        help="Output file containing the deep mutational scan along with predictions",
    )
    parser.add_argument(
        "--offset-idx",
        type=int,
        default=0,
        help="Offset of the mutation positions in `--mutation-col`"
    )
    parser.add_argument(
        "--scoring-strategy",
        type=str,
        default="wt-marginals",
        choices=["wt-marginals", "pseudo-ppl", "masked-marginals"],
        # FIX: help text was empty.
        help="Strategy used to score each mutation"
    )
    parser.add_argument(
        "--msa-path",
        type=pathlib.Path,
        help="path to MSA in a3m format (required for MSA Transformer)"
    )
    parser.add_argument(
        "--msa-samples",
        type=int,
        default=400,
        help="number of sequences to select from the start of the MSA"
    )
    parser.add_argument(
        "--name",
        type=str,
        default='',
        # FIX: help text was copy-pasted from --msa-samples and described the
        # wrong option; --name selects the dataset/protein (used to decide
        # whether the model must be sharded across GPUs).
        help="dataset/protein name used to pick the GPU placement strategy"
    )
    # fmt: on
    parser.add_argument("--nogpu", action="store_true", help="Do not use GPU even if available")
    return parser


def label_row(row, sequence, token_probs, alphabet, offset_idx):
    """Score one mutation string 'AiB' as log p(mutant) - log p(wildtype).

    Args:
        row: mutation string, e.g. "A123C" (wildtype, 1-based position, mutant).
        sequence: wildtype sequence the mutation applies to.
        token_probs: log-probability tensor indexed [0, position, token]; its
            position axis includes a BOS token at index 0.
        alphabet: tokenizer-like object providing ``get_idx``.
        offset_idx: offset to subtract from the position in ``row``.

    Returns:
        float score: token_probs difference between mutant and wildtype.
    """
    # Subtract offset_idx and an extra 1 to convert the (1-based) mutation
    # position into a 0-based index into `sequence`.
    wt, idx, mt = row[0], int(row[1:-1]) - offset_idx - 1, row[-1]
    # FIX: message previously referenced the mutant `{mt}`, but the check
    # compares the wildtype against the sequence.
    assert sequence[idx] == wt, (
        f"The listed wildtype `{wt}` at pos {idx} does not match the sequence (`{sequence[idx]}`)"
    )

    wt_encoded, mt_encoded = alphabet.get_idx(wt), alphabet.get_idx(mt)

    # add 1 for BOS
    score = token_probs[0, 1 + idx, mt_encoded] - token_probs[0, 1 + idx, wt_encoded]
    return score.item()


def compute_pppl(row, sequence, model, alphabet, offset_idx):
    """Pseudo-log-likelihood of the full sequence after applying mutation `row`.

    Applies the point mutation 'AiB' to `sequence`, then masks each position
    in turn and sums the model's log-probability of the true residue there.
    """
    wildtype, pos, mutant = row[0], int(row[1:-1]) - offset_idx, row[-1]
    assert sequence[pos] == wildtype, "The listed wildtype does not match the provided sequence"

    # Apply the point mutation.
    mutated = sequence[:pos] + mutant + sequence[pos + 1:]

    # Tokenize the mutated sequence.
    batch_converter = alphabet.get_batch_converter()
    _labels, _strs, tokens = batch_converter([("protein1", mutated)])

    # Mask one position at a time (skipping BOS/EOS) and accumulate
    # log p(true residue | rest of sequence).
    total_log_prob = 0.0
    for i in range(1, len(mutated) - 1):
        masked = tokens.clone()
        masked[0, i] = alphabet.mask_idx
        with torch.no_grad():
            log_probs = torch.log_softmax(model(masked.cuda())["logits"], dim=-1)
        total_log_prob += log_probs[0, i, alphabet.get_idx(mutated[i])].item()
    return total_log_prob


def main(args):
    """Score a deep mutational scan with one or more ESM models and report the
    Spearman correlation against the experimental scores.

    Side effects: prints correlation/p-value to stdout and writes the
    per-mutation predictions to ``args.dms_output``.
    """
    # Load the deep mutational scan and the CSV holding the wildtype sequence.
    df = pd.read_csv(args.dms_input)
    fas = pd.read_csv(args.read)
    fasl = np.array(fas)
    # Datasets small enough for a single GPU vs. ones whose model must be
    # sharded across several GPUs with dist_model.
    small = ['UBC9_HUMAN', 'RASH_HUMAN', 'TIM_SULSO', 'P84126_THETH', 'KKA2_KLEPN', 'BLAT_ECOLX', 'B3VI55_LIPST', 'AMIE_PSEAE']
    big = ['MTH3_HAEAESTABILIZED', 'BG_STRSQ']
    # Reassemble the wildtype sequence by stripping numpy stringification
    # artifacts ('[', ']', quotes) from each row.
    fast = str()
    for i in fasl:
        fast = fast + str(i).replace('[', '').replace(']', '').replace('\'', '')
    args.sequence = fast

    # Parse "mutant<TAB>score" rows; rows whose mutant field contains ';'
    # (multi-mutants) are skipped. NOTE(review): replacing '\\' turns an
    # escaped tab into a bare 't' which split('t') then consumes — this relies
    # on mutation strings never containing a lowercase 't'; confirm upstream.
    reals = []
    zz = np.array(df)
    for z in zz:
        z = str(z).replace('[', '').replace(']', '').replace('\'', '').replace('\\', '').split('t')
        if ';' in z[0]:
            continue
        reals.append([z[0], float(z[1])])
    df = pd.DataFrame(reals, columns=['mutant', 'score'])

    # inference for each model
    for model_location in args.model_location:
        flag = 0  # set to 1 when the model has been sharded across GPUs
        model, alphabet = pretrained.load_model_and_alphabet(model_location)
        model.eval()
        if torch.cuda.is_available() and not args.nogpu:
            if args.name in small:
                model = model.cuda()
            elif args.name in big:
                flag = 1
                model = model.cuda()
                # FIX: `gpus` was an undefined name here (NameError whenever a
                # "big" dataset was run); shard across all available GPUs.
                model, devices, layer_device = dist_model(model, torch.cuda.device_count())
            print("Transferred model to GPU")

        batch_converter = alphabet.get_batch_converter()

        if isinstance(model, MSATransformer):
            data = [read_msa(args.msa_path, args.msa_samples)]
            assert (
                args.scoring_strategy == "masked-marginals"
            ), "MSA Transformer only supports masked marginal strategy"

            batch_labels, batch_strs, batch_tokens = batch_converter(data)

            # Mask each column of the query (first) sequence in turn and
            # collect the model's log-probabilities at that column.
            all_token_probs = []
            for i in tqdm(range(batch_tokens.size(2))):
                batch_tokens_masked = batch_tokens.clone()
                batch_tokens_masked[0, 0, i] = alphabet.mask_idx  # mask out first sequence
                with torch.no_grad():
                    if flag == 0:
                        token_probs = torch.log_softmax(
                            model(batch_tokens_masked.cuda())["logits"], dim=-1
                        )
                    else:
                        # Sharded model: use the device-hopping forward pass.
                        token_probs = torch.log_softmax(
                            dist_model_forward(model, batch_tokens_masked.cuda(), devices, layer_device)["logits"],
                            dim=-1,
                        )
                all_token_probs.append(token_probs[:, 0, i])  # vocab size
            token_probs = torch.cat(all_token_probs, dim=0).unsqueeze(0)
            df[model_location] = df.apply(
                lambda row: label_row(
                    row[args.mutation_col], args.sequence, token_probs, alphabet, args.offset_idx
                ),
                axis=1,
            )

        else:
            data = [
                ("protein1", args.sequence),
            ]
            batch_labels, batch_strs, batch_tokens = batch_converter(data)

            if args.scoring_strategy == "wt-marginals":
                with torch.no_grad():
                    token_probs = torch.log_softmax(model(batch_tokens.cuda())["logits"], dim=-1)
                df[model_location] = df.apply(
                    lambda row: label_row(
                        row[args.mutation_col],
                        args.sequence,
                        token_probs,
                        alphabet,
                        args.offset_idx,
                    ),
                    axis=1,
                )
            elif args.scoring_strategy == "masked-marginals":
                all_token_probs = []
                for i in tqdm(range(batch_tokens.size(1))):
                    batch_tokens_masked = batch_tokens.clone()
                    batch_tokens_masked[0, i] = alphabet.mask_idx
                    with torch.no_grad():
                        token_probs = torch.log_softmax(
                            model(batch_tokens_masked.cuda())["logits"], dim=-1
                        )
                    all_token_probs.append(token_probs[:, i])  # vocab size
                token_probs = torch.cat(all_token_probs, dim=0).unsqueeze(0)
                df[model_location] = df.apply(
                    lambda row: label_row(
                        row[args.mutation_col],
                        args.sequence,
                        token_probs,
                        alphabet,
                        args.offset_idx,
                    ),
                    axis=1,
                )
            elif args.scoring_strategy == "pseudo-ppl":
                tqdm.pandas()
                df[model_location] = df.progress_apply(
                    lambda row: compute_pppl(
                        row[args.mutation_col], args.sequence, model, alphabet, args.offset_idx
                    ),
                    axis=1,
                )

    # NOTE(review): the prediction column is hard-coded to the MSA Transformer
    # checkpoint name — verify it matches the supplied --model-location.
    x = df['score']
    y = df['esm_msa1b_t12_100M_UR50S']
    print('DP Spearman Correlation_P:' + str(calculate_spearman_correlation_p(x, y)))
    print('DP Spearman Correlation:' + str(calculate_spearman_correlation(x, y)))

    df.to_csv(args.dms_output)


if __name__ == "__main__":
    # Parse CLI arguments and launch the scoring pipeline.
    cli_args = create_parser().parse_args()
    main(cli_args)