import argparse
import json
import os
import re
import sys
import warnings

sys.path.append(os.getcwd())
sys.path.append("..")

import numpy as np
import pandas as pd
import torch
import transformers
from scipy.stats import pearsonr, spearmanr
from tqdm import tqdm
from transformers import EsmForMaskedLM, EsmTokenizer

from src.utils import read_fasta, full_sequence, scan_max_mutant, mutant_filter

transformers.logging.set_verbosity_error()
warnings.filterwarnings("ignore")

def label_row(rows, sequence, token_probs, tokenizer, offset_idx, model):
    """Score one mutant entry (possibly multi-mutation) with masked-marginal log-probs.

    Args:
        rows: mutation string such as "A24G", "A24G;S30T" (":" also accepted
            as separator), or "WT" (case-insensitive) for the wild type.
        sequence: wild-type amino-acid sequence the positions refer to.
        token_probs: tensor of shape (1, seq_len, vocab_size) of
            log-probabilities, already aligned to the raw sequence (special
            tokens stripped by the caller), so positions index it directly.
        tokenizer: tokenizer exposing ``convert_tokens_to_ids``.
        offset_idx: offset of the mutation numbering (1 for 1-based positions).
        model: unused; kept for signature compatibility with callers.

    Returns:
        float: sum over mutations of log P(mt) - log P(wt); 0 for "WT".

    Raises:
        ValueError: if a mutation's wild-type residue does not match the
            sequence at that position.
    """
    # Multi-mutants may be joined with ";" or ":"; pick whichever is present.
    sep = ":" if ":" in rows else ";"
    scores = []
    for row in rows.split(sep):
        if row.lower() == "wt":
            scores.append(0)
            continue
        wt, idx, mt = row[0], int(row[1:-1]) - offset_idx, row[-1]
        # Validate with an explicit raise (asserts vanish under `python -O`)
        # and report the residue actually found, not the mutant letter.
        if sequence[idx] != wt:
            raise ValueError(
                f"The pos {idx} `{wt}` does not match the `{sequence[idx]}`"
            )
        wt_encoded = tokenizer.convert_tokens_to_ids(wt)
        mt_encoded = tokenizer.convert_tokens_to_ids(mt)
        # token_probs already has the BOS token removed, so `idx` needs no
        # special-token offset here.
        score = token_probs[0, idx, mt_encoded] - token_probs[0, idx, wt_encoded]
        scores.append(score.item())
    return sum(scores)


def predict(args, model_location, model, tokenizer, mask_id):
    """Compute masked-marginal mutation scores for every row of ``args.dms_input``.

    Each position of ``args.sequence`` is masked in turn, the model is run
    once, and the log-softmax over the vocabulary at the masked position is
    collected. Mutants are then scored via :func:`label_row` as
    log P(mt) - log P(wt).

    Args:
        args: namespace with ``.dms_input`` (csv path), ``.sequence``
            (wild-type sequence) and ``.mutation_col`` (mutation column name).
        model_location: model name/path; also used as the score column name.
        model: masked LM whose logits have shape (batch, tokens, vocab_size).
        tokenizer: tokenizer matching the model.
        mask_id: vocabulary id of the mask token.

    Returns:
        pd.DataFrame: the input dataframe with a new ``model_location`` column.
    """
    df = pd.read_csv(args.dms_input)
    offset = 1  # mutation strings use 1-based positions

    seq = args.sequence
    # Replace rare/ambiguous amino acids by X and insert whitespace between
    # residues, as the tokenizer expects.
    spaced = " ".join(re.sub(r"[UZOB]", "X", seq))

    # Derive the device from the model itself rather than relying on a
    # module-level `device` global that only exists when run as a script.
    device = next(model.parameters()).device

    encoded = tokenizer.batch_encode_plus([spaced], add_special_tokens=True, padding="longest")
    input_ids = torch.tensor(encoded["input_ids"]).to(device)
    attention_mask = torch.tensor(encoded["attention_mask"]).to(device)

    bos_offset = 1  # token 0 is the BOS/CLS special token
    all_token_probs = []
    for i in tqdm(range(len(seq))):
        input_ids_masked = input_ids.clone()
        input_ids_masked[0, i + bos_offset] = mask_id

        with torch.no_grad():
            outputs = model(input_ids=input_ids_masked, attention_mask=attention_mask)
            # Only the distribution at the masked position is kept, so
            # log-softmax just that row of the logits instead of the whole
            # sequence (softmax is per-position, the result is identical).
            row_log_probs = torch.log_softmax(outputs.logits[0, i + bos_offset], dim=-1)
        all_token_probs.append(row_log_probs.unsqueeze(0))  # (1, vocab_size)
    token_probs = torch.cat(all_token_probs, dim=0).unsqueeze(0)  # (1, seq_len, vocab_size)

    # Score every mutant row against the collected log-probabilities.
    df[model_location] = df.apply(
        lambda row: label_row(
            row[args.mutation_col],
            args.sequence,
            token_probs,
            tokenizer,
            offset,
            model_location,
        ),
        axis=1,
    )

    return df

def create_parser():
    """Define the command-line interface and return the parsed arguments.

    Note: despite the name, this returns an ``argparse.Namespace`` (already
    parsed), not the parser object itself.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--dataset_input", type=str, default=None, help="input dms dataset directory")
    cli.add_argument("--model_location", type=str, nargs="+", help="model location")
    cli.add_argument("--dms_input", type=str, default=None, help="input dms file")
    cli.add_argument("--mutation_col", type=str, default="mutant", help="mutation column name")
    cli.add_argument("--sequence", type=str, default=None, help="sequence")
    return cli.parse_args()

if __name__ == "__main__":
    protein_names = []
    # Entries in the dataset directory that are not protein sub-folders.
    exclude_files_or_dirs = ["file.txt", ".ipynb_checkpoints"]
    args = create_parser()

    for model_locate in args.model_location:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        tokenizer = EsmTokenizer.from_pretrained(f"{model_locate}", do_lower_case=False, return_tensors="pt")
        model = EsmForMaskedLM.from_pretrained(f"{model_locate}").to(device)
        # BUGFIX: ESM tokenizers name the mask token "<mask>", so
        # convert_tokens_to_ids("[MASK]") silently returned the <unk> id and
        # every "masked" forward pass was conditioned on <unk> instead.
        # mask_token_id is correct regardless of the vocabulary's spelling.
        mask_id = tokenizer.mask_token_id
        print(f"Transfer {model_locate} to GPU!")
        spearmans = []
        # Evaluate a whole dataset directory, one protein per sub-folder.
        if args.dataset_input:
            protein_names = sorted(os.listdir(args.dataset_input))
            if protein_names == []:
                raise ValueError("No protein found in dataset input path!")

            # Drop known non-protein entries.
            for item in exclude_files_or_dirs:
                if item in protein_names:
                    protein_names.remove(item)

            for idx, name in enumerate(protein_names):
                print("-" * 60)
                print(f"Processing 【{name}】. Current {idx+1}/{len(protein_names)}...")

                cur_dir = os.path.join(args.dataset_input, name)
                # Wild-type sequence comes from <name>.fasta when present;
                # otherwise the --sequence argument (or the previous protein's
                # sequence) is reused — TODO confirm that is intended.
                fasta_path = os.path.join(cur_dir, name + ".fasta")
                if os.path.exists(fasta_path):
                    args.sequence = read_fasta(fasta_path, "seq")

                # Persist basic protein info next to the data.
                # NOTE(review): the "lenth" key typo is preserved on purpose —
                # downstream consumers may already rely on it.
                mutation_info = {"name": name, "seq": args.sequence}
                mutation_info["lenth"] = len(args.sequence)
                info_path = os.path.join(cur_dir, f"{name}-info.json")
                with open(info_path, "w", encoding='utf-8') as f:
                    f.write(json.dumps(mutation_info))

                # Create the prediction directory (idempotent).
                prediction_path = os.path.join(cur_dir, "predictions")
                os.makedirs(prediction_path, exist_ok=True)

                # Predict scores for this protein's mutant table.
                mutant_file = os.path.join(cur_dir, f"{name}.csv")
                args.dms_input = mutant_file

                model_name = model_locate.split("/")[-1]
                out_path = os.path.join(prediction_path, f"{name}.{model_name}.csv")
                print(out_path)
                result = pd.read_csv(mutant_file)
                result["score"] = predict(args, model_locate, model, tokenizer, mask_id)[model_locate]
                result.to_csv(out_path, index=False)
                # BUGFIX: the script labels and prints this metric "spearman"
                # but computed Pearson correlation; use spearmanr so the value
                # matches its name (rank correlation is the standard DMS
                # benchmark metric).
                spearmans.append(spearmanr(result["score"], result["DMS_score"])[0])
                print(f"【{args.dms_input}】 Done! spearman: {spearmans[-1]}")
        # Guard the summary: np.mean([]) is nan (plus a runtime warning).
        if spearmans:
            print(f"mean spearmans: {np.mean(spearmans)}")
        else:
            print("mean spearmans: n/a (nothing evaluated)")