import argparse
import re
import os
import torch
import sys
sys.path.append(os.getcwd())
sys.path.append("..")
import transformers
import json
import warnings
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers import T5Tokenizer, T5EncoderModel, T5Model,T5ForConditionalGeneration
from transformers import BertForMaskedLM, BertTokenizer
from src.utils import read_fasta, full_sequence, scan_max_mutant, mutant_filter

transformers.logging.set_verbosity_error()
warnings.filterwarnings("ignore")

def label_row(rows, sequence, token_probs, tokenizer, offset_idx, model):
    """Score one row of mutations as the mean (mutant - wild-type) log-probability.

    Args:
        rows: Mutation string such as "A123G"; multiple mutations may be
            joined by ";" (or ":" when a ":" is present). The literal
            "wt"/"WT" contributes a score of 0.
        sequence: Wild-type protein sequence the positions refer to.
        token_probs: Log-probability tensor of shape (1, seq_len, vocab_size).
        tokenizer: HuggingFace tokenizer matching ``model``.
        offset_idx: Offset subtracted from the mutation's 1-based position.
        model: Model-location string; containing "t5" or "bert" selects the
            token-encoding path.

    Returns:
        float: Mean per-mutation score.

    Raises:
        ValueError: If ``model`` names neither a t5 nor a bert model.
        AssertionError: If a mutation's wild-type letter disagrees with
            ``sequence``.
    """
    scores = []
    sep = ";"
    if ":" in rows:
        sep = ":"
    for row in rows.split(sep):
        if row.lower() == "wt":
            scores.append(0)
            continue
        wt, idx, mt = row[0], int(row[1:-1]) - offset_idx, row[-1]
        # Bug fix: the original message printed `mt`, but the check compares
        # the sequence letter against the declared wild-type `wt`.
        assert sequence[idx] == wt, f"The pos {idx} `{sequence[idx]}` does not match the `{wt}`"
        if "t5" in model:
            wt_encoded = tokenizer.batch_encode_plus([wt])["input_ids"][0][0]
            mt_encoded = tokenizer.batch_encode_plus([mt])["input_ids"][0][0]
        elif "bert" in model:
            wt_encoded = tokenizer.convert_tokens_to_ids(wt)
            mt_encoded = tokenizer.convert_tokens_to_ids(mt)
        else:
            # Bug fix: previously fell through to a NameError on wt_encoded.
            raise ValueError(f"Unsupported model type: {model!r}")
        score = token_probs[0, idx, mt_encoded] - token_probs[0, idx, wt_encoded]
        scores.append(score.item())
    return sum(scores) / len(scores)


def predict(args, model_location, model, tokenizer, mask_id):
    """Score every mutation row in ``args.dms_input`` against ``args.sequence``.

    Each position of the sequence is masked in turn, the model is run once per
    position, and the log-softmax over the vocabulary at that position is
    collected; ``label_row`` then converts those per-position log-probabilities
    into a mutant-vs-wild-type score for each dataframe row.

    NOTE(review): relies on the module-level ``device`` assigned in the
    ``__main__`` block — this function is not self-contained.

    Args:
        args: Parsed CLI namespace; reads ``dms_input``, ``sequence``,
            and ``mutation_col``.
        model_location: Model-name string; containing "t5"/"bert" picks the
            forward-pass branch and the BOS offset.
        model: Loaded HuggingFace model, already moved to ``device``.
        tokenizer: Tokenizer matching ``model``.
        mask_id: Token id used to mask one position at a time.

    Returns:
        pandas.DataFrame: the input table with a new column named
        ``model_location`` holding the per-row scores.
    """
    df = pd.read_table(args.dms_input)
    # mutation positions in the input file are 1-based
    offset = 1
    
    seq = args.sequence
    # prepare your protein sequences as a list
    sequence_examples = [seq]
    # this will replace all rare/ambiguous amino acids by X and introduce white-space between all amino acids
    sequence_examples = [" ".join(list(re.sub(r"[UZOB]", "X", sequence))) for sequence in sequence_examples]
    
    # tokenize sequences and pad up to the longest sequence in the batch
    ids = tokenizer.batch_encode_plus(sequence_examples, add_special_tokens=True, padding="longest")
    input_ids = torch.tensor(ids['input_ids']).to(device)
    attention_mask = torch.tensor(ids['attention_mask']).to(device)
    
    
    # BERT prepends a [CLS] token, so sequence position i lives at token i+1;
    # the T5 encoding used here has no BOS, hence offset 0.
    bos_offset = 0 if "t5" in model_location else 1
    all_token_probs = []
    # one forward pass per masked position
    for i in tqdm(range(len(seq))):
        input_ids_masked = input_ids.clone()
        input_ids_masked[0, i + bos_offset] = mask_id
            
        with torch.no_grad():
            if "t5" in model_location:
                outputs = model(input_ids=input_ids_masked, labels=input_ids, attention_mask=attention_mask)
                # NOTE(review): this assignment is immediately overwritten by the
                # shared slice below (bos_offset is 0 for t5, so the result is the
                # same); kept as-is.
                token_probs = outputs.logits[0,:len(seq)]
            elif "bert" in model_location:
                outputs = model(input_ids=input_ids_masked, attention_mask=attention_mask)
            # extract embeddings for the first ([0,:]) sequence in the batch while removing padded & special tokens
            token_probs = outputs.logits[0,bos_offset:len(seq) + bos_offset]
            token_probs = torch.log_softmax(token_probs, dim=-1).unsqueeze(0) # 1 * seq_len * vocab_size
        # keep only the distribution at the masked position i
        all_token_probs.append(token_probs[:, i])  # vocab size
    token_probs = torch.cat(all_token_probs, dim=0).unsqueeze(0)
    print(f"token_probs: {token_probs.shape}")
    
    # label the dataframe
    df[model_location] = df.apply(
        lambda row: label_row(  
            row[args.mutation_col], 
            args.sequence,
            token_probs, tokenizer, 
            offset, model_location
        ),
        axis=1,
    )
                
    return df

def create_parser():
    """Parse command-line options for the mutation-scoring script.

    Returns:
        argparse.Namespace with fields ``dataset_input``, ``model_location``,
        ``dms_input``, ``mutation_col`` and ``sequence``.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--dataset_input", type=str, default=None,
                   help="input dms dataset directory")
    p.add_argument("--model_location", type=str, nargs="+",
                   help="model location")
    p.add_argument("--dms_input", type=str, default=None,
                   help="input dms file")
    p.add_argument("--mutation_col", type=str, default="mutant",
                   help="mutation column name")
    p.add_argument("--sequence", type=str, default=None,
                   help="sequence")
    return p.parse_args()

if __name__ == "__main__":
    protein_names = []
    # bookkeeping entries to skip when scanning dataset directories
    exclude_files_or_dirs = ["file.txt", ".ipynb_checkpoints"]
    args = create_parser()

    for model_locate in args.model_location:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        print(f"Transfer {model_locate} to GPU!")
        if "t5" in model_locate:
            # max length 512
            # Load the tokenizer
            tokenizer = T5Tokenizer.from_pretrained(f'Rostlab/{model_locate}', do_lower_case=False, return_tensors="pt")
            # Load the model
            model = T5ForConditionalGeneration.from_pretrained(f'Rostlab/{model_locate}').to(device)
            # Bug fix: `device == 'cpu'` compared a torch.device to a str and was
            # always False, so half precision ran even on CPU. Compare the device
            # *type*; only GPUs support half precision.
            model.full() if device.type == 'cpu' else model.half()
            mask_id = tokenizer.convert_tokens_to_ids("<extra_id_0>")

        elif "bert" in model_locate:
            # max length 512
            tokenizer = BertTokenizer.from_pretrained(f"Rostlab/{model_locate}", do_lower_case=False, return_tensors="pt")
            model = BertForMaskedLM.from_pretrained(f"Rostlab/{model_locate}").to(device)
            mask_id = tokenizer.convert_tokens_to_ids("[MASK]")

        else:
            # Bug fix: previously fell through and crashed later with a NameError
            # on `tokenizer`; fail fast with a clear message instead.
            raise ValueError(f"Unsupported model location: {model_locate!r}")

        # evaluate the whole dataset directory, one protein at a time
        if args.dataset_input:
            # get protein names in dataset path
            protein_names = sorted(os.listdir(args.dataset_input))
            if not protein_names:
                raise ValueError("No protein found in dataset input path!")

            # remove bookkeeping files or dirs
            for item in exclude_files_or_dirs:
                if item in protein_names:
                    protein_names.remove(item)

            # loop over proteins
            for idx, name in enumerate(protein_names):
                print("-" * 60)
                print(f"Processing 【{name}】. Current {idx+1}/{len(protein_names)}...")

                # target protein directory
                cur_dir = os.path.join(args.dataset_input, name)
                # wild-type sequence for this protein
                fasta_path = os.path.join(cur_dir, name + ".fasta")
                if os.path.exists(fasta_path):
                    args.sequence = read_fasta(fasta_path, "seq")

                # persist basic protein info next to the data
                # NOTE(review): the "lenth" key typo is preserved — downstream
                # consumers may already depend on it.
                mutation_info = {"name": name, "seq": args.sequence}
                curr_seq_len = len(args.sequence)
                mutation_info["lenth"] = curr_seq_len
                info_path = os.path.join(cur_dir, f"{name}-info.json")
                with open(info_path, "w", encoding='utf-8') as f:
                    f.write(json.dumps(mutation_info))

                # all mutation files: [xxx.tsv]
                mutant_files = os.listdir(os.path.join(cur_dir, "experiments"))
                for item in exclude_files_or_dirs:
                    if item in mutant_files:
                        mutant_files.remove(item)
                if not mutant_files:
                    print(f"Experiment tsv not exists!")
                    continue

                # create prediction directory
                prediction_path = os.path.join(cur_dir, "predictions")
                os.makedirs(prediction_path, exist_ok=True)

                # predict
                for file in mutant_files:
                    mutant_file = os.path.join(cur_dir, "experiments", file)
                    args.dms_input = mutant_file

                    # loop input models
                    out_path = os.path.join(prediction_path, f"{file[:-4]}.{model_locate}.tsv")
                    # skip if already exists — checked before reading the table
                    # to avoid a pointless pd.read_table on skipped files
                    if os.path.exists(out_path):
                        print(f"{out_path} exists, skipping!")
                        continue
                    result = pd.read_table(mutant_file)
                    result["score"] = predict(args, model_locate, model, tokenizer, mask_id)[model_locate]
                    result.to_csv(out_path, sep="\t", index=False)

                    print(f"【{args.dms_input}】 Done!")