import os
import sys
sys.path.append(os.getcwd())
sys.path.append("..")
import argparse
import tqdm 

from transformers import AutoTokenizer, AutoConfig

from scipy.stats import spearmanr
import numpy as np
import pandas as pd

import torch
from torch.nn import CrossEntropyLoss
from rita.rita_modeling import RITAModelForCausalLM
from src.utils import read_fasta

def calc_fitness(model, prots, tokenizer, device='cuda:0', model_context_len=1023):
    """
    Score each protein by its (negated) cross-entropy under a causal LM.

    Long sequences are split into non-overlapping windows of at most
    ``model_context_len`` characters. Each window is scored in both the
    forward and the reversed direction (as in the RITA evaluation protocol)
    and the per-window mean losses are summed, negated, and accumulated.

    Args:
        model: causal LM whose forward pass returns an object with a
            ``.logits`` tensor of shape (batch, seq_len, vocab).
        prots: iterable of protein sequence strings.
        tokenizer: tokenizer exposing ``encode(str) -> list[int]``.
        device: torch device string the token ids are moved to.
        model_context_len: maximum window length in characters.

    Returns:
        np.ndarray with one score per input sequence (higher = more likely).
    """
    loss_list = []
    loss_fn = CrossEntropyLoss()
    with torch.no_grad():
        for prot in tqdm.tqdm(prots):
            loss_val = 0
            # Step through the sequence in fixed-size windows. The previous
            # "1 + len/context" window count emitted an empty trailing chunk
            # whenever len(prot) was an exact multiple of the context length
            # (including len == context), which produced a NaN loss.
            sequence_chunks = [
                prot[start:start + model_context_len]
                for start in range(0, len(prot), model_context_len)
            ] or [prot]  # keep the original behavior for an empty sequence

            for chunk in sequence_chunks:
                # Score the window and its reverse; both directions contribute.
                for p in [chunk, chunk[::-1]]:
                    ids = torch.tensor([tokenizer.encode(p)]).to(device)
                    input_ids = ids[:, :-1]   # all tokens but the last
                    targets = ids[:, 1:]      # next-token prediction targets

                    logits = model(input_ids).logits
                    loss = loss_fn(input=logits.view(-1, logits.size(-1)),
                                   target=targets.view(-1))
                    loss_val += -loss.item()

            loss_list.append(loss_val)
    return np.array(loss_list)

def get_mutated_sequence(focus_seq, mutant, start_idx=1, AA_vocab="ACDEFGHIKLMNPQRSTVWY"):
    """
    Mutate an input sequence (focus_seq) via mutation triplets (substitutions only).

    Each triplet has the form ``<from_AA><position><to_AA>`` (e.g. "A42G");
    multiple triplets are joined by ":" or ";". Positions are typically
    1-indexed: ``start_idx`` is used for switching to 0-indexing.

    Args:
        focus_seq: wild-type sequence to mutate.
        mutant: mutation triplet(s), e.g. "A42G" or "A42G:H87Y".
        start_idx: index of the first residue in the mutation numbering.
        AA_vocab: alphabet of valid target amino acids.

    Returns:
        The mutated sequence as a string.

    Raises:
        ValueError: if a mutation triplet cannot be parsed.
        AssertionError: if from_AA mismatches focus_seq or to_AA is invalid.
    """
    mutated_seq = list(focus_seq)
    sep = ";" if ";" in mutant else ":"
    for mutation in mutant.split(sep):
        try:
            from_AA, position, to_AA = mutation[0], int(mutation[1:-1]), mutation[-1]
        except (ValueError, IndexError) as err:
            # Fail loudly: the previous code only printed here and then read
            # unbound (or stale, from the previous iteration) variables,
            # silently corrupting the output sequence.
            raise ValueError("Issue with mutant: "+str(mutation)) from err
        relative_position = position - start_idx
        assert (from_AA==focus_seq[relative_position]), "Invalid from_AA or mutant position: "+str(mutation)+" from_AA: "+str(from_AA) + " relative pos: "+str(relative_position) + " focus_seq: "+str(focus_seq)
        assert (to_AA in AA_vocab) , "Mutant to_AA is invalid: "+str(mutation)
        mutated_seq[relative_position] = to_AA
    return "".join(mutated_seq)

def main():
    """
    Main script to score sets of mutated protein sequences (substitutions or indels) with RITA models.

    Two modes:
      * --dataset_input: score every protein folder under the dataset
        directory, writing predictions/<protein>.<model>.tsv per protein.
      * --dms_input (with --target_seq): score a single DMS table and write
        one "<model>_score" column back into the same file.
    """
    parser = argparse.ArgumentParser(description='RITA scoring')
    # nargs='+' makes the parsed value a list, so the default must be a list
    # too; a bare string default would make the loop iterate its characters.
    parser.add_argument('--model_location', default=["lightonai/RITA_s"], type=str, help='Name of or path to RITA model', nargs='+')
    parser.add_argument('--dataset_input', default=None, type=str, help='Name of dataset to score')
    parser.add_argument('--target_seq', default=None, type=str, help='Target sequence to score')
    parser.add_argument('--dms_input', default=None, type=str, help='Name of DMS dataset to score')
    args = parser.parse_args()

    for model_loc in args.model_location:
        tokenizer = AutoTokenizer.from_pretrained(model_loc)
        model = RITAModelForCausalLM.from_pretrained(model_loc, trust_remote_code=True)
        model.cuda()
        model.eval()  # disable dropout etc.; torch.no_grad() alone does not
        # split("/") on a name without "/" returns the whole name, so
        # model_name is always bound (previously undefined for bare names).
        model_name = model_loc.split("/")[-1]
        print("-"*60)
        print(f"Computing scores with RITA: {model_name}")

        if args.dataset_input is not None:
            # Dataset mode: each subfolder holds <protein>.fasta and an
            # experiments/<protein>.tsv table of mutants.
            proteins = sorted(os.listdir(args.dataset_input))
            for protein_idx, protein in enumerate(proteins):
                print(f"Processing {model_name} on【{protein}】. Current {protein_idx+1}/{len(proteins)}...")
                out_path = os.path.join(args.dataset_input, protein, "predictions", f"{protein}.{model_name}.tsv")
                if os.path.exists(out_path):
                    print(f"{out_path} exists, skipping!")
                    continue
                args.target_seq = read_fasta(os.path.join(args.dataset_input, protein, f"{protein}.fasta"), "seq")
                mutant_file = os.path.join(args.dataset_input, protein, "experiments", f"{protein}.tsv")
                mutant_df = pd.read_table(mutant_file)
                mutant_seqs = [get_mutated_sequence(args.target_seq, mutant) for mutant in mutant_df["mutant"]]
                model_scores = calc_fitness(model=model, prots=mutant_seqs, tokenizer=tokenizer)
                assert len(model_scores) == len(mutant_df), f"Length of scores ({len(model_scores)}) does not match length of mutants ({len(mutant_df)})"
                mutant_df["score"] = model_scores
                os.makedirs(os.path.join(args.dataset_input, protein, "predictions"), exist_ok=True)
                mutant_df.to_csv(out_path, sep="\t", index=False)
                print(f"Saved {out_path}")
        else:
            # DMS mode: score one mutant table and write scores back in place.
            assert args.target_seq is not None, "Must provide either a dataset input or a target sequence"
            assert args.dms_input is not None, "Must provide --dms_input together with --target_seq"
            if args.target_seq.endswith(".fasta"):
                args.target_seq = read_fasta(args.target_seq, "seq")
            mutant_df = pd.read_table(args.dms_input)
            mutant_seqs = [get_mutated_sequence(args.target_seq, mutant) for mutant in mutant_df["mutant"]]
            model_scores = calc_fitness(model=model, prots=mutant_seqs, tokenizer=tokenizer)
            assert len(model_scores) == len(mutant_df), f"Length of scores ({len(model_scores)}) does not match length of mutants ({len(mutant_df)})"
            mutant_df[f"{model_name}_score"] = model_scores
            mutant_df.to_csv(args.dms_input, sep="\t", index=False)

# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main()