import os
import argparse
import json
import pandas as pd
import numpy as np
import torch

from transformers import PreTrainedTokenizerFast
import tranception
from tranception import config, model_pytorch
import scipy.stats as stats
# Absolute directory of this script; used below to locate the bundled tokenizer file.
dir_path = os.path.dirname(os.path.abspath(__file__))

def calculate_spearman_correlation(X, Y):
    """Return Spearman's rank correlation coefficient (rho) between X and Y."""
    rho, _pvalue = stats.spearmanr(X, Y)
    return rho

def calculate_spearman_correlation_p(X, Y):
    """Return the p-value of the Spearman rank-correlation test between X and Y."""
    _rho, pvalue = stats.spearmanr(X, Y)
    return pvalue

def full_sequence(origin_sequence, raw_mutant, offset):
    """Apply a (possibly multi-site) substitution mutant string to a wild-type sequence.

    Args:
        origin_sequence: wild-type amino-acid sequence (may be lower- or upper-case).
        raw_mutant: mutant descriptor such as "A123V". Multiple substitutions are
            separated by ";" or ":" (e.g. "A123V:A124G"). The entry "WT" (any case)
            is a no-op.
        offset: value subtracted from the mutant's 1-indexed position to obtain a
            0-based index into `origin_sequence` (typically 1).

    Returns:
        The mutated sequence. The substituted residue keeps exactly the case it has
        in `raw_mutant` (it is deliberately not normalized to the sequence's case).

    Raises:
        AssertionError: if the wild-type residue named in the mutant descriptor does
            not match the sequence at the given position (case-insensitively).
        ValueError: if the position field of a mutant entry is not an integer.
    """
    # Multi-mutants may use either ";" or ":" as a separator.
    sep = ":" if ":" in raw_mutant else ";"

    sequence = origin_sequence
    for raw_mut in raw_mutant.split(sep):
        if raw_mut.lower() == "wt":  # "WT" entry means no change
            continue
        wt_res = raw_mut[0]
        mut_res = raw_mut[-1]
        pos = int(raw_mut[1:-1]) - offset
        # Tolerate lower-case sequences: compare against the lower-cased wild-type
        # residue when the sequence itself stores the residue in lower case.
        # (Previously done by rebuilding the mutant string via str(list).replace,
        # which left embedded spaces; comparing residues directly is equivalent.)
        if sequence[pos] == wt_res.lower():
            wt_res = wt_res.lower()
        # BUGFIX: the original used `assert cond, print(...)`, which prints as a side
        # effect and passes None as the assertion message; use a real message instead.
        assert sequence[pos] == wt_res, (
            f"wild-type mismatch at index {pos}: sequence has {sequence[pos]!r}, "
            f"mutant descriptor says {wt_res!r}"
        )
        sequence = sequence[:pos] + mut_res + sequence[pos + 1:]
    return sequence
    
def main():
    """
    Score sets of mutated protein sequences (substitutions or indels) with Tranception.

    For each protein in `proteus_names` this:
      1. reads the wild-type (target) sequence from the first entry of an .a2m MSA file,
      2. loads a Tranception checkpoint and reconstructs the full mutated sequence for
         every mutant listed in the experimental DMS .tsv file,
      3. scores all mutants with the model and writes the scores to CSV,
      4. computes the Spearman correlation between model scores ('avg_score') and the
         experimental 'score' column and writes it to a per-protein summary .txt file.

    NOTE(review): all data/checkpoint paths are hard-coded for one machine
    (/home/cloudam/...); adjust them before running elsewhere.
    """
    # The CLI flags are identical for every protein, so build and parse them once
    # (previously the parser was rebuilt on every loop iteration).
    parser = argparse.ArgumentParser(description='Tranception scoring')
    parser.add_argument('--deactivate_scoring_mirror', action='store_true', help='Whether to deactivate sequence scoring from both directions (Left->Right and Right->Left)')
    parser.add_argument('--indel_mode', action='store_true', help='Flag to be used when scoring insertions and deletions. Otherwise assumes substitutions')
    parser.add_argument('--scoring_window', default="optimal", type=str, help='Sequence window selection mode (when sequence length longer than model context size)')
    parser.add_argument('--num_workers', default=2, type=int, help='Number of workers for model scoring data loader')
    parser.add_argument('--inference_time_retrieval', action='store_true', help='Whether to perform inference-time retrieval')
    parser.add_argument('--retrieval_inference_weight', default=0.6, type=float, help='Coefficient (alpha) used when aggregating autoregressive transformer and retrieval')
    parser.add_argument('--MSA_weights_folder', default=None, type=str, help='Path to MSA weights for neighborhood scoring')
    args = parser.parse_args()

    proteus_names = ['WW', 'GFP']
    for proteus_name in proteus_names:
        print(proteus_name)
        # Hard-coded run configuration for this protein.
        checkpoint = '/home/cloudam/wyx/Tranception-main/Tranception_Small'
        model_framework = 'pytorch'
        batch_size_inference = 32
        DMS_reference_file_path = None
        MSA_weight_file_name = None
        MSA_start = 1  # 1-indexed start of the MSA region
        DMS_file_name = proteus_name + '.tsv'
        DMS_data_folder = '/home/cloudam/wyx/Tranception-main/exp/' + proteus_name + '.tsv'
        output_scores_folder = '/home/cloudam/wyx/Tranception-main/score/' + proteus_name + '_M'
        MSA_folder = '/home/cloudam/wyx/Tranception-main/data/a2m_M/' + proteus_name + '_M.a2m'
        clustal_omega_location = '/home/cloudam/wyx/Tranception-main/omega/bin/clustalo'

        model_name = str(checkpoint).split("/")[-1]

        tokenizer = PreTrainedTokenizerFast(tokenizer_file=dir_path + os.sep + "tranception/utils/tokenizers/Basic_tokenizer",
                                            unk_token="[UNK]",
                                            sep_token="[SEP]",
                                            pad_token="[PAD]",
                                            cls_token="[CLS]",
                                            mask_token="[MASK]"
                                            )

        if DMS_reference_file_path:
            # Dead branch: DMS_reference_file_path is hard-coded to None above.
            # The reference-file-driven DMS lookup that used to live here was
            # already commented out; see repository history if it is needed again.
            pass
        else:
            # The target (wild-type) sequence is the first entry of the a2m MSA
            # file: skip the header line, then concatenate sequence lines (minus
            # their trailing newline) until the next '>' header.
            with open(str(MSA_folder), 'r') as msa_file:
                msa_lines = msa_file.readlines()[1:]
            target_seq = ''
            for msa_line in msa_lines:
                if msa_line[0] == '>':
                    break
                target_seq += msa_line[:-1]
            print("Target Seq:")
            print(target_seq)

            DMS_id = str(DMS_file_name).split(".")[0]
            if args.inference_time_retrieval:
                MSA_data_file = MSA_folder
                # BUGFIX: previously read the non-existent attribute
                # `args.MSA_weight_file_name`, raising AttributeError whenever
                # --MSA_weights_folder was supplied; use the local variable instead.
                # NOTE(review): MSA_weight_file_name is hard-coded to None above, so
                # this path is still incomplete — confirm the intended weight file.
                MSA_weight_file_name = str(args.MSA_weights_folder) + str(os.sep) + str(MSA_weight_file_name) if args.MSA_weights_folder is not None else None
                MSA_start = MSA_start - 1  # convert from 1-indexing to 0-indexing
                MSA_end = len(target_seq)

        # Load the model configuration. Named `model_config` to avoid shadowing the
        # imported `tranception.config` module (the original rebound `config`).
        with open(str(checkpoint) + str(os.sep) + 'config.json') as config_file:
            model_config = json.load(config_file)
        model_config = tranception.config.TranceptionConfig(**model_config)
        model_config.attention_mode = "tranception"
        model_config.position_embedding = "grouped_alibi"
        model_config.tokenizer = tokenizer
        model_config.scoring_window = args.scoring_window

        if args.inference_time_retrieval:
            model_config.retrieval_aggregation_mode = "aggregate_indel" if args.indel_mode else "aggregate_substitution"
            model_config.MSA_filename = MSA_data_file
            model_config.full_protein_length = len(target_seq)
            model_config.MSA_weight_file_name = MSA_weight_file_name
            model_config.retrieval_inference_weight = args.retrieval_inference_weight
            model_config.MSA_start = MSA_start
            model_config.MSA_end = MSA_end
            if args.indel_mode:
                # BUGFIX: removed a stray `print(A)` that raised NameError here.
                model_config.clustal_omega_location = clustal_omega_location
        else:
            model_config.retrieval_aggregation_mode = None

        if model_framework == "pytorch":
            model = tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=checkpoint,
                                                                                     config=model_config)
            if torch.cuda.is_available():
                model.cuda()
        model.eval()

        # Build the output path: <output_scores_folder>/<model+settings>/<DMS_id>.csv
        retrieval_type = '_retrieval_' + str(args.retrieval_inference_weight) if args.inference_time_retrieval else '_no_retrieval'
        mutation_type = '_indels' if args.indel_mode else '_substitutions'
        mirror_type = '_no_mirror' if args.deactivate_scoring_mirror else ''
        scoring_filename = output_scores_folder + os.sep + model_name + retrieval_type + mirror_type + mutation_type
        # makedirs creates output_scores_folder and the settings subfolder in one call.
        os.makedirs(scoring_filename, exist_ok=True)
        scoring_filename += os.sep + DMS_id + '.csv'

        # Experimental DMS data: tab-separated, one row per mutant, with a 'mutant'
        # descriptor column and an experimental 'score' column.
        DMS_data = pd.read_table(str(DMS_data_folder))
        # Reconstruct the full mutated sequence from each mutant descriptor
        # (mutant positions are 1-indexed, hence offset=1).
        DMS_data["mutated_sequence"] = DMS_data.apply(lambda row: full_sequence(target_seq, row["mutant"], 1), axis=1)

        all_scores = model.score_mutants(
            DMS_data=DMS_data,
            target_seq=target_seq,
            scoring_mirror=not args.deactivate_scoring_mirror,
            batch_size_inference=batch_size_inference,
            num_workers=args.num_workers,
            indel_mode=args.indel_mode
        )
        all_scores.to_csv(scoring_filename, index=False)

        # Join model scores with experimental scores on the mutated sequence, then
        # drop duplicate sequences and rows whose model score is exactly zero.
        merged = pd.merge(all_scores, DMS_data[['mutated_sequence', 'score']], on='mutated_sequence', how='left', suffixes=('', 'score'))
        merged = merged.drop_duplicates('mutated_sequence', keep='first')
        scored = merged.loc[merged['avg_score'] != 0, ['mutated_sequence', 'avg_score', 'score']]
        print(scored)

        experimental_scores = scored['score']
        model_scores = scored['avg_score']
        sp_p = calculate_spearman_correlation_p(experimental_scores, model_scores)
        sp = calculate_spearman_correlation(experimental_scores, model_scores)
        print(sp, sp_p)
        # Persist the per-protein Spearman rho and p-value.
        with open('/home/cloudam/wyx/Tranception-main/score/' + proteus_name + '_M.txt', 'w+') as summary_file:
            summary_file.writelines([proteus_name, '    ', str(sp), '   ', str(sp_p)])

# Script entry point: run the full scoring pipeline when executed directly.
if __name__ == '__main__':
    main()