import os
import argparse
import json
import pandas as pd
import sys
sys.path.append(os.getcwd())
sys.path.append("..")
import torch
import warnings
import tranception
from transformers import PreTrainedTokenizerFast, logging
from tranception import model_pytorch
from src.utils import read_fasta, full_sequence

# Silence noisy third-party warnings and reduce transformers logging to errors only.
warnings.filterwarnings("ignore")
logging.set_verbosity_error()

# Absolute directory of this script; used below to locate the bundled tokenizer file.
dir_path = os.path.dirname(os.path.abspath(__file__))




def create_parser(argv=None):
    """Build the CLI argument parser and parse arguments.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case ``sys.argv[1:]`` is used (previous behavior). Passing an
            explicit list makes the parser testable and reusable.

    Returns:
        argparse.Namespace with all scoring options.
    """
    parser = argparse.ArgumentParser(description='Tranception scoring')
    parser.add_argument("--dataset_input", type=str, default=None, help="input dms dataset directory")
    parser.add_argument('--model_dir', type=str, help='Path of Tranception model checkpoint')
    parser.add_argument('--model_locate', type=str, nargs="+", help='Name(s) of model checkpoint directories under model_dir')
    parser.add_argument('--batch_size_inference', default=20, type=int, help='Batch size for inference')

    # Fields to be passed manually if reference file is not used
    parser.add_argument('--target_seq', default=None, type=str, help='Full wild type sequence that is mutated in the DMS assay')
    parser.add_argument('--fasta_file', default=None, type=str, help='Name of fasta file with full wild type sequence that is mutated in the DMS assay')
    parser.add_argument('--DMS_file', default=None, type=str, help='Name of DMS assay file')
    parser.add_argument('--MSA_file', default=None, type=str, help='Name of MSA (eg., a2m) file constructed on the wild type sequence')
    parser.add_argument('--MSA_weight_file', default=None, type=str, help='Weight of sequences in the MSA (optional)')
    parser.add_argument('--MSA_start', default=None, type=int, help='Sequence position that the MSA starts at (1-indexing)')
    parser.add_argument('--MSA_end', default=None, type=int, help='Sequence position that the MSA ends at (1-indexing)')

    parser.add_argument('--deactivate_scoring_mirror', action='store_true', help='Whether to deactivate sequence scoring from both directions (Left->Right and Right->Left)')
    parser.add_argument('--indel_mode', action='store_true', help='Flag to be used when scoring insertions and deletions. Otherwise assumes substitutions')
    parser.add_argument('--scoring_window', default="optimal", type=str, help='Sequence window selection mode (when sequence length longer than model context size)')
    parser.add_argument('--num_workers', default=10, type=int, help='Number of workers for model scoring data loader')
    parser.add_argument('--inference_time_retrieval', action='store_true', help='Whether to perform inference-time retrieval')
    parser.add_argument('--retrieval_inference_weight', default=0.6, type=float, help='Coefficient (alpha) used when aggregating autoregressive transformer and retrieval')
    parser.add_argument('--clustal_omega_location', default=None, type=str, help='Path to Clustal Omega (only needed with scoring indels with retrieval)')
    args = parser.parse_args(argv)
    return args


def _load_config(model_dir, model_name, tokenizer, scoring_window):
    """Load the checkpoint's config.json and apply shared Tranception settings."""
    config_path = os.path.join(model_dir, model_name, 'config.json')
    # Context manager so the config file handle is closed promptly
    # (previously json.load(open(...)) leaked the handle).
    with open(config_path) as f:
        config = tranception.config.TranceptionConfig(**json.load(f))
    config.attention_mode = "tranception"
    config.position_embedding = "grouped_alibi"
    config.tokenizer = tokenizer
    config.scoring_window = scoring_window
    return config


def _load_model(model_dir, model_name, config):
    """Instantiate the checkpoint, move it to GPU when available, set eval mode."""
    model = model_pytorch.TranceptionLMHeadModel.from_pretrained(
        pretrained_model_name_or_path=os.path.join(model_dir, model_name),
        config=config
        )
    if torch.cuda.is_available():
        model.cuda()
        print(f"Transfer {model_name} to GPU!")
    model.eval()
    return model


def _score_dms(model, DMS_data, args):
    """Score the mutated sequences in DMS_data; returns the per-mutant score table."""
    return model.score_mutants(
        DMS_data=DMS_data,
        target_seq=args.target_seq,
        scoring_mirror=not args.deactivate_scoring_mirror,
        batch_size_inference=args.batch_size_inference,
        num_workers=args.num_workers,
        indel_mode=args.indel_mode
        )


def main():
    """
    Main script to score sets of mutated protein sequences (substitutions or indels) with Tranception.

    Two modes:
      * --dataset_input: iterate over per-protein folders, each holding a fasta,
        an optional .a3m MSA, a required .pdb and an "experiments" folder of
        mutant tables; predictions are written under each protein's "predictions".
      * otherwise: score a single --DMS_file against --target_seq / --fasta_file.
    """
    args = create_parser()

    tokenizer = PreTrainedTokenizerFast(tokenizer_file=dir_path+os.sep+"tranception/utils/tokenizers/Basic_tokenizer",
                                                unk_token="[UNK]",
                                                sep_token="[SEP]",
                                                pad_token="[PAD]",
                                                cls_token="[CLS]",
                                                mask_token="[MASK]"
                                            )

    # Entries to ignore when listing experiment files.
    exclude_files_or_dirs = ["file.txt", ".ipynb_checkpoints"]

    for model_name in args.model_locate:
        # evaluate whole dataset
        if args.dataset_input:
            config = _load_config(args.model_dir, model_name, tokenizer, args.scoring_window)
            model = _load_model(args.model_dir, model_name, config)

            # get protein names in dataset path
            protein_names = sorted(os.listdir(args.dataset_input))
            if not protein_names:
                raise ValueError("No protein found in dataset input path!")

            # loop over proteins
            for idx, name in enumerate(protein_names):
                print("-"*60)
                print(f"Processing 【{name}】. Current {idx+1}/{len(protein_names)}...")

                # target protein directory
                cur_dir = os.path.join(args.dataset_input, name)
                # wild-type sequence (only overwritten when a fasta is present)
                fasta_path = os.path.join(cur_dir, name+".fasta")
                if os.path.exists(fasta_path):
                    args.target_seq = read_fasta(fasta_path, "seq")
                # optional MSA path (recorded on args; not consumed in this script)
                msa_path = os.path.join(cur_dir, name+".a3m")
                if os.path.exists(msa_path):
                    args.msa_path = msa_path
                # a pdb file is required; skip the protein otherwise
                pdb_path = os.path.join(cur_dir, name+".pdb")
                if not os.path.exists(pdb_path):
                    print(f"{pdb_path} not exists!")
                    continue
                args.pdb_path = pdb_path

                # all mutation files: [xxx.tsv], minus excluded entries
                mutant_files = [
                    f for f in os.listdir(os.path.join(cur_dir, "experiments"))
                    if f not in exclude_files_or_dirs
                ]
                if not mutant_files:
                    print(f"Experiment tsv not exists!")
                    continue

                # create prediction directory (race-safe)
                prediction_path = os.path.join(cur_dir, "predictions")
                os.makedirs(prediction_path, exist_ok=True)

                # predict
                for file in mutant_files:
                    mutant_file = os.path.join(cur_dir, "experiments", file)
                    args.DMS_file = mutant_file

                    stem = os.path.splitext(file)[0]
                    out_path = os.path.join(prediction_path, f"{stem}.{model_name}.tsv")
                    # skip if already exists
                    if os.path.exists(out_path):
                        print(f"{out_path} exists, skipping!")
                        continue

                    # Read the mutant table once (previously the same file was
                    # read from disk twice); keep an unmodified copy for output.
                    DMS_data = pd.read_table(mutant_file)
                    result = DMS_data.copy()
                    DMS_data["mutated_sequence"] = DMS_data.apply(
                        lambda row: full_sequence(args.target_seq, row["mutant"], 1), axis=1)

                    all_scores = _score_dms(model, DMS_data, args)

                    DMS_score = pd.merge(DMS_data, all_scores, on="mutated_sequence")
                    result["score"] = DMS_score["avg_score"]
                    result.to_csv(out_path, sep="\t", index=False)

                    print(f"【{args.DMS_file}】 Done!")
        else:
            if args.target_seq is None:
                assert args.fasta_file is not None, "Either target_seq or fasta_file must be provided!"
                args.target_seq = read_fasta(args.fasta_file, "seq")

            config = _load_config(args.model_dir, model_name, tokenizer, args.scoring_window)

            if args.inference_time_retrieval:
                config.retrieval_aggregation_mode = "aggregate_indel" if args.indel_mode else "aggregate_substitution"
                config.MSA_filename = args.MSA_file
                config.full_protein_length = len(args.target_seq)
                config.MSA_weight_file_name = args.MSA_weight_file
                config.retrieval_inference_weight = args.retrieval_inference_weight
                config.MSA_start = args.MSA_start - 1  # CLI value uses 1-indexing
                config.MSA_end = args.MSA_end
                if args.indel_mode:
                    config.clustal_omega_location = args.clustal_omega_location
            else:
                config.retrieval_aggregation_mode = None

            # load model
            model = _load_model(args.model_dir, model_name, config)

            if args.DMS_file[-3:] == "tsv":
                DMS_data = pd.read_table(args.DMS_file)
            elif args.DMS_file[-3:] == "csv":
                DMS_data = pd.read_csv(args.DMS_file)
            else:
                # Previously an unsupported extension crashed later with NameError.
                raise ValueError(f"Unsupported DMS file extension: {args.DMS_file}")
            # Keep an unmodified copy for output (single read instead of two).
            result = DMS_data.copy()

            DMS_data["mutated_sequence"] = DMS_data.apply(
                lambda row: full_sequence(args.target_seq, row["mutant"], 1), axis=1)
            all_scores = _score_dms(model, DMS_data, args)

            DMS_score = pd.merge(DMS_data, all_scores, on="mutated_sequence")
            result["score"] = DMS_score["avg_score"]
            out_path = os.path.splitext(args.DMS_file)[0] + f".{model_name}.tsv"
            result.to_csv(out_path, sep="\t", index=False)
        
        
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()