import os
import sys
sys.path.append(os.getcwd())
sys.path.append("..")
import torch
import argparse
import pathlib
import json
import pandas as pd
from sequence_models.pdb_utils import parse_PDB, process_coords
from sequence_models.pretrained import load_model_and_alphabet
from sequence_models.constants import PROTEIN_ALPHABET
from src.utils import read_fasta, full_sequence, scan_max_mutant, mutant_filter

def label_row(rows, sequence, token_probs, offset_idx):
    """Score one mutation entry as the mean log-odds of mutant vs. wildtype.

    An entry may hold several mutations joined by ':' or ';' (e.g. "A24G" or
    "A24G:S55T"); the literal "wt" contributes a score of 0.

    Args:
        rows: mutation string in 'AiB' form, multi-mutant joined by ':'/';'.
        sequence: wildtype protein sequence used to sanity-check positions.
        token_probs: tensor of shape (1, seq_len, vocab) with per-position,
            per-token scores (logits or log-probs; only differences are used).
        offset_idx: subtracted from the 1-based mutation position to obtain
            a 0-based index into `sequence` / `token_probs`.

    Returns:
        float: average score over all listed mutations.
    """
    scores = []
    # Multi-mutant entries may be separated by ':' or ';'.
    sep = ":" if ":" in rows else ";"
    for row in rows.split(sep):
        if row.lower() == "wt":
            scores.append(0)
            continue
        wt, idx, mt = row[0], int(row[1:-1]) - offset_idx, row[-1]
        # BUG FIX: the original message compared against `mt` instead of the
        # residue actually found in the sequence, which made mismatch errors
        # misleading.
        assert sequence[idx] == wt, (
            f"Expected wildtype `{wt}` at pos {idx}, found `{sequence[idx]}`"
        )
        wt_encoded, mt_encoded = PROTEIN_ALPHABET.index(wt), PROTEIN_ALPHABET.index(mt)
        # Log-odds of mutant over wildtype at the mutated position
        # (softmax normalization cancels in the difference).
        score = token_probs[0, idx, mt_encoded] - token_probs[0, idx, wt_encoded]
        scores.append(score.item())
    return sum(scores) / len(scores)

def predict(args, model_location, model, collater):
    """Score every mutation row of ``args.dms_input`` with ``model``.

    Reads the deep-mutational-scan TSV and the PDB at ``args.pdb_path``,
    computes per-position logits with either a sequence-only ('carp') or a
    structure-conditioned ('mif') model, then adds a column named after
    ``model_location`` holding the per-row log-odds score.

    Returns:
        pandas.DataFrame: the scan table augmented with the score column.
    """
    df = pd.read_table(args.dms_input)
    coords, sequence, _ = parse_PDB(args.pdb_path)

    if 'carp' in model_location:
        # Sequence-only model: collate the raw sequence and request logits.
        tokens = collater([[sequence]])[0].cuda()
        logits = model(tokens, logits=True)['logits']
    elif 'mif' in model_location:
        # Structure model: derive inter-residue geometry from the backbone
        # atoms (N, CA, C) before collating.
        backbone = {'N': coords[:, 0], 'CA': coords[:, 1], 'C': coords[:, 2]}
        dist, omega, theta, phi = process_coords(backbone)

        def to_gpu(arr):
            return torch.tensor(arr, dtype=torch.float).cuda()

        batch = [[sequence, to_gpu(dist), to_gpu(omega), to_gpu(theta), to_gpu(phi)]]
        src, nodes, edges, connections, edge_mask = collater(batch)
        logits = model(src.cuda(), nodes.cuda(), edges.cuda(),
                       connections.cuda(), edge_mask.cuda(), result='logits')
    else:
        raise ValueError("Model name must contain 'carp' or 'mif'!")

    # Mutation positions in the TSV are 1-based; label_row subtracts this
    # offset to index the 0-based sequence.
    offset = 1
    df[model_location] = df.apply(
        lambda row: label_row(row[args.mutation_col], sequence, logits, offset),
        axis=1,
    )
    return df

def create_parser():
    """Build the command-line interface for MIF-ST mutation scoring."""
    parser = argparse.ArgumentParser(description='MIF-ST')

    # One or more pretrained model identifiers to evaluate in turn.
    parser.add_argument("--model_location", type=str, help="model names", nargs="+")
    # Single-scan mode inputs/outputs.
    parser.add_argument("--dms_input", type=pathlib.Path,
                        help="TSV file containing the deep mutational scan")
    parser.add_argument("--mutation_col", type=str, default="mutant",
                        help="column in the deep mutational scan labeling the mutation as 'AiB'")
    parser.add_argument("--dms_output", type=pathlib.Path,
                        help="Save file containing the deep mutational scan along with predictions")
    parser.add_argument("--pdb_path", type=pathlib.Path,
                        help="path to pdb in pdb format (required for inverse folding)")
    parser.add_argument("--use_ef", action="store_true", help="use esmfold pdb")
    # Dataset mode: directory of per-protein folders to sweep.
    parser.add_argument("--dataset_input", type=str, default=None,
                        help="input dms dataset directory")
    return parser
    
    
def main(args):
    """Run zero-shot mutation-effect prediction for each requested model.

    Two modes:
      * ``--dataset_input`` given: loop over every protein directory in the
        dataset, score each experiment TSV under ``<protein>/experiments/``,
        and write one prediction TSV per file under ``<protein>/predictions/``.
      * otherwise: score the single scan given by ``--dms_input`` /
        ``--pdb_path`` and write/extend ``--dms_output`` with one column per
        model.
    """
    # Bookkeeping entries that may appear alongside protein dirs / tsv files.
    exclude_files_or_dirs = ["file.txt", ".ipynb_checkpoints"]

    for model_locate in args.model_location:
        print(f"Transfer {model_locate} to GPU...")
        model, collater = load_model_and_alphabet(model_locate)
        model.cuda()
        model.eval()

        # Evaluate a whole dataset directory.
        if args.dataset_input:
            protein_names = sorted(os.listdir(args.dataset_input))
            if not protein_names:
                raise ValueError("No protein found in dataset input path!")

            # Drop non-protein entries.
            for item in exclude_files_or_dirs:
                if item in protein_names:
                    protein_names.remove(item)

            for idx, name in enumerate(protein_names):
                print("-" * 60)
                print(f"Processing 【{name}】. Current {idx+1}/{len(protein_names)}...")

                cur_dir = os.path.join(args.dataset_input, name)

                # Structure-conditioned models need a per-protein PDB file.
                if "mif" in model_locate:
                    pdb_suffix = ".ef.pdb" if args.use_ef else ".pdb"
                    pdb_path = os.path.join(cur_dir, name + pdb_suffix)
                    if not os.path.exists(pdb_path):
                        print(f"{pdb_path} not exists!")
                        continue
                    args.pdb_path = pdb_path

                # All mutation files: <cur_dir>/experiments/*.tsv
                mutant_files = os.listdir(os.path.join(cur_dir, "experiments"))
                for item in exclude_files_or_dirs:
                    if item in mutant_files:
                        mutant_files.remove(item)
                if not mutant_files:
                    print(f"Experiment tsv not exists!")
                    continue

                # Create the prediction directory on first use.
                prediction_path = os.path.join(cur_dir, "predictions")
                if not os.path.exists(prediction_path):
                    os.mkdir(prediction_path)

                for file in mutant_files:
                    mutant_file = os.path.join(cur_dir, "experiments", file)
                    args.dms_input = mutant_file
                    if "mif" in model_locate and args.use_ef:
                        out_path = os.path.join(prediction_path, f"{file[:-4]}.{model_locate}.ef.tsv")
                    else:
                        out_path = os.path.join(prediction_path, f"{file[:-4]}.{model_locate}.tsv")
                    # Skip work that was already produced (check before the
                    # input read — the original read the file and then skipped).
                    if os.path.exists(out_path):
                        print(f"{out_path} exists, skipping!")
                        continue
                    result = pd.read_table(mutant_file)
                    result["score"] = predict(args, model_locate, model, collater)[model_locate]
                    result.to_csv(out_path, sep="\t", index=False)

                    print(f"【{out_path}】 Done!")

        # Evaluate a single protein mutation file.
        else:
            if os.path.exists(args.dms_output):
                # BUG FIX: the output is written tab-separated below, so it
                # must be read back as TSV; the original used pd.read_csv with
                # the default comma separator, collapsing the file into one
                # column and defeating the columns check.
                result = pd.read_table(args.dms_output)
                if model_locate not in result.columns:
                    result[model_locate] = predict(args, model_locate, model, collater)[model_locate]
            else:
                # BUG FIX: the original called predict(args, args.model_location),
                # which neither matches predict's 4-argument signature nor passes
                # the current model name (it passed the whole list).
                result = predict(args, model_locate, model, collater)
            result.to_csv(args.dms_output, sep="\t", index=False)
            


if __name__ == "__main__":
    # Parse CLI arguments and launch prediction.
    main(create_parser().parse_args())