import csv
import os
import re
import pandas as pd

import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, T5Tokenizer

def prostT5(sequence):
    """Translate an amino-acid sequence into a 3Di structure string with ProstT5.

    Runs the ProstT5 encoder-decoder twice: first to "fold" the amino-acid
    sequence into a 3Di string (AA -> 3Di), then to back-translate the 3Di
    string into amino acids (3Di -> AA; the back-translated sequence itself is
    discarded).  A forward hook on the encoder captures the encoder's
    last_hidden_state from the final generate() call; that tensor is returned
    as the 3Di feature.

    Parameters
    ----------
    sequence : str
        Amino-acid sequence in single-letter codes.  Rare/ambiguous residues
        (U, Z, O, B) are replaced by X before tokenization.

    Returns
    -------
    tuple
        (Di_feature, structure_sequence) where Di_feature is the encoder
        last_hidden_state tensor captured by the hook, and structure_sequence
        is the predicted 3Di string for `sequence`.
    """
    # Holds the encoder output captured by the forward hook below.
    intermediate_output = None

    def hook(module, hook_input, hook_output):
        # BUG FIX: the original declared `global intermediate_output`, so the
        # hook wrote to a module-level name while this function kept returning
        # its own local None.  `nonlocal` writes to the enclosing variable.
        nonlocal intermediate_output
        intermediate_output = hook_output.last_hidden_state

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    tokenizer = T5Tokenizer.from_pretrained('/state/partition/wzzheng/prostt5', do_lower_case=False)
    model = AutoModelForSeq2SeqLM.from_pretrained("/state/partition/wzzheng/prostt5").to(device)
    # BUG FIX: the hook was registered before `model` was created (NameError).
    # Keep the handle so the hook can be detached before returning.
    hook_handle = model.encoder.register_forward_hook(hook)
    # BUG FIX: `device == 'cpu'` compared a torch.device against a str, and
    # nn.Module has no .full() method; use device.type and .float() to select
    # full precision on CPU, half precision on GPU.
    model.float() if device.type == 'cpu' else model.half()

    sequence_examples = [sequence]
    min_len = min(len(s) for s in sequence_examples)
    max_len = max(len(s) for s in sequence_examples)

    # Replace all rare/ambiguous amino acids by X (3Di sequences do not have
    # those) and introduce whitespace between all residues (AAs and 3Di).
    sequence_examples = [" ".join(list(re.sub(r"[UZOB]", "X", s))) for s in sequence_examples]

    # Prepend the direction prefix: "<AA2fold>" requests AA -> 3Di translation.
    sequence_examples = ["<AA2fold>" + " " + s for s in sequence_examples]

    # Tokenize and pad up to the longest sequence in the batch.
    ids = tokenizer.batch_encode_plus(sequence_examples,
                                      add_special_tokens=True,
                                      padding="longest",
                                      return_tensors='pt').to(device)

    # Generation configuration for "folding" (AA -> 3Di).
    gen_kwargs_aa2fold = {
        "do_sample": True,
        "num_beams": 3,
        "top_p": 0.95,
        "temperature": 1.2,
        "top_k": 6,
        "repetition_penalty": 1.2,
    }

    # Translate from AA to 3Di (AA -> 3Di).
    with torch.no_grad():
        translations = model.generate(
            ids.input_ids,
            attention_mask=ids.attention_mask,
            max_length=max_len,  # max length of generated text
            min_length=min_len,  # minimum length of the generated text
            early_stopping=True,  # stop early if end-of-text token is generated
            num_return_sequences=1,  # return only a single sequence
            **gen_kwargs_aa2fold
        )
    # Decode and remove whitespace between tokens.
    decoded_translations = tokenizer.batch_decode(translations, skip_special_tokens=True)
    structure_sequences = ["".join(ts.split(" ")) for ts in decoded_translations]  # predicted 3Di strings

    # Prepend the direction prefix: "<fold2AA>" requests 3Di -> AA translation.
    sequence_examples_backtranslation = ["<fold2AA>" + " " + s for s in decoded_translations]

    # Tokenize and pad up to the longest sequence in the batch.
    ids_backtranslation = tokenizer.batch_encode_plus(sequence_examples_backtranslation,
                                                      add_special_tokens=True,
                                                      padding="longest",
                                                      return_tensors='pt').to(device)

    # Generation configuration for "inverse folding" (3Di -> AA).
    gen_kwargs_fold2AA = {
        "do_sample": True,
        "top_p": 0.85,
        "temperature": 1.0,
        "top_k": 3,
        "repetition_penalty": 1.2,
    }

    # Translate from 3Di to AA (3Di -> AA).  This forward pass is what the
    # encoder hook captures: its input is the 3Di string, so the captured
    # last_hidden_state is a 3Di-side embedding.
    with torch.no_grad():
        backtranslations = model.generate(
            ids_backtranslation.input_ids,
            attention_mask=ids_backtranslation.attention_mask,
            max_length=max_len,  # max length of generated text
            min_length=min_len,  # minimum length of the generated text
            # early_stopping only matters for beam search, which sampling skips
            num_return_sequences=1,  # return only a single sequence
            **gen_kwargs_fold2AA
        )
    # Decode and remove whitespace between tokens.
    decoded_backtranslations = tokenizer.batch_decode(backtranslations, skip_special_tokens=True)
    aminoAcid_sequences = ["".join(ts.split(" ")) for ts in decoded_backtranslations]  # predicted amino acid strings

    # Detach the hook so repeated calls do not accumulate hooks on the encoder.
    hook_handle.remove()

    Di_feature = intermediate_output
    return Di_feature, structure_sequences[0]


if __name__ == '__main__':
    # All inputs/outputs live under this split directory; features go to 3Di/.
    data_dir = '/state/partition/wzzheng/clean/data/train_valid_split/split100'
    feature_dir = os.path.join(data_dir, '3Di')
    file_name = 'split100.csv'
    extension = os.path.splitext(file_name)[1]

    # Output: tab-separated (ID, 3DI) pairs.  The `with` block guarantees the
    # handle is flushed and closed (the original leaked it).
    with open(os.path.join(data_dir, '3di.csv'), 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter='\t')
        csvwriter.writerow(['ID', '3DI'])

        if extension == '.fasta':
            # FASTA read as a raw one-column frame: even rows are ">ID"
            # headers, odd rows carry the sequence for the preceding header.
            df = pd.read_csv(os.path.join(data_dir, file_name), header=None)
            for index, row in df.iterrows():
                if index % 2 == 0 and index + 1 < len(df):
                    sequence = df.iloc[index + 1, 0]
                    # ProstT5 has no <mask> token; substitute alanine.
                    new_sequence = sequence.replace("<mask>", "A")
                    Di_feature, structure_sequences = prostT5(new_sequence)
                    seq_id = row[0][1:]  # drop the leading ">"
                    torch.save(Di_feature, os.path.join(feature_dir, seq_id + '.pt'))
                    csvwriter.writerow([seq_id, structure_sequences])
        elif extension == '.csv':
            # Input CSV is tab-separated with a header row; the sequence is in
            # column 2 and the ID in column 0.
            with open(os.path.join(data_dir, file_name), 'r') as csv_file:
                csvreader = csv.reader(csv_file, delimiter='\t')
                for i, rows in enumerate(tqdm(csvreader)):
                    if i == 0:
                        continue  # skip header row
                    Di_feature, structure_sequences = prostT5(rows[2])
                    torch.save(Di_feature, os.path.join(feature_dir, rows[0] + '.pt'))
                    csvwriter.writerow([rows[0], structure_sequences])
        else:
            print('Format is not supported')