import esm
import torch
import os
import sys
sys.path.append(os.getcwd())
import argparse
from tqdm import tqdm
from esm.inverse_folding.util import CoordBatchConverter
from esm import pretrained
from mif_st.sequence_models.pdb_utils import parse_PDB, process_coords
from mif_st.sequence_models.pretrained import load_model_and_alphabet
from mif_st.sequence_models.constants import PROTEIN_ALPHABET

def get_embedding(model_name, pdb_path, embed_type, chunck_id, output_path):
    """Extract an embedding for each PDB file in one chunk and save them to disk.

    Args:
        model_name: pretrained model identifier; routed by substring
            ("esm_if" -> ESM inverse-folding branch, "mif" -> MIF/MIF-ST branch).
        pdb_path: directory containing the input PDB files.
        embed_type: 'last_hidden_state' or 'mean_hidden_state' (ESM-IF branch only).
        chunck_id: 0-based chunk index; each chunk covers 100,000 files.
        output_path: directory where the resulting .pt file is written.

    Raises:
        ValueError: if embed_type is not a recognized value (ESM-IF branch).
    """
    # Slice the sorted listing into fixed-size chunks so independent jobs
    # can process disjoint subsets of the same directory.
    chunk_size = 100000
    pdbs = sorted(os.listdir(pdb_path))[chunck_id * chunk_size:(chunck_id + 1) * chunk_size]
    pdb_infos = {}

    if "esm_if" in model_name:
        model, alphabet = pretrained.load_model_and_alphabet(model_name)
        model.cuda()
        model.eval()
        batch_converter = CoordBatchConverter(alphabet)

        for pdb in tqdm(pdbs):
            single_pdb_path = os.path.join(pdb_path, pdb)
            # Only chain "A" of each structure is embedded.
            coords, pdb_seq = esm.inverse_folding.util.load_coords(single_pdb_path, "A")
            batch = [(coords, None, pdb_seq)]
            coords_, confidence, strs, tokens, padding_mask = batch_converter(batch)
            prev_output_tokens = tokens[:, :-1]
            # Inference only: no_grad avoids building the autograd graph,
            # which otherwise accumulates GPU memory across iterations.
            with torch.no_grad():
                hidden_states, _ = model.forward(
                    coords_.cuda(),
                    padding_mask.cuda(),
                    confidence.cuda(),
                    prev_output_tokens.cuda(),
                    features_only=True,
                )
            # Indexed below as [batch, dim, position] — assumed layout of the
            # ESM-IF decoder output; TODO confirm against the model docs.
            if embed_type == 'last_hidden_state':
                last_hidden_state = hidden_states[0, :, -1]
            elif embed_type == 'mean_hidden_state':
                last_hidden_state = hidden_states[0, :, :].mean(dim=1)
            else:
                # Previously an unknown embed_type fell through and either
                # raised a confusing NameError or silently reused the value
                # from the previous iteration.
                raise ValueError(f"unknown embed_type: {embed_type}")
            pdb_infos[pdb] = {"embedding": last_hidden_state.cpu().detach().numpy(), "seq": pdb_seq}

    if "mif" in model_name:
        model, collater = load_model_and_alphabet(model_name)
        model.cuda()
        model.eval()

        for pdb in tqdm(pdbs):
            coords, sequence, _ = parse_PDB(os.path.join(pdb_path, pdb))
            # Backbone atoms: N, CA, C columns of the parsed coordinate array.
            coords = {
                'N': coords[:, 0],
                'CA': coords[:, 1],
                'C': coords[:, 2]
            }
            dist, omega, theta, phi = process_coords(coords)
            batch = [[sequence, torch.tensor(dist, dtype=torch.float).cuda(),
                    torch.tensor(omega, dtype=torch.float).cuda(),
                    torch.tensor(theta, dtype=torch.float).cuda(),
                    torch.tensor(phi, dtype=torch.float).cuda()]]
            src, nodes, edges, connections, edge_mask = collater(batch)
            # no_grad for the same memory reason as the ESM-IF branch.
            with torch.no_grad():
                rep = model(src.cuda(), nodes.cuda(), edges.cuda(), connections.cuda(), edge_mask.cuda(), result='repr')[0]
            # Mean-pool over residues to obtain a fixed-size embedding.
            rep_mean = rep.mean(dim=0).cpu().detach()
            pdb_infos[pdb] = {"embedding": rep_mean.numpy(), "seq": sequence}

    # Ensure the output directory exists before saving (torch.save does not
    # create missing directories).
    os.makedirs(output_path, exist_ok=True)
    out_file = os.path.join(output_path, f'{model_name}_{embed_type}_chunk{chunck_id}.pt')
    torch.save(pdb_infos, out_file)

def create_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Extract embeddings from ESM_if')
    # (flag, type, default, help) for every supported option.
    option_table = [
        ('--model', str, 'esm_if1_gvp4_t16_142M_UR50', 'model name'),
        ('--batch_size', int, 32, 'batch size'),
        ('--pdb_path', str, '/home/user4/data/swiss_prot_pdb/', 'path to pdb file'),
        ('--output_path', str, './data/swissprot_esmif', 'path to output file'),
        ('--embed_type', str, 'last_hidden_state', 'last_hidden_state or mean_hidden_state'),
        ('--chunck_id', int, 0, 'chunck_id id'),
    ]
    for flag, arg_type, default, help_text in option_table:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser.parse_args()


if __name__ == '__main__':
    # Parse CLI options, then run extraction for the selected chunk.
    cli_args = create_args()
    get_embedding(
        cli_args.model,
        cli_args.pdb_path,
        cli_args.embed_type,
        cli_args.chunck_id,
        cli_args.output_path,
    )
    
    