import sys
sys.path.append("/home/wzzheng/ec")

import json
import os
import torch
import random
from utils import format_esm, get_ec_id_dict


def to_one_hot(ec_number, ec_number_to_int_dict, num_classes):
    """Encode an EC number as a one-hot vector.

    Args:
        ec_number: EC number string, e.g. ``"1.1.1.1"``.
        ec_number_to_int_dict: mapping EC number -> class index.
        num_classes: total number of EC classes (length of the output).

    Returns:
        A 1-D ``torch.LongTensor`` of length ``num_classes`` with a single
        1 at the EC number's class index.
    """
    class_index = ec_number_to_int_dict[ec_number]
    encoding = torch.zeros(num_classes, dtype=torch.long)
    encoding[class_index] = 1
    return encoding


class Compare_dataset(torch.utils.data.Dataset):
    """Map-style dataset pairing precomputed ESM embeddings with one-hot EC labels.

    Each item corresponds to one fully-specified EC class: a protein id is
    sampled uniformly from that class, its ESM embedding is loaded from
    ``<data_dir>/esm_data/<id>.pt``, and the label is the one-hot encoding of
    the protein's first EC annotation.
    """

    def __init__(self, id_ec, ec_id, data_dir='/state/partition/wzzheng/clean/data/train_valid_split/split100',
                 training_data='split100_train_split_0', ec_number_to_int_path='ec_number_to_int.json'):
        """
        Args:
            id_ec: mapping protein id -> list of EC number strings.
            ec_id: mapping EC number -> sequence of protein ids in that class.
            data_dir: root directory containing the ``esm_data`` embeddings.
            training_data: split name (stored for reference; not read here).
            ec_number_to_int_path: JSON file caching the EC -> class-index map.
        """
        self.id_ec = id_ec
        self.ec_id = ec_id
        self.data_dir = data_dir
        self.training_data = training_data
        # NOTE(review): these two dicts are assigned but never populated in this
        # file — kept for backward compatibility with possible external users.
        self.id_seq_a = {}
        self.id_seq = {}

        if os.path.exists(ec_number_to_int_path):
            # Reuse the cached EC -> integer mapping so class indices stay
            # stable across runs.
            with open(ec_number_to_int_path, 'r') as f:
                self.ec_number_to_int = json.load(f)
        else:
            # First run: build the mapping from every EC annotation and cache it.
            # Set iteration order is arbitrary, so indices are only reproducible
            # via the cached JSON file.
            unique_ec_numbers = set(ec for ec_list in id_ec.values() for ec in ec_list)
            self.ec_number_to_int = {ec: i for i, ec in enumerate(unique_ec_numbers)}
            with open(ec_number_to_int_path, 'w') as f:
                json.dump(self.ec_number_to_int, f)

        # Fix: this assignment was duplicated in the original.
        self.num_classes = len(self.ec_number_to_int)

        # Only fully-specified EC numbers (no '-' wildcard level) are sampled.
        self.full_list = [ec for ec in self.ec_id if '-' not in ec]

        # Precompute the one-hot label for every EC class once, up front.
        self.one_hot_dict = {
            ec: to_one_hot(ec, self.ec_number_to_int, self.num_classes)
            for ec in self.ec_id
        }
        # Inverse mapping: class index -> EC number, for decoding predictions.
        self.ec_dict = {i: ec for ec, i in self.ec_number_to_int.items()}

    def __getitem__(self, index):
        """Return one (embedding, one-hot label) pair for the EC class at ``index``."""
        anchor_ec = self.full_list[index]
        # Sample a protein uniformly from this EC class.
        anchor = random.choice(self.ec_id[anchor_ec])
        anchor_data = torch.load(os.path.join(self.data_dir, 'esm_data', anchor + '.pt'))
        # Label uses the protein's *first* EC annotation, which may differ from
        # anchor_ec for multi-EC proteins.
        anchor_label = self.id_ec[anchor][0]
        data = format_esm(anchor_data)
        label = self.one_hot_dict[anchor_label]
        return data, label

    def one_hot_to_ec(self, one_hot):
        """Decode a one-hot (or score) vector back to its EC number string."""
        index = torch.argmax(one_hot).item()
        return self.ec_dict[index]

    def __len__(self):
        """One sample per fully-specified EC class per epoch."""
        return len(self.full_list)
    

if __name__ == '__main__':
    # Smoke test: build the dataset from the split CSV and report class count.
    data_dir = '/state/partition/wzzheng/clean/data/train_valid_split/split100'
    training_data = 'split100'
    ec_number_to_int_path = 'ec_number_to_int.json'
    csv_path = os.path.join(data_dir, training_data + '.csv')
    id_ec, ec_id_dict = get_ec_id_dict(csv_path)
    # Materialize the per-EC id collections as plain lists.
    ec_id = {ec: list(ids) for ec, ids in ec_id_dict.items()}
    dataset = Compare_dataset(id_ec, ec_id)  # Create Compare_dataset
    print(dataset.num_classes)