from torch.utils.data import Dataset, DataLoader
import pandas as pd
import torch
import duckdb
from transformers import BertTokenizer, BertModel, RobertaTokenizer, RobertaModel
import re
class EmbedDataset(Dataset):
    """Map-style dataset yielding (SMILES string, protein embedding, binds label).

    Reads a molecule/label table and a protein-embedding table from parquet,
    then serves one triple per row of the data table.
    """

    def __init__(self, data_path, protein_embedding_path, seed=42):
        """
        Args:
            data_path: parquet file with columns 'molecule_smiles',
                'protein_name', 'binds'.
            protein_embedding_path: parquet file with columns 'seq_names'
                and 'seq_features' (one embedding per protein name).
            seed: accepted for interface compatibility; currently unused
                (this dataset does no sampling).
        """
        self.data_path = data_path
        emb_df = pd.read_parquet(protein_embedding_path)
        # dict(zip(...)) over the two columns avoids the per-row Series
        # construction of df.iterrows() — same mapping, much faster.
        self.pro_emb = dict(zip(emb_df['seq_names'], emb_df['seq_features']))
        self.data = pd.read_parquet(data_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Fetch the row once instead of three separate .iloc lookups.
        row = self.data.iloc[idx]
        return row['molecule_smiles'], self.pro_emb[row['protein_name']], row['binds']
    
    
    
class BalancedEmbedDataset(Dataset):
    """Class-balanced dataset yielding (SMILES string, protein embedding, binds label).

    On construction, samples `data_size` random rows with binds=0 and
    `data_size` random rows with binds=1 from the parquet file via duckdb,
    so the resulting dataset holds up to 2 * data_size balanced rows.
    """

    def __init__(self, data_path, protein_embedding_path, data_size, seed=42):
        """
        Args:
            data_path: parquet file with columns 'molecule_smiles',
                'protein_name', 'binds'.
            protein_embedding_path: parquet file with columns 'seq_names'
                and 'seq_features' (one embedding per protein name).
            data_size: number of rows to draw from each class.
            seed: accepted for interface compatibility; currently unused —
                NOTE(review): random() in the SQL is unseeded, so resampling
                is not reproducible even when a seed is passed.
        """
        self.data_path = data_path
        self.data_size = data_size
        self.data = self.resample()
        emb_df = pd.read_parquet(protein_embedding_path)
        # dict(zip(...)) over the two columns avoids the per-row Series
        # construction of df.iterrows() — same mapping, much faster.
        self.pro_emb = dict(zip(emb_df['seq_names'], emb_df['seq_features']))

    def resample(self):
        """Draw a fresh balanced sample (data_size per class) from the parquet file."""
        # NOTE(review): the path is interpolated into the SQL string; this is
        # fine for trusted local paths but would be injectable if data_path
        # ever came from untrusted input.
        con = duckdb.connect()
        try:
            df = con.query(f"""(SELECT *
                            FROM parquet_scan('{self.data_path}')
                            WHERE binds = 0
                            ORDER BY random()
                            LIMIT {self.data_size})
                            UNION ALL
                            (SELECT *
                            FROM parquet_scan('{self.data_path}')
                            WHERE binds = 1
                            ORDER BY random()
                            LIMIT {self.data_size})""").df()
        finally:
            # Close even if the query fails, so the connection never leaks.
            con.close()
        return df

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Fetch the row once instead of three separate .iloc lookups.
        row = self.data.iloc[idx]
        return row['molecule_smiles'], self.pro_emb[row['protein_name']], row['binds']