from torch.utils.data import Dataset, DataLoader
import pandas as pd
import torch
import duckdb
from transformers import BertTokenizer, BertModel, RobertaTokenizer, RobertaModel
import re
class EmbedDataset(Dataset):
    """Dataset pairing molecule SMILES strings with precomputed protein embeddings.

    Each item is a tuple ``(molecule_smiles, protein_embedding, binds)`` where
    the embedding is looked up by the sample's ``protein_name``.
    """

    def __init__(self, data_path, protein_embedding_path, seed=42):
        """
        Args:
            data_path: parquet file with columns ``molecule_smiles``,
                ``protein_name`` and ``binds``.
            protein_embedding_path: parquet file with columns ``seq_names``
                and ``seq_features`` mapping protein names to embeddings.
            seed: accepted for interface compatibility; not used by this class.
        """
        self.data_path = data_path
        emb_df = pd.read_parquet(protein_embedding_path)
        # Build the name -> embedding map from the two columns directly;
        # zip over Series is much faster than DataFrame.iterrows.
        self.pro_emb = dict(zip(emb_df['seq_names'], emb_df['seq_features']))
        self.data = pd.read_parquet(data_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Fetch the row once instead of three separate .iloc lookups.
        row = self.data.iloc[idx]
        return row['molecule_smiles'], self.pro_emb[row['protein_name']], row['binds']
        
class BalancedEmbedDataset(Dataset):
    """Dataset that re-balances positives vs. negatives at a fixed 1:10 ratio.

    The full parquet file is split into positive/negative pools once (the
    expensive read happens only on construction); :meth:`resample` draws a
    fresh balanced subset, e.g. once per epoch.
    """

    def __init__(self, data_path, protein_embedding_path, data_size, seed=42):
        """
        Args:
            data_path: parquet file with columns ``molecule_smiles``,
                ``protein_name`` and ``binds``.
            protein_embedding_path: parquet file with columns ``seq_names``
                and ``seq_features``.
            data_size: number of positive samples per resample; negatives
                are drawn at 10x this count.
            seed: base seed anchoring the resampling sequence (previously
                accepted but ignored; now makes sampling reproducible).
        """
        self.data_path = data_path
        self.data_size = data_size
        self.seed = seed
        self._resample_count = 0  # advances the seed so each resample differs
        # Only the first read of the parquet file is slow; resample() just
        # subsamples the in-memory pools.
        all_data = pd.read_parquet(data_path)
        self.pos_data = all_data[all_data['binds'] == 1]
        self.neg_data = all_data[all_data['binds'] == 0]
        self.data = self.resample()
        emb_df = pd.read_parquet(protein_embedding_path)
        # zip over Series is much faster than DataFrame.iterrows.
        self.pro_emb = dict(zip(emb_df['seq_names'], emb_df['seq_features']))

    def resample(self):
        """Draw ``data_size`` positives and ``data_size * 10`` negatives.

        Sampling is seeded from ``seed`` plus an internal call counter, so a
        given run is reproducible while successive calls still draw
        different subsets.
        """
        state = self.seed + self._resample_count
        self._resample_count += 1
        pos_sample = self.pos_data.sample(n=self.data_size, random_state=state)
        neg_sample = self.neg_data.sample(n=self.data_size * 10, random_state=state)
        return pd.concat([pos_sample, neg_sample])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Fetch the row once instead of three separate .iloc lookups.
        row = self.data.iloc[idx]
        return row['molecule_smiles'], self.pro_emb[row['protein_name']], row['binds']
    
    