import polars as pl
import torch
import numpy as np
from transformers import BertTokenizer, BertModel, RobertaTokenizer, RobertaModel
# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Training molecules for the leash-BELKA competition (contains a
# 'molecule_smiles' column used below).
train_df = pl.read_parquet('datas/leash-BELKA/train.parquet')

# ChemBERTa tokenizer + encoder used to embed SMILES strings.
mol_tokenizer = RobertaTokenizer.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1")
# BUG FIX: call .eval() — the model is only used for inference, and without
# eval() dropout stays active, making the extracted features nondeterministic.
mol_encoder = RobertaModel.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1").to(device).eval()

def make_batches(data, batch_size):
    """Yield consecutive chunks of *data*, each holding at most *batch_size* items.

    The final chunk may be shorter when ``len(data)`` is not a multiple
    of ``batch_size``.
    """
    total = len(data)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield data[start:stop]
        start = stop
        
def extract_smile_features(smiles, batch_size=100):
    """Encode SMILES strings into ChemBERTa pooled embeddings.

    Parameters
    ----------
    smiles : list[str]
        SMILES strings to embed.
    batch_size : int
        Number of strings tokenized and encoded per forward pass.

    Returns
    -------
    np.ndarray
        Array of shape (len(smiles), hidden_size) of pooled features.
    """
    features = []
    with torch.no_grad():
        for batch in make_batches(smiles, batch_size):
            # BUG FIX: truncation=True guards against SMILES longer than the
            # model's positional-embedding limit (512 for RoBERTa), which
            # would otherwise raise at runtime.
            token = mol_tokenizer(
                batch, padding=True, truncation=True, return_tensors="pt"
            ).to(device)
            feature = mol_encoder(**token).pooler_output.cpu().numpy()
            features.append(feature)
    # Robustness: np.concatenate raises on an empty list; return an empty
    # array instead when no SMILES were supplied.
    if not features:
        return np.empty((0, 0), dtype=np.float32)
    features = np.concatenate(features, axis=0)
    print(features.shape)
    return features

def get_unique_features(df):
    """Embed each unique molecule SMILES in *df* and return a lookup table.

    Parameters
    ----------
    df : pl.DataFrame
        Must contain a 'molecule_smiles' column.

    Returns
    -------
    pl.DataFrame
        Columns 'molecule_smiles' (str) and 'features' (list of floats),
        one row per unique SMILES.
    """
    # BUG FIX: a polars DataFrame has no .to_list() — that is a Series
    # method. Pull the column out as a Series before converting.
    unique_values = df.get_column('molecule_smiles').unique().to_list()
    feature = extract_smile_features(unique_values)
    # BUG FIX: pl.DataFrame has no `columns=` keyword (it takes `schema=`);
    # build from a dict instead. .tolist() turns the 2-D ndarray into one
    # Python list of floats per row, stored as a List column.
    result_df = pl.DataFrame({
        'molecule_smiles': unique_values,
        'features': feature.tolist(),
    })
    return result_df
# Embed the unique training SMILES and persist them as a feature lookup table.
result_df = get_unique_features(train_df)
# BUG FIX: polars writes parquet via .write_parquet — .to_parquet is the
# pandas API and raises AttributeError on a polars DataFrame.
result_df.write_parquet('datas/leash-BELKA/train_smiles_features.parquet')

# Same for the test split.
test_df = pl.read_parquet('datas/leash-BELKA/test.parquet')
result_df = get_unique_features(test_df)
result_df.write_parquet('datas/leash-BELKA/test_smiles_features.parquet')