import pandas as pd
import torch
import numpy as np
from transformers import BertTokenizer, BertModel, RobertaTokenizer, RobertaModel
import re
def read_fasta(file_path):
    """Parse a FASTA file into two parallel lists: headers and sequences.

    Each '>' line contributes its text (without the '>') to the names list;
    the sequence lines that follow are concatenated into a single string.
    """
    names, sequences = [], []
    pending_parts = []  # sequence fragments accumulated since the last header
    with open(file_path, 'r') as handle:
        for raw_line in handle:
            if raw_line.startswith('>'):
                names.append(raw_line.strip()[1:])
                # Flush the previous record's sequence, if any was collected.
                if pending_parts:
                    sequences.append(''.join(pending_parts))
                    pending_parts = []
            else:
                fragment = raw_line.strip()
                if fragment:
                    pending_parts.append(fragment)
        # Flush the final record (files don't end with a header line).
        if pending_parts:
            sequences.append(''.join(pending_parts))
    return names, sequences
def preprocess_sequence(seq):
    """Prepare a protein sequence for the ProtBERT tokenizer.

    Ambiguous/rare residues (U, Z, O, B) are mapped to X, and every residue
    is separated by a space, as ProtBERT expects one token per amino acid.
    """
    rare_to_x = str.maketrans({'U': 'X', 'Z': 'X', 'O': 'X', 'B': 'X'})
    return ' '.join(seq.translate(rare_to_x))

# --- Embed protein sequences with ProtBERT and persist the pooled features ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

prot_tokenizer = BertTokenizer.from_pretrained("models/Rostlab/prot_bert")
prot_encoder = BertModel.from_pretrained("models/Rostlab/prot_bert").to(device)
prot_encoder.eval()  # inference only; ensure dropout is disabled

seq_names, seqs = read_fasta('./proteins.fasta')

# BUG FIX: the batch was moved with a hard-coded .to('cuda'), which crashed on
# CPU-only machines (and contradicted the device fallback above). Move it to
# the same device as the encoder instead.
batch = prot_tokenizer(
    [preprocess_sequence(seq) for seq in seqs],
    padding=True,
    truncation=True,
    max_length=3200,
    return_tensors='pt',
).to(device)

# no_grad: we never backprop here, so skip building the autograd graph
# (large memory saving for long sequences).
with torch.no_grad():
    seq_features = prot_encoder(**batch).pooler_output.cpu().numpy()

# NOTE(review): this tokenizes/encodes ALL sequences in one batch; for large
# FASTA files a mini-batched loop would bound peak memory — confirm input size.
df = pd.DataFrame({'seq_names': seq_names, 'seq_features': seq_features.tolist()})
df.to_parquet('datas/leash-BELKA/protein_features.parquet')