# token_evaluation / eval.py
# Uploaded by nancyH via huggingface_hub (commit 2560dd0, verified).
import pickle
import numpy as np
import pandas as pd
from tokenizers import Tokenizer
import os
# Input pickle holding the held-out genomic windows and their phyloP score tracks.
DATA_PATH = "/home/n5huang/dna_token/tokenizer_evaluation/eval_data.pkl"
# Output pickle where the per-tokenizer evaluation results are written.
RESULTS_PATH = "/home/n5huang/dna_token/tokenizer_evaluation/evaluation_results.pkl"
# Ensure these match the filenames you upload to the server
# Maps a human-readable tokenizer label -> path to its HF `tokenizers` JSON file.
# Missing files are skipped at load time (see __main__ block).
VOCAB_PATHS = {
    "Merged_uni": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram.json",
    "Merged_word": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_wordPiece.json",
    "Weighted": "/home/n5huang/dna_token/tokenizer_evaluation/weighted_bpe/tokenizer.json",  # Adjust filename if needed
    "SeqOnly": "/home/n5huang/dna_token/tokenizer_evaluation/baseline_bpe/tokenizer.json",  # Adjust filename if needed
    "DNAbert2": "/home/n5huang/dna_token/pretrain/models/DNAbert2_Pretrained/tokenizer.json",
    "Grover": "/home/n5huang/dna_token/pretrain/models/Grover_Pretrained/tokenizer.json",
}
def evaluate_tokenizer_on_phyloP(tokenizer, sequences, phyloPs):
    """Score one tokenizer's segmentation against per-base phyloP values.

    Each sequence is upper-cased and encoded; for every produced token the
    phyloP scores covered by its character offsets are summarized.

    Parameters
    ----------
    tokenizer : object with an ``encode(str)`` method returning an encoding
        that exposes ``ids``, ``tokens`` and ``offsets`` (HF `tokenizers` API).
    sequences : iterable of str
        Genomic windows; windows shorter than 100 bp are skipped.
    phyloPs : iterable of numpy arrays
        Per-base conservation scores, aligned 1:1 with ``sequences``.

    Returns
    -------
    dict with keys:
        "mean"  -> np.ndarray of per-token mean phyloP,
        "var"   -> np.ndarray of per-token phyloP variance,
        "token" -> list of token strings (same order as the arrays).
    """
    means, variances, names = [], [], []
    n_tokens = 0
    for sequence, conservation in zip(sequences, phyloPs):
        # Skip chunks that are too small (end of chromosome / N padding).
        if len(sequence) < 100:
            continue
        encoding = tokenizer.encode(sequence.upper())
        n_tokens += len(encoding.ids)
        for token, (lo, hi) in zip(encoding.tokens, encoding.offsets):
            window = conservation[lo:hi]
            # Special tokens can have empty offsets; nothing to summarize.
            if len(window) == 0:
                continue
            means.append(window.mean())
            variances.append(window.var())
            names.append(token)
    print(f" -> Processed {n_tokens:,} tokens.")
    return {
        "mean": np.array(means),
        "var": np.array(variances),
        "token": names,
    }
# --- 3. MAIN EXECUTION ---
if __name__ == "__main__":
    # Load the held-out evaluation windows and their phyloP tracks.
    print("Loading data from pickle...")
    with open(DATA_PATH, "rb") as fh:
        payload = pickle.load(fh)
    sequences = payload["test_sequences"]
    phyloPs = payload["test_phyloP"]
    print(f"Loaded {len(sequences)} genomic windows.")

    # Instantiate every tokenizer whose JSON file exists on disk.
    print("Loading tokenizers...")
    available = {}
    for label, json_path in VOCAB_PATHS.items():
        if not os.path.exists(json_path):
            print(f"❌ Warning: File {json_path} not found. Skipping.")
            continue
        available[label] = Tokenizer.from_file(json_path)
        print(f"✅ Loaded {label}")

    # Benchmark each loaded tokenizer against the conservation scores.
    print("\nStarting Benchmark (with Token Names)...")
    results = {}
    for label, tkn in available.items():
        print(f"Evaluating {label}...")
        results[label] = evaluate_tokenizer_on_phyloP(tkn, sequences, phyloPs)

    # Persist the (potentially large) per-tokenizer result dictionary.
    print(f"\nSaving results to {RESULTS_PATH}...")
    with open(RESULTS_PATH, "wb") as fh:
        pickle.dump(results, fh)
    print("Success! Download 'evaluation_results.pkl' (Note: File size will be larger).")