"""Train a BPE tokenizer on IndicCorp v2 (Hindi + English) and optionally evaluate it."""

import argparse
import logging

from datasets import concatenate_datasets, load_dataset, load_from_disk
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
from transformers import AutoTokenizer, GPT2TokenizerFast


def initialize_logger(log_file):
    logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s: %(message)s')


def log_parameters(vocab_size, batch_size, fertility_score, proportion_continued_words, log_file='parameters.log'):
    initialize_logger(log_file)
    logging.info(
        f"Vocabulary Size: {vocab_size}, Batch Size: {batch_size}, "
        f"Fertility Score: {fertility_score}, Proportion of Continued Words: {proportion_continued_words}"
    )


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch_size",
        type=int,
        required=True,
        help="Batch size to use for training",
    )
    parser.add_argument(
        "--vocab_size",
        type=int,
        required=True,
        help="Vocabulary size to use for tokenizer",
    )
    parser.add_argument(
        "--use_config",
        choices=['xlm-roberta', 'vanilla'],
        required=True,
        help="Use XLM-RoBERTa config or Vanilla BPE",
    )
    parser.add_argument(
        "--do_evaluate",
        action='store_true',
        help="Enable evaluation.",
    )
    args = parser.parse_known_args()
    return args


def calculate_proportion_continued_words(tokenizer, sentences):
    """Return the fraction of whitespace-separated words that the tokenizer
    splits into more than one subword token ("continued" words)."""
    total_continued_words = 0
    total_words = 0
    for sentence in sentences:
        words = sentence.split()
        if not words:
            continue
        # word_ids() maps each subword token back to the pre-tokenized word it
        # belongs to (fast tokenizers only); a word is "continued" when more
        # than one token maps to it.
        encoding = tokenizer(sentence, add_special_tokens=False)
        tokens_per_word = {}
        for word_id in encoding.word_ids():
            if word_id is not None:
                tokens_per_word[word_id] = tokens_per_word.get(word_id, 0) + 1
        total_continued_words += sum(1 for n in tokens_per_word.values() if n > 1)
        total_words += len(words)
    return total_continued_words / total_words if total_words > 0 else 0


def train_tokenizer(args):
    # IndicCorp v2 Hindi and English training splits; the cache directory is
    # specific to the original training environment.
    indic_datasets_hi = load_dataset('satpalsr/indicCorpv2', 'hi', split='train', cache_dir='/home1/BharatGPT_tokenizer/hf/')
    indic_datasets_en = load_dataset('satpalsr/indicCorpv2', 'en', split='train', cache_dir='/home1/BharatGPT_tokenizer/hf/')

    dataset = concatenate_datasets([indic_datasets_en, indic_datasets_hi])

    # test_data = load_from_disk('samanantar_data')  # alternative held-out set (unused; overridden below)
    test_data = dataset['text'][:10000]
    print(f"[INFO] Evaluation sentences: {len(test_data)}")
    print(f"[INFO] Training documents: {len(dataset)}")
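
    # batch_iterator streams the text column to the trainer in batch_size
    # chunks, so the full corpus never has to be materialised in memory at once.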
    def batch_iterator():
        for idx in range(0, len(dataset), args.batch_size):
            yield dataset[idx: idx + args.batch_size]['text']
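
    # Two recipes: 'vanilla' trains a byte-level BPE from scratch with the
    # tokenizers library; 'xlm-roberta' retrains the pretrained XLM-RoBERTa
    # tokenizer on the new corpus via train_new_from_iterator.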
    if args.use_config == 'vanilla':
        tokenizer = Tokenizer(models.BPE())
        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
        # Quick sanity check of the byte-level pre-tokenizer on an English sentence.
        print(f"[INFO] The brown fox jumped over the lazy dog\n{tokenizer.pre_tokenizer.pre_tokenize_str('The brown fox jumped over the lazy dog')}")
        print("[INFO] Training...")
        trainer = trainers.BpeTrainer(vocab_size=args.vocab_size, special_tokens=["<|endoftext|>"])
        tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        tokenizer.decoder = decoders.ByteLevel()
        # Wrap the trained tokenizers object so it can be saved with save_pretrained below.
        trained_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
    elif args.use_config == 'xlm-roberta':
        print("[INFO] Retraining the XLM-RoBERTa tokenizer on the new corpus...")
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)

    trained_tokenizer.save_pretrained('hi-bgpt-bpe-tokenizer1')
    print("[INFO] Tokenizer saved to 'hi-bgpt-bpe-tokenizer1'")
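
    # Evaluation metrics (standard tokenizer-quality measures):
    #   fertility = average number of subword tokens produced per whitespace word (lower is better)
    #   proportion of continued words = fraction of words split into more than one subword token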
    if args.do_evaluate:
        print("[INFO] Running evaluation using fertility and fraction of continued words")
        tokenizer = AutoTokenizer.from_pretrained('hi-bgpt-bpe-tokenizer1')

        fertility = 0.0
        scored_sentences = 0
        for sentence in test_data:
            words = sentence.split()
            if not words:
                continue  # skip empty lines to avoid division by zero
            # Exclude special tokens so <s>/</s> do not inflate the fertility score.
            tok = tokenizer(sentence, add_special_tokens=False)
            fertility += len(tok['input_ids']) / len(words)
            scored_sentences += 1
        average_fertility = fertility / scored_sentences if scored_sentences > 0 else 0
        proportion_continued_words = calculate_proportion_continued_words(tokenizer, test_data)
        log_parameters(args.vocab_size, args.batch_size, average_fertility, proportion_continued_words)


def main():
    args, _ = parse_arguments()
    train_tokenizer(args)


if __name__ == "__main__":
    main()
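

# Example invocation (the script name and the sizes below are illustrative, not fixed by this repo):
#   python train_tokenizer.py --batch_size 1000 --vocab_size 64000 --use_config vanilla --do_evaluate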