matlok committed
Commit 17917cf
1 Parent(s): 5a55332

fixing newline char in logs/readme

Files changed (2)
  1. README.md +2 -1
  2. run-tiny-merge.py +1 -0
README.md CHANGED
@@ -496,7 +496,8 @@ INFO:__main__:loaded new model file: matlok/tinyllama-cinder-openhermes-32k aski
  INFO:__main__:loading tokenizer=TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
  Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.
  INFO:__main__:
- ----------tokenizer=LlamaTokenizerFast(name_or_path='TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', vocab_size=32000, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>'}, clean_up_tokenization_spaces=False), added_tokens_decoder={
+ ----------
+ tokenizer=LlamaTokenizerFast(name_or_path='TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', vocab_size=32000, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>'}, clean_up_tokenization_spaces=False), added_tokens_decoder={
  	0: AddedToken("<unk>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
  	1: AddedToken("<s>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
  	2: AddedToken("</s>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
run-tiny-merge.py CHANGED
@@ -94,6 +94,7 @@ def run_text_test(
      log.info(
          "\n"
          "----------"
+         "\n"
          f"tokenizer={tokenizer}\n "
          f"question:\n{question}\n"
          f"answer:\n{answer}\n"