from data_process import read_dataset
from transformers import AutoTokenizer
from tqdm import tqdm


if __name__ == '__main__':

    # Report the average tokenized length of inputs (d['text']) and outputs
    # (d['label']) for one dataset split, using the Llama-2 chat tokenizer.
    # Useful for choosing max_length / generation budgets before training.
    data_name = 'scinli'
    set_name = 'test'
    model_path = "model/Llama-2-7b-chat-hf"
    # trust_remote_code allows custom tokenizer code shipped with the checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    data = read_dataset(data_name, set_name)
    input_len = []
    output_len = []
    for d in tqdm(data):
        # encode() includes special tokens (e.g. BOS) in the count.
        input_len.append(len(tokenizer.encode(d['text'])))
        # str() guards against non-string labels (e.g. integer class ids).
        output_len.append(len(tokenizer.encode(str(d['label']))))

    # Guard against an empty split — the original divided by len() unconditionally,
    # which raises ZeroDivisionError when the dataset has no examples.
    if input_len:
        print(f"Input average length: {sum(input_len) / len(input_len)}")
        print(f"Output average length: {sum(output_len) / len(output_len)}")
    else:
        print(f"No examples found for {data_name}/{set_name}; nothing to average.")

        

        
