"""
transform all train files to a pkl format file with preprocessing using torchtext

original dataset format:
train.txt
1 sentence1
0 sentence2 
...
"""

from torchtext import data
from transformers import BertTokenizer
import os
import pickle
import torch


def preprocess_txt(txt_file_path: str) -> str:
    """
    Placeholder preprocessing hook for a raw train txt file.

    Currently performs no transformation: the original path is handed
    back unchanged so callers can already route through this function.
    """
    return txt_file_path
    

def tokenize_and_cut(sentence, tok=None, max_len=None):
    """
    Tokenize a sentence and truncate it so the sequence still fits the
    model's maximum input length after the [CLS]/[SEP] tokens are added
    later by the torchtext Field.

    Args:
        sentence: raw input string.
        tok: tokenizer to use; defaults to the module-level ``tokenizer``
            (set in the ``__main__`` section).
        max_len: maximum model input length; defaults to the module-level
            ``max_input_length``.

    Returns:
        list of at most ``max_len - 2`` tokens.
    """
    # Fall back to the module-level globals for backward compatibility
    # with existing callers (e.g. the Field's tokenize= argument).
    if tok is None:
        tok = tokenizer
    if max_len is None:
        max_len = max_input_length
    # Reserve two slots for the [CLS] and [SEP] special tokens.
    return tok.tokenize(sentence)[:max_len - 2]


if __name__ == "__main__":
    train_path = './data/original_train_data'
    save_path = './data/processed_train_data/all.pkl'
    tokenizer = BertTokenizer.from_pretrained('/home/dl/bert_models/bert-base-uncased')
    max_input_length = tokenizer.max_model_input_sizes['bert-base-uncased']
    cls_token_id=tokenizer.cls_token_id
    sep_token_id=tokenizer.sep_token_id
    pad_token_id=tokenizer.pad_token_id
    unk_token_id=tokenizer.unk_token_id
    TEXT=data.Field(batch_first=True, tokenize=tokenize_and_cut, lower=True, use_vocab=False,
                    preprocessing=tokenizer.convert_tokens_to_ids,
                    init_token=cls_token_id,
                    eos_token=sep_token_id,
                    pad_token=pad_token_id,
                    unk_token=unk_token_id)
    LABEL=data.LabelField(dtype=torch.float)
    fields = [("label", LABEL), ("text", TEXT)]
    mydataset = data.TabularDataset('./data/original_train_data/SMSSpamCollection','TSV',fields)

