from torchtext import vocab
from cio import JsonLReader
from config.Config import BASE_DIR, UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN, VOCAB_MIN_FREQ
import torch

# Corpus used to build every vocabulary below: all records from the train
# and test splits concatenated into one list.
# NOTE(review): this I/O runs at import time — importing this module reads
# both JSONL files even when no vocabulary is built.
# NOTE(review): the test split is included in the vocabulary corpus;
# presumably intentional (shared token inventory), but verify this does not
# leak test-only tokens into training.
lines = JsonLReader(BASE_DIR / "resource" / "data" / "consum" / "split_context.train.jsonl").read() \
        + JsonLReader(BASE_DIR / "resource" / "data" / "consum" / "split_context.test.jsonl").read()


def build_nl_vocab() -> vocab.Vocab:
    """Build the natural-language vocabulary from the ``summary`` field.

    Tokens occurring fewer than ``VOCAB_MIN_FREQ`` times are dropped.
    The special tokens (UNK/PAD/BOS/EOS) are always inserted, and the
    vocabulary's default index is set so that out-of-vocabulary lookups
    resolve to ``UNK_TOKEN``.

    Returns:
        vocab.Vocab: vocabulary over summary tokens.
    """
    nl_vocab = vocab.build_vocab_from_iterator(
        # assumes each['summary'] is a list of token strings — TODO confirm.
        # Generator instead of a list: build_vocab_from_iterator consumes
        # the iterable once, so there is no need to materialize it.
        (each['summary'] for each in lines),
        min_freq=VOCAB_MIN_FREQ,
        specials=[UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN])
    # Vocab.__getitem__ is a direct token->index lookup; get_stoi() would
    # rebuild the entire stoi dict just to read one entry.
    nl_vocab.set_default_index(nl_vocab[UNK_TOKEN])
    return nl_vocab


def build_code_vocab() -> vocab.Vocab:
    """Build the source-code vocabulary from ``method`` + ``context`` tokens.

    Tokens occurring fewer than ``VOCAB_MIN_FREQ`` times are dropped.
    The special tokens (UNK/PAD/BOS/EOS) are always inserted, and the
    vocabulary's default index is set so that out-of-vocabulary lookups
    resolve to ``UNK_TOKEN``.

    Returns:
        vocab.Vocab: vocabulary over method and context tokens.
    """
    code_vocab = vocab.build_vocab_from_iterator(
        # assumes 'method' and 'context' are lists of token strings, so
        # `+` concatenates the two token streams — TODO confirm.
        (each['method'] + each['context'] for each in lines),
        min_freq=VOCAB_MIN_FREQ,
        specials=[UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN])
    # Vocab.__getitem__ is a direct token->index lookup; get_stoi() would
    # rebuild the entire stoi dict just to read one entry.
    code_vocab.set_default_index(code_vocab[UNK_TOKEN])
    return code_vocab


def build_big_vocab() -> vocab.Vocab:
    """Build a joint vocabulary over summary, method, and context tokens.

    Tokens occurring fewer than ``VOCAB_MIN_FREQ`` times are dropped.
    The special tokens (UNK/PAD/BOS/EOS) are always inserted, and the
    vocabulary's default index is set so that out-of-vocabulary lookups
    resolve to ``UNK_TOKEN``.

    Returns:
        vocab.Vocab: shared vocabulary over all three token fields.
    """
    big_vocab = vocab.build_vocab_from_iterator(
        # assumes all three fields are lists of token strings, so `+`
        # concatenates them into one token stream per record — TODO confirm.
        (each['summary'] + each['method'] + each['context'] for each in lines),
        min_freq=VOCAB_MIN_FREQ,
        specials=[UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN])
    # Vocab.__getitem__ is a direct token->index lookup; get_stoi() would
    # rebuild the entire stoi dict just to read one entry.
    big_vocab.set_default_index(big_vocab[UNK_TOKEN])
    return big_vocab


if __name__ == '__main__':
    # Build each vocabulary and pickle it next to the source data.
    # Same build-and-save sequence as before, expressed as one loop.
    out_dir = BASE_DIR / "resource" / "data" / "consum"
    for builder, filename in (
            (build_nl_vocab, "nl_vocab.pkl"),
            (build_code_vocab, "code_vocab.pkl"),
            (build_big_vocab, "big_vocab.pkl"),
    ):
        torch.save(builder(), out_dir / filename)
