import json

import config
import drqa_yixin.tokenizers
from drqa_yixin.tokenizers import CoreNLPTokenizer
from utils import fever_db, text_clean
from tqdm import tqdm


def easy_tokenize(text, tokenizer):
    """Normalize *text* and run it through *tokenizer*, returning word strings."""
    normalized = text_clean.normalize(text)
    return tokenizer.tokenize(normalized).words()


def save_jsonl(d_list, filename):
    """Write *d_list* to *filename* in JSON-Lines format (one object per line).

    The file is opened with UTF-8 encoding, so we dump with
    ensure_ascii=False to keep non-ASCII text (e.g. accented names in
    claims) readable on disk instead of \\uXXXX escapes.  json.loads
    parses both forms, so readers are unaffected.
    """
    print("Save to Jsonl:", filename)
    with open(filename, encoding='utf-8', mode='w') as out_f:
        for item in d_list:
            out_f.write(json.dumps(item, ensure_ascii=False) + '\n')


def load_jsonl(filename):
    """Read a JSON-Lines file and return its records as a list of objects."""
    records = []
    with open(filename, encoding='utf-8', mode='r') as in_f:
        print("*" * 20, "Load Jsonl from {} and return the list".format(filename))
        # tqdm gives a progress bar over lines; parsing is line-by-line.
        for raw_line in tqdm(in_f):
            records.append(json.loads(raw_line.strip()))
    return records


def tokenized_claim(in_file, out_file):
    """Tokenize the 'claim' field of every record in *in_file* with a
    CoreNLP tokenizer (pos/lemma/ner annotators) and write the updated
    records to *out_file* as jsonl.  Prints the first three before/after
    pairs as a sanity check.
    """
    corenlp_classpath = str(config.PRO_ROOT / 'dep_packages/stanford-corenlp-full-2017-06-09/*')
    print("*" * 20, "corenlp jar path:{}".format(corenlp_classpath))
    drqa_yixin.tokenizers.set_default('corenlp_classpath', corenlp_classpath)
    tok = CoreNLPTokenizer(annotators=['pos', 'lemma', 'ner'])

    records = load_jsonl(in_file)
    print("*" * 20, "starting tokenizing and some examples:")
    for idx, record in enumerate(tqdm(records)):
        before = record['claim']
        record['claim'] = ' '.join(easy_tokenize(record['claim'], tok))
        # Show only the first few examples so the log stays readable.
        if idx < 3:
            print("   origin claim:{}".format(before))
            print("tokenized claim:{}".format(record['claim']))
            print("-" * 50)

    save_jsonl(records, out_file)
    print("*" * 20, f"total tokenize {len(records)} items")


def tokenized_claim_list(in_list):
    """Tokenize the 'claim' field of each record in *in_list* in place
    (pos/lemma annotators only) and return the same list.
    """
    corenlp_classpath = str(config.PRO_ROOT / 'dep_packages/stanford-corenlp-full-2017-06-09/*')
    drqa_yixin.tokenizers.set_default('corenlp_classpath', corenlp_classpath)
    tokenizer = CoreNLPTokenizer(annotators=['pos', 'lemma'])

    for record in tqdm(in_list):
        record['claim'] = ' '.join(easy_tokenize(record['claim'], tokenizer))

    return in_list


if __name__ == '__main__':
    # Entry point: tokenize the claims of the FEVER train split and write
    # them under DATA_ROOT/tokenized_fever. Swap in the commented line to
    # process the dev split instead.
    #tokenized_claim(config.FEVER_DEV_JSONL, config.DATA_ROOT / "tokenized_fever/dev.jsonl")
    tokenized_claim(config.FEVER_TRAIN_JSONL, config.DATA_ROOT / "tokenized_fever/train.jsonl")
