import json
import datasets
import pandas as pd
from transformers import AutoTokenizer


def check_json(xpath, xstrs, file_id):
    """Load a JSON array of objects from *xpath* and append one record per
    object to *xstrs*.

    Each appended record is a dict with keys:
      - 'text':    the object's 'text' field
      - 'id':      the object's 0-based position within this file
      - 'file_id': the caller-supplied *file_id*

    Parameters
    ----------
    xpath : str
        Path to a JSON file whose top level is a list of dicts, each
        containing a 'text' key.
    xstrs : list
        Output accumulator; records are appended in file order (mutated
        in place, nothing is returned).
    file_id : int
        Identifier recording which input file each record came from.
    """
    print(f'Reading {xpath}')
    with open(xpath, 'r', encoding='utf8') as fi:
        xcont = fi.read()
        print(f'Length: {len(xcont):,d}')
        xdata = json.loads(xcont)
    # enumerate replaces the original manual `xid` counter
    for xid, xobj in enumerate(xdata):
        xstrs.append({
            'text': xobj['text'],
            'id': xid,
            'file_id': file_id,
        })


if __name__ == '__main__':

    def _main():
        """Tokenize CMeEE-V2 texts with bert-base-chinese and report which
        dataset rows contain words that the tokenizer splits into several
        sub-word tokens (detected via duplicated word ids)."""
        json_paths = [
            r'D:\_dell7590_root\local\LNP_datasets\med\CBLUE\CMeEE-V2\CMeEE-V2_dev.limit-20.json',
            # r'D:\_dell7590_root\local\LNP_datasets\med\CBLUE\CMeEE-V2\CMeEE-V2_dev.json',
            r'D:\_dell7590_root\local\LNP_datasets\med\CBLUE\CMeEE-V2\CMeEE-V2_train.limit-20.json',
            # r'D:\_dell7590_root\local\LNP_datasets\med\CBLUE\CMeEE-V2\CMeEE-V2_train.json',
        ]

        # Gather {'text', 'id', 'file_id'} records from every input file.
        records = []
        for file_id, path in enumerate(json_paths):
            check_json(path, records, file_id)

        ds = datasets.Dataset.from_list(records)

        # model_name = 'bert-base-chinese'
        model_name = r'C:\Users\peter\.cache\huggingface\hub\models--bert-base-chinese\snapshots\8d2a91f91cc38c96bb8b4556ba70c392f8d5ee55'
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        print(tokenizer)

        # (file_id, id) pairs whose tokenization produced a duplicated
        # word id, i.e. some word became more than one sub-word token.
        found_rows = []

        def process(examples):
            # https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__.is_split_into_words
            # is_split_into_words is deliberately NOT set: the raw strings
            # are handed to the tokenizer as-is.
            # tokenized = tokenizer(examples['text'], truncation=True, is_split_into_words=True, max_length=512)
            tokenized = tokenizer(examples['text'], truncation=True, max_length=512, padding=False)
            batch_word_ids = []
            for idx in range(len(examples['text'])):
                wids = tokenized.word_ids(batch_index=idx)
                # Drop the None entries produced by special tokens, then
                # count repeats: any repeat means a word was split.
                non_special = [w for w in wids if w is not None]
                dup_total = pd.Series(non_special, dtype=int).duplicated().astype(int).sum()
                if dup_total:
                    tup = (examples['file_id'][idx], examples['id'][idx], )
                    print('Found:', tup)
                    found_rows.append(tup)
                batch_word_ids.append(wids)
            tokenized['word_ids'] = batch_word_ids
            return tokenized

        ds_wids = ds.map(process, batched=True, batch_size=100)
        print(ds_wids)
        print(ds_wids[:4])
        print('dataset id with duplicated word_ids:', found_rows)

    _main()
