# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.9.2
# @Software:  PyCharm
# @FileName:  handle_data_bert_inputs.py
# @CTime:     2021/5/23 12:09   
# @Author:    Haiyang Yu
# @Email:     yuys0602@163.com
# @UTime:     2021/5/23 12:09
#
# @Description:
#     xxx
#     xxx
#
import os
import codecs
import json
import logging
from typing import List, Dict, Optional
from transformers import AutoTokenizer

logger = logging.getLogger(__name__)


def get_tag_map(path='tags.txt'):
    """Load the tag vocabulary and return a tag -> index mapping.

    Args:
        path: Path to the tag file, one tag per line. Defaults to
            'tags.txt' in the current working directory (original behavior).

    Returns:
        dict mapping each stripped tag string to its 0-based line index.
    """
    with codecs.open(path, 'r', 'utf-8') as f:
        # Line order defines the id assignment, so the file must be stable.
        return {line.strip(): idx for idx, line in enumerate(f)}

def get_tagid_map(path='tags.txt'):
    """Load the tag vocabulary and return an index -> tag mapping.

    Inverse of ``get_tag_map``: keys are 0-based line indices, values are
    the stripped tag strings.

    Args:
        path: Path to the tag file, one tag per line. Defaults to
            'tags.txt' in the current working directory (original behavior).

    Returns:
        dict mapping each 0-based line index to its tag string.
    """
    with codecs.open(path, 'r', 'utf-8') as f:
        return {idx: line.strip() for idx, line in enumerate(f)}

def main(input_file, output_file, model_name='hfl/rbt3'):
    """Convert a token/tag JSONL file into BERT input ids and tag ids.

    Each input line is a JSON object with keys 'tokens' (list of str) and
    'tags' (list of str, same length). The output file contains one JSON
    object per line with keys 'ids' (token ids wrapped in [CLS]/[SEP]) and
    'tags' (tag ids, with tag id 0 padding the special-token positions).

    Args:
        input_file: Path to the input JSONL file.
        output_file: Path to the output JSONL file (overwritten).
        model_name: Hugging Face tokenizer name. Defaults to 'hfl/rbt3'
            (original hard-coded value).

    Raises:
        ValueError: If a line's ids and tags differ in length.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tag_map = get_tag_map()
    # Use the tokenizer's own special-token ids rather than hard-coding
    # 101/102, which are only correct for BERT-style vocabularies.
    cls_id = tokenizer.cls_token_id if tokenizer.cls_token_id is not None else 101
    sep_id = tokenizer.sep_token_id if tokenizer.sep_token_id is not None else 102

    outputs = []
    with codecs.open(input_file, 'r', 'utf-8') as read_file:
        # Iterate the file directly instead of readlines() to avoid
        # loading the whole corpus into memory.
        for each_line in read_file:
            line = json.loads(each_line)
            ids = [cls_id] + tokenizer.convert_tokens_to_ids(line['tokens']) + [sep_id]
            tags = [0] + [tag_map[i] for i in line['tags']] + [0]
            # Raise instead of assert: asserts are stripped under `python -O`.
            if len(ids) != len(tags):
                raise ValueError('this sent len is not equal with tags')
            outputs.append({
                'ids': ids,
                'tags': tags,
            })

    with codecs.open(output_file, 'w', 'utf-8') as f:
        # Use '\n' unconditionally: JSONL is '\n'-delimited, and codecs.open
        # performs no newline translation, so os.linesep would emit
        # platform-dependent (e.g. '\r\n') separators on Windows.
        for record in outputs:
            f.write(json.dumps(record, ensure_ascii=False) + '\n')


if __name__ == '__main__':
    # NOTE(review): the actual conversion entry point is disabled; the
    # script currently only prints the id -> tag mapping for inspection.
    # main('test.original.txt', 'test.txt')
    print(get_tagid_map())