# coding:utf-8

"""Classify data pre-deal

Author:
    name: reeseimk
    email: reeseimk@163.com

Homepage: https://gitee.com/reeseimk/mindspore_bert
"""

import sys
import os

# Make the current and parent directories importable regardless of the
# caller's working directory.
# BUG FIX: the original passed the string literal "__file__" to
# os.path.dirname, which always evaluates to "" and therefore appended the
# *current working directory* instead of this module's directory. Using the
# real __file__ anchors the paths at this module's location.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "./")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
import json
import pandas as pd
import mindspore.dataset as ds
import mindspore as ms
import src.generate_mindrecord.tokenization as tokenization

from utils.set_config import SetConfig

class DealDataCLS():
    """In-memory text-classification dataset (TNEWS-style news corpus).

    Reads a JSON-lines file where each line holds at least a ``sentence``
    and a ``label_desc`` field, converts every sentence into fixed-length
    BERT input features, and exposes them through ``__getitem__`` /
    ``__len__`` so an instance can feed ``mindspore.dataset.GeneratorDataset``.
    """

    def __init__(self, data_path: str = ""):
        """Load and pre-process the whole corpus at construction time.

        Args:
            data_path: corpus path, relative to this module's parent directory.
        """
        # Fixed mapping from corpus label names to integer class ids.
        self.label_map = {
            "news_military": 0,
            "news_stock": 1,
            "news_tech": 2,
            "news_sports": 3,
            "news_edu": 4,
            "news_story": 5,
            "news_culture": 6,
            "news_game": 7,
            "news_agriculture": 8,
            "news_travel": 9,
            "news_finance": 10,
            "news_house": 11,
            "news_entertainment": 12,
            "news_world": 13,
            "news_car": 14
        }
        # BUG FIX: the original passed the string literal "__file__" to
        # os.path.dirname, which always yields "" and silently resolved the
        # data path against the current working directory. Use the real
        # __file__ so paths are anchored at this module's location.
        self.currentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "./"))
        self.parentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
        self.path = self.get_abs_path(data_path)
        self.inputs, self.input_masks, self.segments, self.labels = [], [], [], []
        self.max_seq_length = 128
        self.set_config = SetConfig()
        self.args, self.base_bert_cfg = self.set_config.get_config()
        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.args.vocab_file_path, do_lower_case=True)

        self._load()

    def get_abs_path(self, path_relative: str = "") -> str:
        """Resolve *path_relative* against the project root (parent dir)."""
        return os.path.join(self.parentDir, path_relative)

    def convert_single_example(self, text, max_seq_length, label):
        """Convert one sentence into BERT input feature tensors.

        Args:
            text: raw sentence to tokenize.
            max_seq_length: fixed model input length; sequences are truncated
                or zero-padded to exactly this length.
            label: list with the integer class id for this example.

        Returns:
            Tuple ``(input_ids, input_mask, segment_ids, label_ids)`` of
            ``ms.int32`` tensors.
        """
        tokens = self.tokenizer.tokenize(text)
        # Reserve two positions for the [CLS] and [SEP] markers.
        if len(tokens) > max_seq_length - 2:
            tokens = tokens[0:(max_seq_length - 2)]

        all_tokens = ["[CLS]"] + list(tokens) + ["[SEP]"]
        # Single-sentence task: every token belongs to segment 0.
        segment_ids = [0] * len(all_tokens)

        input_ids = tokenization.convert_tokens_to_ids(self.args.vocab_file_path, all_tokens)
        input_mask = [1] * len(input_ids)
        # Right-pad ids / mask / segments with zeros up to the fixed length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)

        input_ids = ms.Tensor(input_ids, dtype=ms.int32)
        input_mask = ms.Tensor(input_mask, dtype=ms.int32)
        segment_ids = ms.Tensor(segment_ids, dtype=ms.int32)
        label_ids = ms.Tensor(label, dtype=ms.int32)
        return input_ids, input_mask, segment_ids, label_ids

    def _load(self):
        """Read the JSON-lines corpus and cache feature tensors in memory."""
        with open(self.path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        for line_item in lines:
            record = json.loads(line_item.strip())
            input_ids, input_mask, segment_ids, label_ids = self.convert_single_example(
                record['sentence'], self.max_seq_length, [self.label_map[record['label_desc']]])
            self.inputs.append(input_ids)
            self.input_masks.append(input_mask)
            self.segments.append(segment_ids)
            self.labels.append(label_ids)

    def __getitem__(self, idx):
        """Return the idx-th (inputs, input_mask, segments, label) tuple."""
        return self.inputs[idx], self.input_masks[idx], self.segments[idx], self.labels[idx]

    def __len__(self):
        return len(self.inputs)
    
def load_data_cls(data_path="data/train.json", batch_size: int=4):
    """Build a batched, unshuffled GeneratorDataset over the CLS corpus."""
    source = DealDataCLS(data_path=data_path)
    columns = ["inputs", "input_masks", "segments", "labels"]
    dataset = ds.GeneratorDataset(source, column_names=columns, shuffle=False)
    return dataset.batch(batch_size, drop_remainder=True)


class DealDataNER():
    """In-memory named-entity-recognition dataset.

    Reads a JSON-lines file where each line holds a ``text`` and a ``label``
    field, plus a chunk-definition JSON file mapping each entity type to its
    ``[B-tag, I-tag]`` pair. Produces fixed-length BERT input features and an
    aligned per-token label-id sequence for GeneratorDataset consumption.
    """

    def __init__(self, data_path: str = "", chunk_path: str = "", max_seq_length: int = 128):
        """Load the chunk definitions and (when both paths are set) the corpus.

        Args:
            data_path: corpus path, relative to this module's parent directory.
            chunk_path: chunk-definition JSON path, same base directory.
            max_seq_length: fixed model input length.
        """
        self.chunk_path = chunk_path
        self.data_path = data_path
        # Filled by get_chunk_dict(), e.g. {"Loc": ["B-Loc", "I-Loc"]}.
        self.chunk_dict = None
        # Filled by get_chunk_dict(), e.g. {"B-Loc": 0, "I-Loc": 1, "O": 2}.
        self.label_map = None
        # BUG FIX: the original passed the string literal "__file__" to
        # os.path.dirname, which always yields "" and silently resolved
        # paths against the current working directory. Use the real
        # __file__ so paths are anchored at this module's location.
        self.currentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "./"))
        self.parentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
        self.path = self.get_abs_path(data_path)
        self.inputs, self.input_masks, self.segments, self.seq_length, self.labels = [], [], [], [], []
        self.max_seq_length = max_seq_length
        self.set_config = SetConfig()
        self.args, self.base_bert_cfg = self.set_config.get_config()
        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.args.vocab_file_path, do_lower_case=True)

        if self.chunk_path:
            self.get_chunk_dict()

        # Loading the corpus needs the label map, so require both paths.
        if self.chunk_path and self.data_path:
            self._load()

    def get_abs_path(self, path_relative: str = "") -> str:
        """Resolve *path_relative* against the project root (parent dir)."""
        return os.path.join(self.parentDir, path_relative)

    def get_chunk_dict(self):
        """Load the chunk definitions and derive the tag-to-id label map.

        Tag ids follow the order tags appear in the chunk file, with the
        "O" (outside) tag appended last.
        """
        self.chunk_path = self.get_abs_path(path_relative=self.chunk_path)
        with open(self.chunk_path, 'r', encoding='utf-8') as f:
            self.chunk_dict = json.load(f)

        chunk_label_list = []
        for tags in self.chunk_dict.values():
            chunk_label_list += tags
        chunk_label_list.append('O')
        self.label_map = {tag: idx for idx, tag in enumerate(chunk_label_list)}

    def get_ner_label(self, text, labels, max_len):
        """Build the per-character label-id sequence aligned with the padded input.

        Args:
            text: raw sentence; spans index into its characters.
            labels: entity spans; each entry appears to be
                ``[start_index, span_length, chunk_type]`` — TODO confirm
                against the corpus format. May be empty/None.
            max_len: fixed sequence length of the model input.

        Returns:
            List of ``max_len`` label ids ("O" everywhere no entity applies,
            including the [CLS]/[SEP]/padding positions).
        """
        outside = self.label_map["O"]
        res_label = [outside] * len(text)
        if labels:
            for span in labels:
                begin_tag, inside_tag = self.chunk_dict[span[2]]
                b_id, i_id = self.label_map[begin_tag], self.label_map[inside_tag]
                res_label[span[0]] = b_id
                inside_len = span[1] - 1
                if inside_len:
                    for offset in range(1, inside_len + 1):
                        res_label[span[0] + offset] = i_id
        # Prepend O for [CLS], append O for [SEP], pad with O, then clip.
        res_label = [outside] + res_label + [outside] + [outside] * (max_len - len(text))
        return res_label[:max_len]

    def convert_single_example(self, text, max_seq_length, label):
        """Convert one sentence + entity spans into NER feature tensors.

        Returns:
            Tuple ``(input_ids, input_mask, segment_ids, seq_length,
            label_ids)`` of ``ms.int32`` tensors, where ``seq_length`` is the
            unpadded length (capped at ``max_seq_length``).
        """
        tokens = self.tokenizer.tokenize(text)
        # Reserve two positions for the [CLS] and [SEP] markers.
        if len(tokens) > max_seq_length - 2:
            tokens = tokens[0:(max_seq_length - 2)]

        all_tokens = ["[CLS]"] + list(tokens) + ["[SEP]"]
        # Single-sentence task: every token belongs to segment 0.
        segment_ids = [0] * len(all_tokens)

        input_ids = tokenization.convert_tokens_to_ids(self.args.vocab_file_path, all_tokens)
        input_mask = [1] * len(input_ids)
        # Record the true (unpadded) length, capped at the fixed length.
        seq_length = [min(len(input_ids), max_seq_length)]
        # Right-pad ids / mask / segments with zeros up to the fixed length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)

        input_ids = ms.Tensor(input_ids, dtype=ms.int32)
        input_mask = ms.Tensor(input_mask, dtype=ms.int32)
        segment_ids = ms.Tensor(segment_ids, dtype=ms.int32)
        seq_length = ms.Tensor(seq_length, dtype=ms.int32)
        label = self.get_ner_label(text, label, max_seq_length)
        label_ids = ms.Tensor(label, dtype=ms.int32)
        return input_ids, input_mask, segment_ids, seq_length, label_ids

    def _load(self):
        """Read the JSON-lines corpus and cache feature tensors in memory."""
        with open(self.path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        for line_item in lines:
            record = json.loads(line_item.strip())
            input_ids, input_mask, segment_ids, seq_length, label_ids = self.convert_single_example(
                record['text'], self.max_seq_length, record['label'])
            self.inputs.append(input_ids)
            self.input_masks.append(input_mask)
            self.segments.append(segment_ids)
            self.seq_length.append(seq_length)
            self.labels.append(label_ids)

    def __getitem__(self, idx):
        """Return the idx-th (inputs, input_mask, segments, seq_length, label) tuple."""
        return self.inputs[idx], self.input_masks[idx], self.segments[idx], self.seq_length[idx], self.labels[idx]

    def __len__(self):
        return len(self.inputs)
    
def load_data_ner(data_path: str="data/ner_data/ner_test.json", chunk_path: str="data/ner_data/chunk.json", batch_size: int=4):
    """Build a batched, unshuffled GeneratorDataset over the NER corpus."""
    source = DealDataNER(data_path=data_path, chunk_path=chunk_path)
    columns = ["inputs", "input_masks", "segments", "seq_length", "labels"]
    dataset = ds.GeneratorDataset(source, column_names=columns, shuffle=False)
    return dataset.batch(batch_size, drop_remainder=True)

if __name__ == "__main__":
    # Smoke test: build the NER dataset and dump every batch.
    # (The CLS pipeline can be exercised the same way via load_data_cls.)
    batch_size = 4

    dataset_ner = load_data_ner(
        data_path="data/ner_data/ner_test.json",
        chunk_path="data/ner_data/chunk.json",
        batch_size=4,
    )
    for batch in dataset_ner:
        print(batch)
