import os
import json
import pandas as pd
from app.config.config import Config
from datasets import Dataset, DatasetDict

class DataProcess:
    """Prepares the NER corpus: builds the vocabulary file, converts the
    JSON-lines corpora to CSV, and encodes labels into a HF DatasetDict."""

    def __init__(self):
        # Project-wide configuration object holding all file paths used below.
        self.config = Config()

    def build_vocab(self):
        """Read the char-to-id JSON mapping and write the vocabulary to a txt file.

        The first and last keys of the mapping are dropped (presumably special
        tokens — TODO confirm against the CHAT_TO_ID file format), then
        '[PAD]' and '[UNK]' are prepended so they occupy indices 0 and 1.
        """
        # Context manager closes the handle promptly; the original
        # json.load(open(...)) leaked it until garbage collection.
        with open(self.config.CHAT_TO_ID_PATH, mode='r', encoding='utf-8') as f:
            chat_to_id = json.load(f)
        unique_words = list(chat_to_id.keys())[1:-1]
        unique_words.insert(0, '[UNK]')
        unique_words.insert(0, '[PAD]')
        # Persist one token per line.
        with open(self.config.BILSTM_CRF_VOCAB_AIDOC_PATH, 'w', encoding="utf-8") as file:
            for word in unique_words:
                file.write(word + '\n')

    def load_corpus(self):
        """Convert the train and validate JSON-lines corpora into CSV files."""
        self.process_text(self.config.TRAIN_TXT_PATH, 'train')
        self.process_text(self.config.VALIDATE_TXT_PATH, 'valid')

    def process_text(self, path, type):
        """Convert one JSON-lines corpus file into a CSV of inputs and labels.

        :param path: path to a JSON-lines file; each line is a dict with
                     'text' and 'label' sequences of equal length.
        :param type: output file stem ('train' / 'valid'). Name kept as-is
                     for backward compatibility even though it shadows the
                     builtin `type`.
        :return: None. Writes `<type>.csv` under NER_DATA_PATH.
        """
        data_inputs, data_labels = [], []
        # `with` closes the input file deterministically; the original
        # iterated a bare open() and leaked the handle.
        with open(path, mode='r', encoding='utf-8') as f:
            for line in f:
                data = json.loads(line)
                data_inputs.append(' '.join(data['text']))
                data_labels.append(' '.join(data['label']))
        data_df = pd.DataFrame({'data_inputs': data_inputs,
                                'data_labels': data_labels})
        # Default index is written on purpose: encode_label() reads it back
        # via index_col="Unnamed: 0".
        data_df.to_csv(os.path.join(self.config.NER_DATA_PATH, type + '.csv'))
        print(type, '数据量:', len(data_df))

    def encode_label(self):
        """Load the train/valid CSVs, encode labels to ids, and save the
        resulting DatasetDict to disk."""
        train_data = pd.read_csv(self.config.TRAIN_CSV_PATH, index_col="Unnamed: 0")
        valid_data = pd.read_csv(self.config.VALID_CSV_PATH, index_col="Unnamed: 0")
        corpus_data = DatasetDict({
            'train': Dataset.from_pandas(train_data),
            'valid': Dataset.from_pandas(valid_data),
        })
        corpus_data = corpus_data.map(self.data_handler,
                                      input_columns=['data_labels', 'data_inputs'], batched=True)
        # Persist the encoded dataset for the training pipeline.
        corpus_data.save_to_disk(self.config.BILSTM_CRF_DATA_AIDOC)

    def data_handler(self, data_labels, data_inputs):
        """Map space-separated BIO label strings to lists of integer ids.

        :param data_labels: batch of label strings, e.g. 'O B-dis I-dis'.
        :param data_inputs: batch of input strings, passed through unchanged.
        :return: dict with encoded 'data_labels' and untouched 'data_inputs'.
        :raises KeyError: if a label outside the known tag set is encountered.
        """
        label_to_index = {"O": 0, "B-dis": 1, "I-dis": 2, "B-sym": 3, "I-sym": 4}
        data_label_ids = [[label_to_index[label] for label in labels.split()]
                          for labels in data_labels]
        return {'data_labels': data_label_ids, 'data_inputs': data_inputs}

