#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/3/21
# @Author  : geekhch
# @Email   : geekhch@qq.com
# @Desc    : 封装数据集, 使用Dataloader读数据

import os

from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_sequence
import torch
import json, re
from myVocab import vocab
from utils import logger
from myPath import *
import numpy as np


def tags2ids(tags: list) -> dict:
    """Map each classification tag to its integer index.

    :param tags: list of tag strings (classification labels)
    :return: dict mapping tag -> position in *tags*
    """
    return {tag: i for i, tag in enumerate(tags)}

class CLUE(Dataset):
    """CLUENER2020 dataset for char-based NER.

    Original (raw) label categories:
        address, book, company, game, government, movie, name,
        organization, position, scene.
    The fine-grained annotation is inconsistent, so categories are merged
    into four coarse ones:
        address      <- address, scene
        composition  <- book, game, movie
        person       <- name, position
        organization <- company, government, organization
    """

    # Tag set of the raw (un-merged) annotation scheme.
    raw_tags = ['<begin>', '<end>', 'B-address', 'I-address', 'B-book', 'I-book', 'B-company', 'I-company', 'B-game',
            'I-game', 'B-government', 'I-government', 'B-movie', 'I-movie', 'B-name', 'I-name',
            'B-organization', 'I-organization', 'B-position', 'I-position', 'B-scene', 'I-scene', 'O']
    # Raw entity type -> merged coarse type. Raw types absent from this map
    # are skipped when building labels. 'position' was missing originally,
    # which silently dropped every position entity even though the class
    # docstring merges it into 'person' -- it is included now.
    raw2tags = {'address': 'address', 'scene': 'address',
                'book': 'composition', 'game': 'composition', 'movie': 'composition',
                'company': 'organization', 'government': 'organization', 'organization': 'organization',
                'name': 'person', 'position': 'person'}

    # Merged (coarse) tag set actually used for training.
    tags = ['<begin>', '<end>', 'B-address', 'I-address', 'B-composition', 'I-composition',
            'B-person', 'I-person', 'B-organization', 'I-organization', 'O']
    tag2id = tags2ids(tags)

    def __init__(self, data_path):
        """Load a CLUENER json-lines file (one JSON object per line)."""
        logger.info(f"load dataset CLUE: {data_path}")
        with open(data_path, encoding='utf8') as fp:
            content = fp.read().strip().split('\n')
            self.corpus = list(map(json.loads, content))

        # Persist an {entity_type: id} map derived from the merged tag set.
        types = list(set([tag[2:] for tag in self.tags if 'B' in tag]))
        types = dict(zip(types, range(len(types))))
        with open(f'{DULE_DIR}/tag2id.json', 'w') as out:
            json.dump(types, out, ensure_ascii=False)

    def __len__(self):
        return len(self.corpus)

    def __getitem__(self, idx):
        '''
        Char-based encoding of one sample.
        return: (tensor) sentence as word-id sequence,
                (tensor) BIO label-id sequence, shape (seq_len,),
                raw text
        '''
        sample = self.corpus[idx]
        text_ids = CLUE.sentence2ids(sample['text'])
        label = ['O'] * len(text_ids)
        labels = sample['label']
        for raw_type, content in labels.items():
            merged = CLUE.raw2tags.get(raw_type)
            if merged is None:
                continue  # raw type not kept in the merged scheme
            for name, locs in content.items():
                for loc in locs:
                    # loc is an inclusive [start, end] character span.
                    label[loc[0]] = 'B-' + merged
                    for i in range(loc[0] + 1, loc[1] + 1):
                        label[i] = 'I-' + merged
        label_ids = torch.tensor([self.tag2id[tag] for tag in label])
        return text_ids, label_ids, sample['text']

    @staticmethod
    def sentence2ids(sentence):
        """Convert a string to a tensor of vocabulary indices (one per char)."""
        return torch.tensor(list(map(vocab.word2index, sentence)), dtype=torch.int64)

    @staticmethod
    def collate_fn(data):
        # Batch collator for DataLoader.
        '''
        return: (padded id-sentence batch, per-sentence lengths), padded label ids, raw texts
        '''
        # Sort by length descending so pack_padded_sequence can be used downstream.
        data.sort(key=lambda x: len(x[0]), reverse=True)
        lengths = torch.tensor([len(s[0]) for s in data])
        sents = [sample[0] for sample in data]
        labels = [sample[1] for sample in data]
        rawtext = [sample[2] for sample in data]
        sents = pad_sequence(sents, batch_first=True, padding_value=vocab.word2index('<pad>'))
        labels = pad_sequence(labels, batch_first=True, padding_value=CLUE.tag2id['<end>'])
        return (sents, lengths), labels, rawtext

    @staticmethod
    def pred2tags(softmax: torch.Tensor):
        '''
        :param softmax: model output as softmax probabilities over CLUE.tags
        :return: the predicted tag string
        '''
        return CLUE.tags[softmax.argmax().item()]

    @staticmethod
    def get_loader(mode='train', batch_size=64):
        """Build a DataLoader over the train or dev split.

        Fixes vs. original: the train/dev file paths were swapped (non-train
        mode loaded train.json), and collate_fn mistakenly referenced
        DUIE.collate_fn, which pads labels with DUIE's (different) tag ids.
        """
        if mode != 'train':
            return DataLoader(CLUE(f'{CLUENER_DIR}/dev.json'),
                       collate_fn=CLUE.collate_fn,
                       batch_size=32,
                       num_workers=2,
                       shuffle=True)
        else:
            return DataLoader(CLUE(f'{CLUENER_DIR}/train.json'),
                              collate_fn=CLUE.collate_fn,
                              batch_size=batch_size,
                              num_workers=4,
                              shuffle=True)

class DUIE(Dataset):
    """DuIE dataset repurposed for subject NER (BIO tags over spo subjects)."""

    # BIO tag set for subjects, loaded from the pre-built label/relation map.
    spo_info = json.load(open(f'{DULE_DIR}/raw_data/label2relation.json'))
    tags = spo_info['BIO-sub']
    tag2id = tags2ids(tags)

    def __init__(self, data_path=f'{DULE_DIR}/raw_data/dev_data.json'):
        """Load a DuIE json-lines file (one JSON object per line)."""
        logger.info(f"load dataset DUIE for NER: {data_path}")
        with open(data_path, encoding='utf8') as fp:
            content = fp.read().strip().split('\n')
            self.corpus = list(map(json.loads, content))

    def __len__(self):
        return len(self.corpus)

    def __getitem__(self, item):
        """Return (char-id tensor, BIO label-id tensor, raw text).

        Every occurrence of each spo subject in the text is tagged with
        B-/I-<subject_type>; later spo entries overwrite earlier overlaps.
        """
        sample = self.corpus[item]
        text = sample['text']
        spo_list = sample['spo_list']
        sent_ids = torch.tensor(list(map(vocab.word2index, text)), dtype=torch.int64)
        label = ['O'] * len(sent_ids)
        for spo in spo_list:
            sub = spo['subject']
            sub_type = spo['subject_type']
            start = 0
            if sub == '':
                continue  # str.find('') matches at every index -> endless loop
            while True:
                loc = text.find(sub, start)
                if loc == -1:
                    break
                begin = loc
                end = loc + len(sub)
                start = end

                label[begin] = 'B-' + sub_type
                for i in range(begin + 1, end):
                    label[i] = 'I-' + sub_type
        label_ids = torch.tensor([self.tag2id[tag] for tag in label])
        return sent_ids, label_ids, sample['text']

    @staticmethod
    def collate_fn(data):
        # Batch collator for DataLoader.
        '''
        return: (padded id-sentence batch, per-sentence lengths), padded label ids, raw texts
        '''
        # Sort by length descending so pack_padded_sequence can be used downstream.
        data.sort(key=lambda x: len(x[0]), reverse=True)
        lengths = torch.tensor([len(s[0]) for s in data])
        sents = [sample[0] for sample in data]
        labels = [sample[1] for sample in data]
        rawtext = [sample[2] for sample in data]
        sents = pad_sequence(sents, batch_first=True, padding_value=vocab.word2index('<pad>'))
        # NOTE(review): assumes '<end>' exists in spo_info['BIO-sub'] -- confirm.
        labels = pad_sequence(labels, batch_first=True, padding_value=DUIE.tag2id['<end>'])
        return (sents, lengths), labels, rawtext

    @staticmethod
    def pred2tags(softmax: torch.Tensor):
        '''
        :param softmax: model output as softmax probabilities over DUIE.tags
        :return: the predicted tag string
        '''
        # Fixed: originally indexed CLUE.tags, a different tag set, so the
        # returned tag string did not match DUIE's label space.
        return DUIE.tags[softmax.argmax().item()]

    @staticmethod
    def get_loader(mode='train', batch_size=64):
        """Build a DataLoader over the DuIE train (mode='train') or dev split."""
        if mode != 'train':
            return DataLoader(DUIE(f'{DULE_DIR}/raw_data/dev_data.json'),
                       collate_fn=DUIE.collate_fn,
                       batch_size=32,
                       num_workers=2,
                       shuffle=True)
        else:
            return DataLoader(DUIE(f'{DULE_DIR}/raw_data/train_data.json'),
                              collate_fn=DUIE.collate_fn,
                              batch_size=batch_size,
                              num_workers=2,
                              shuffle=True)

if __name__ == '__main__':
    # Smoke test: iterate the DUIE dev split in batches of 2.
    # Fixed: batches were collated with CLUE.collate_fn, which pads labels
    # with CLUE.tag2id['<end>'] -- a different tag space than DUIE's.
    dataset = DUIE()
    dataloader = DataLoader(dataset, 2, collate_fn=DUIE.collate_fn)
    for batch in dataloader:
        print(batch)
