import os
import torch
from torch.utils.data import Dataset


# 读取数据
# Read one split of a char-level BMES corpus.
def get_data(data_dir, file):
    """Read ``<file>.char.bmes`` from *data_dir*.

    The file holds one ``<char> <tag>`` pair per line, with blank lines
    separating sentences.

    Args:
        data_dir: directory containing the corpus files.
        file: split name, one of ``'train'``, ``'dev'``, ``'test'``.

    Returns:
        ``(word_all, tag_all)`` — parallel lists of sentences, sorted
        longest-first, with ``word_all[i]`` aligned to ``tag_all[i]``.

    Raises:
        ValueError: if *file* is not a known split name.
    """
    # Raise instead of assert: asserts are stripped under `python -O`.
    if file not in ('train', 'dev', 'test'):
        raise ValueError(f"file must be 'train', 'dev' or 'test', got {file!r}")

    with open(os.path.join(data_dir, file + ".char.bmes"), "r", encoding="utf-8") as f:
        data_all = f.read().split("\n")

    # All sentences / tag sequences collected so far.
    word_all = []
    tag_all = []

    # Buffers for the sentence currently being read.
    word_list = []
    tag_list = []

    for data in data_all:
        if data == "":  # blank line: sentence boundary
            # Guard so consecutive blank lines don't emit empty sentences.
            if word_list:
                word_all.append(word_list)
                tag_all.append(tag_list)
                word_list = []
                tag_list = []
        else:
            # Split once from the right so the token itself may contain a space.
            word, tag = data.rsplit(" ", 1)
            word_list.append(word)
            tag_list.append(tag)
    # Bug fix: keep the final sentence when the file ends without a blank line.
    if word_list:
        word_all.append(word_list)
        tag_all.append(tag_list)

    # Sort sentences and tags as pairs (longest first) so their alignment
    # never depends on sort stability.
    paired = sorted(zip(word_all, tag_all), key=lambda p: len(p[0]), reverse=True)
    word_all = [w for w, _ in paired]
    tag_all = [t for _, t in paired]
    return word_all, tag_all


def build_corpus(file, make_vocab=True, data_dir="dataset"):
    """Load a corpus split; optionally also build word/tag vocabularies.

    Returns ``(word_all, tag_all)`` when *make_vocab* is False, otherwise
    ``(word_all, tag_all, word2id, tag2id)``.
    """
    word_all, tag_all = get_data(data_dir, file)

    if not make_vocab:
        return word_all, tag_all

    word2id = build_map(word_all)
    tag2id = build_map(tag_all)
    # Unknown characters map to <UNK>; <PAD> fills shorter sequences in a batch.
    word2id['<UNK>'] = len(word2id)
    word2id['<PAD>'] = len(word2id)
    tag2id['<PAD>'] = len(tag2id)
    return word_all, tag_all, word2id, tag2id


# Assign each distinct element a sequential integer id, in first-seen order.
def build_map(lists):
    """Build an element → index mapping over all nested sequences."""
    maps = {}
    for seq in lists:
        for item in seq:
            # setdefault inserts len(maps) only when item is new.
            maps.setdefault(item, len(maps))
    return maps


# Dataset wrapper exposing index-encoded (word, tag) sentence pairs.
class NERDataset(Dataset):
    """Torch dataset mapping characters and tags to vocabulary indices."""

    def __init__(self, words, tags, word_2_index, tag_2_index):
        self.words = words
        self.tags = tags
        self.word_2_index = word_2_index
        self.tag_2_index = tag_2_index

    def __getitem__(self, index):
        """Return (word indices, tag indices) for the sentence at *index*."""
        sentence = self.words[index]
        labels = self.tags[index]

        # Characters missing from the vocabulary fall back to <UNK>.
        unk = self.word_2_index["<UNK>"]
        word_index = [self.word_2_index.get(ch, unk) for ch in sentence]
        tag_index = [self.tag_2_index[lab] for lab in labels]
        return word_index, tag_index

    def __len__(self):
        assert len(self.words) == len(self.tags)
        return len(self.words)

    # Collate function: pad a batch to its longest sequence.
    def pro_batch_data(self, batch_data):
        """Pad word/tag index lists to equal length and tensorize them.

        Returns ``(words, tags, batch_lens)`` where the tensors are
        ``long`` and placed on GPU when available.
        """
        word_pad = self.word_2_index["<PAD>"]
        tag_pad = self.tag_2_index["<PAD>"]

        batch_lens = [len(w) for w, _ in batch_data]
        batch_max_len = max(batch_lens)

        padded_words = []
        padded_tags = []
        for (w, t), n in zip(batch_data, batch_lens):
            padded_words.append(w + [word_pad] * (batch_max_len - n))
            padded_tags.append(t + [tag_pad] * (batch_max_len - n))

        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        words = torch.tensor(padded_words, dtype=torch.long, device=device)
        tags = torch.tensor(padded_tags, dtype=torch.long, device=device)
        return words, tags, batch_lens
