# -*- coding: utf-8 -*-

import os
import sys
import torch
import numpy as np
import jieba
from typing import Dict, Any

# Special tokens for BERT-style vocabularies:
# [CLS] is prepended to every sequence; [SEP] marks the end of a sentence.
CLS, SEP = '[CLS]', '[SEP]'


def __to_tensor(entities, tokens, types, raw_entities, pad_idx, device) -> tuple:
    """Turn batch lists into tensors, sorted by descending true length.

    `entities` (and `tokens`/`types` when given) are converted to LongTensors,
    every row is reordered longest-first, and the tensors are moved to
    `device`. `raw_entities` (a numpy array or None) is reordered in lockstep
    but stays on the host.

    Returns a 5-tuple: (entities, tokens, types, masks, raw_entities), where
    `masks` is True at non-pad positions. Entries passed in as None come back
    as None.
    """
    entity_tensor = torch.LongTensor(entities)
    token_tensor = None if tokens is None else torch.LongTensor(tokens)
    type_tensor = None if types is None else torch.LongTensor(types)

    # Non-pad positions; their per-row count is the true sequence length.
    mask = entity_tensor != pad_idx
    lengths = mask.sum(dim=1).long()
    # Longest-first ordering (the usual precondition for packed RNN input).
    order = torch.sort(lengths, dim=0, descending=True)[1]

    entity_tensor = entity_tensor[order].to(device)
    mask = mask[order].to(device)
    if token_tensor is not None:
        token_tensor = token_tensor[order].to(device)
    if type_tensor is not None:
        type_tensor = type_tensor[order].to(device)
    if raw_entities is not None:
        raw_entities = raw_entities[order]
    return entity_tensor, token_tensor, type_tensor, mask, raw_entities


def __my_split(line):
    """Split a training line into (entity_text, label).

    The label is the last whitespace-separated field; all preceding fields
    (the entity text may itself contain spaces) are re-joined with single
    spaces.

    Fix: the original returned a tuple only for >2 fields and the raw list
    otherwise, so the 2-field case worked purely by unpacking coincidence.
    Now any line with a label returns a uniform 2-tuple. A line with fewer
    than two fields has no label; the raw field list is returned unchanged
    so the caller's 2-value unpacking fails loudly rather than silently.
    """
    fields = line.split()
    if len(fields) >= 2:
        return " ".join(fields[:-1]), fields[-1]
    return fields


def training_iterator(train_file: str,
                      batch_size: int,
                      max_len: int,
                      char_vocabs: Dict[str, int],
                      word_vocabs: Dict[str, int],
                      type_vocabs: Dict[str, int],
                      unk_idx: int = 1,
                      pad_idx: int = 0,
                      is_for_bert: bool = False,
                      device: torch.device = torch.device("cuda:0")):
    """Stream training batches from a labelled text file.

    Each non-blank line is "entity_text label" (the label is the last
    whitespace-separated field). Lines are encoded to fixed length `max_len`
    at both the character level and the word level (word ids are repeated
    once per character so both sequences stay aligned), batched, tensorized
    via __to_tensor and yielded as
    (entities, tokens, types, masks, raw_entities).
    A final, possibly smaller, batch is flushed at end of file.

    Fixes vs. the original: side-effecting conditional expressions replaced
    with plain statements, the dead `line is None` check and the useless
    `f.seek(0, 0)` after the loop removed, and the `lambda` wrapper around
    `jieba.cut` dropped. Behavior is unchanged.
    """
    entities, tokens, types, raw = [], [], [], []
    count = 0

    with open(train_file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            raw_entity, label = __my_split(line)
            # One character id per character; unknown chars map to unk_idx.
            entity = [char_vocabs.get(char, unk_idx) for char in raw_entity]
            # One word id per *character* of the word, keeping char/word
            # sequences the same length.
            token = [word_vocabs.get(tk, unk_idx) for tk in jieba.cut(raw_entity) for _ in tk]
            if len(entity) == 0:
                continue
            if is_for_bert:
                # BERT needs a leading [CLS]; a trailing [SEP] is added below.
                entity = [char_vocabs.get(CLS)] + entity
                token = [word_vocabs.get(CLS)] + token

            if len(entity) < max_len:
                # Pad up to max_len - 1, then fill the final slot.
                entity.extend([pad_idx] * (max_len - len(entity) - 1))
                token.extend([pad_idx] * (max_len - len(token) - 1))
                # NOTE(review): for BERT the [SEP] lands *after* the padding,
                # not right after the text — confirm this is intended.
                entity.append(char_vocabs.get(SEP) if is_for_bert else pad_idx)
                token.append(word_vocabs.get(SEP) if is_for_bert else pad_idx)
            else:
                # Too long: keep the tail — the suffix of an entity name is
                # assumed to carry the more useful signal.
                if is_for_bert:
                    entity = [char_vocabs.get(CLS)] + entity[(len(entity) - max_len + 2):] + [char_vocabs.get(SEP)]
                    token = [word_vocabs.get(CLS)] + token[(len(token) - max_len + 2):] + [word_vocabs.get(SEP)]
                else:
                    entity = entity[(len(entity) - max_len):]
                    token = token[(len(token) - max_len):]
            # NOTE(review): an unknown label yields None here and will break
            # torch.LongTensor downstream — verify type_vocabs is complete.
            label = type_vocabs.get(label)

            entities.append(entity)
            tokens.append(token)
            types.append(label)
            raw.append(raw_entity)
            count += 1

            if count == batch_size:
                yield __to_tensor(entities, tokens, types, np.array(raw), pad_idx, device)
                entities, tokens, types, raw = [], [], [], []
                count = 0
        # Flush the final partial batch.
        if count > 0:
            yield __to_tensor(entities, tokens, types, np.array(raw), pad_idx, device)


def testing_iterator(test_file: str,
                     batch_size: int,
                     max_len: int,
                     char_vocabs: Dict[str, int],
                     word_vocabs: Dict[str, int],
                     unk_idx: int = 1,
                     pad_idx: int = 0,
                     is_for_bert: bool = False,
                     device: torch.device = torch.device("cuda:0")):
    """Stream unlabelled test batches from a text file.

    Each non-blank line is one entity string, encoded to fixed length
    `max_len` at the character and word level (word ids repeated per
    character to stay aligned), batched and yielded as
    (entities, tokens, None, masks, raw_lines). A final, possibly smaller,
    batch is flushed at end of file.

    Fixes vs. the original: dead `line is None` check removed and the
    `lambda` wrapper around `jieba.cut` dropped. Behavior is unchanged.
    """
    entities, tokens, raw = [], [], []
    count = 0

    with open(test_file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            entity = [char_vocabs.get(char, unk_idx) for char in line]
            # One word id per character of each word, keeping alignment.
            token = [word_vocabs.get(tk, unk_idx) for tk in jieba.cut(line) for _ in tk]
            if len(entity) == 0:
                continue
            if is_for_bert:
                entity = [char_vocabs.get(CLS)] + entity
                token = [word_vocabs.get(CLS)] + token
            if len(entity) < max_len:
                entity.extend([pad_idx] * (max_len - len(entity)))
                token.extend([pad_idx] * (max_len - len(token)))
            else:
                # Keep the tail of over-long inputs.
                if is_for_bert:
                    # NOTE(review): unlike training_iterator, no trailing
                    # [SEP] is added here — confirm the asymmetry is intended.
                    entity = [char_vocabs.get(CLS)] + entity[(len(entity) - max_len + 1):]
                    token = [word_vocabs.get(CLS)] + token[(len(token) - max_len + 1):]
                else:
                    entity = entity[(len(entity) - max_len):]
                    token = token[(len(token) - max_len):]

            raw.append(line)
            entities.append(entity)
            tokens.append(token)
            count += 1

            if count == batch_size:
                yield __to_tensor(entities, tokens, None, np.array(raw), pad_idx, device)
                entities, tokens, raw = [], [], []
                count = 0
        if count > 0:
            # `yield`, not `return`: a `return` in a generator would drop the
            # last partial batch.
            yield __to_tensor(entities, tokens, None, np.array(raw), pad_idx, device)


class TrainIteratorWrapper(object):
    """Re-iterable wrapper around training_iterator.

    training_iterator is a generator and is exhausted after one pass; this
    wrapper rebuilds it on every __iter__ call so the same object can be
    looped over once per epoch.
    """

    def __init__(self, *args):
        # Positional args are forwarded verbatim to training_iterator.
        self.args = args
        self._gen = None  # live generator backing direct next() calls

    def __iter__(self):
        return training_iterator(*self.args)

    def __next__(self):
        # Bug fix: the original created a *fresh* generator on every call,
        # so next(wrapper) returned the first batch forever. Keep one
        # generator alive and restart only once it is exhausted.
        if self._gen is None:
            self._gen = self.__iter__()
        try:
            return next(self._gen)
        except StopIteration:
            self._gen = None
            raise

class TestIteratorWrapper(object):
    """Re-iterable wrapper around testing_iterator.

    testing_iterator is a generator and is exhausted after one pass; this
    wrapper rebuilds it on every __iter__ call so the same object can be
    iterated repeatedly.
    """

    def __init__(self, *args):
        # Positional args are forwarded verbatim to testing_iterator.
        self.args = args
        self._gen = None  # live generator backing direct next() calls

    def __iter__(self):
        return testing_iterator(*self.args)

    def __next__(self):
        # Bug fix: the original created a *fresh* generator on every call,
        # so next(wrapper) returned the first batch forever. Keep one
        # generator alive and restart only once it is exhausted.
        if self._gen is None:
            self._gen = self.__iter__()
        try:
            return next(self._gen)
        except StopIteration:
            self._gen = None
            raise


def load_vocabs(vocab_file: str) -> Dict[str, int]:
    """Load a vocabulary file into a token -> index mapping.

    Each non-blank line (after stripping) is one token; indices are assigned
    in file order starting at 0, and blank lines are skipped without
    consuming an index. A duplicated token keeps the index of its *last*
    occurrence, leaving a gap at its earlier index.

    Fixes vs. the original: dead `line is None` check (str.strip never
    returns None) and the stray trailing `pass` removed.
    """
    vocabs: Dict[str, int] = {}
    count = 0
    with open(vocab_file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            vocabs[line] = count
            count += 1
    return vocabs


def reverse_k_v(vocabs: Dict[Any, Any]):
    """Invert a mapping so values become keys and keys become values.

    If several keys share one value, the last key encountered wins.
    """
    return {value: key for key, value in vocabs.items()}


if __name__ == "__main__":
    # Smoke test: build vocabularies from the given files, then stream a few
    # epochs of training batches plus one pass of test batches, printing them.
    if len(sys.argv) < 6:
        print("python3 load_data.py train_file test_file char_vocabs word_vocabs type_vocabs")
        exit(0)

    is_for_bert = False
    # BERT vocabularies spell the special tokens with square brackets.
    UNK, PAD = ("[UNK]", "[PAD]") if is_for_bert else ("<UNK>", "<PAD>")

    train_file, test_file = sys.argv[1], sys.argv[2]
    char_vocabs_file, word_vocabs_file, type_vocabs_file = sys.argv[3:6]

    char_vocabs = load_vocabs(char_vocabs_file)
    word_vocabs = load_vocabs(word_vocabs_file)
    type_vocabs = load_vocabs(type_vocabs_file)
    idx2char = reverse_k_v(char_vocabs)

    unk_idx = char_vocabs.get(UNK)
    pad_idx = char_vocabs.get(PAD)

    epochs, batch_size, max_len = 5, 128, 16
    device = torch.device("cuda:0")

    train_iter = TrainIteratorWrapper(train_file, batch_size, max_len, char_vocabs,
                                      word_vocabs, type_vocabs, unk_idx, pad_idx,
                                      is_for_bert, device)
    test_iter = TestIteratorWrapper(test_file, batch_size, max_len, char_vocabs,
                                    word_vocabs, unk_idx, pad_idx, is_for_bert, device)

    count = 0
    for epoch in range(1, epochs + 1):
        print("[Epoch]: %d" % epoch)
        for sent, token, tags, masks, _ in train_iter:
            count += 1
            print(epoch, " : ", count, " : ", sent)

    print('\n\n')
    for sent, token, _, masks, raw in test_iter:
        print(sent)
