from typing import List, Tuple

import jieba
import jieba.posseg as psg
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from torch.utils.data import random_split
from transformers import BertTokenizer
from zhon.hanzi import punctuation

import logging
jieba.setLogLevel(logging.INFO)

pos_tags = {
    '[PAD]': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6,
    'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12, 'm': 13,
    'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19,
    't': 20, 'u': 21, 'v': 22, 'w': 23, 'x': 24, 'y': 25, 'z': 26,
    '[CLS]': 27, '[SEP]': 28
}
pos_num_tags = len(pos_tags)

# R: Reason, C: Consequence
out_tags = {
    '[PAD]': 0, 'O': 1, 'B-RC': 2, 'I-RC': 3, 'B-RP': 4, 'I-RP': 5,
    'B-C': 6, 'I-C': 7, 'B-CC': 8, 'I-CC': 9, 'B-CP': 10, 'I-CP': 11,
    '[CLS]': 12, '[SEP]': 13
}
out_num_tags = len(out_tags)
out_id2tag = dict([(ix, tag) for tag, ix in out_tags.items()])


def getPartOfSpeech(s: str, seq_len: int) -> List[int]:
    pos_list, cnt = [0] * seq_len, 1
    pos_list[0] = pos_tags['[CLS]']
    for word, pos in psg.cut(s):

        for _ in word:
            pos_list[cnt] = pos_tags[pos[0].lower()]
            cnt = cnt + 1
    pos_list[cnt] = pos_tags['[SEP]']
    return pos_list


def get_1hot(pos_list: List[int]) -> List[List[int]]:
    r"""
        Use it with getPartOfSpeech
    """
    eye = np.eye(pos_num_tags)
    return eye[pos_list, :]


def batchPosProcessing(texts: List[str], seq_len) -> torch.Tensor:
    processed = []
    for text in texts:
        pos_list = getPartOfSpeech(text, seq_len)
        processed.append(get_1hot(pos_list))
    return torch.tensor(processed, dtype=torch.float)


class Tokenizer(object):
    def __init__(self, bert_path):
        self.tokenizer = BertTokenizer.from_pretrained(bert_path)

    @staticmethod
    def is_chinese(ch) -> bool:
        return '\u4e00' <= ch <= '\u9fa5' or ch in punctuation

    def fit(self, texts, seq_len) -> torch.Tensor:
        outputs = []
        for _text in texts:
            text = ""
            for ch in _text:
                if self.is_chinese(ch):
                    text += ch
                else:
                    text += '[UNK]'
            tokens = self.tokenizer.encode(text)
            if seq_len - len(tokens) > 0:
                k = seq_len - len(tokens)
                tokens += [0] * k
            outputs.append(tokens)
        return torch.LongTensor(outputs)


def getDataLoader(raw_input1, raw_input2, raw_targets, batch_size=32, split_required=True, split_ratio=0.8):
    if split_required:
        dataset = TensorDataset(raw_input1, raw_input2, raw_targets)
        train_len = int(split_ratio * len(dataset))
        length = len(dataset)
        valid_len = length - train_len
        train_dataset, valid_dataset = random_split(dataset, (train_len, valid_len))
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)
        return train_loader, valid_loader
    else:
        dataset = TensorDataset(raw_input1, raw_input2, raw_targets)
        data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
        return data_loader


def generate_tags_from_txt(txt_path: str, seq_len: int) -> (List[str], torch.Tensor):
    batch_text, batch_tags = [], []
    with open(txt_path, 'r', encoding='utf-8') as f:
        while f.readline() != '':
            text = f.readline()[4:-1]
            r_cores = f.readline()[9:].split()
            r_preds = f.readline()[10:].split()
            centers = f.readline()[4:].split()
            c_cores = f.readline()[9:].split()
            c_preds = f.readline()[10:].split()
            if len(text) > seq_len - 2: continue
            length = len(text)
            tags = ['[CLS]'] + ['O'] * length + \
                   ['[SEP]'] + ['[PAD]'] * (seq_len - length - 2)
            pos = -1
            for r_core in r_cores:
                pos = text.find(r_core, pos + 1)
                if pos == -1: continue
                tags[pos+1] = 'B-RC'
                for i in range(pos + 1, pos + len(r_core)):
                    tags[i+1] = 'I-RC'
            pos = -1
            for r_pred in r_preds:
                pos = text.find(r_pred, pos + 1)
                if pos == -1: continue
                tags[pos+1] = 'B-RP'
                for i in range(pos + 1, pos + len(r_pred)):
                    tags[i+1] = 'I-RP'
            pos = -1
            for center in centers:
                pos = text.find(center, pos + 1)
                if pos == -1: continue
                tags[pos+1] = 'B-C'
                for i in range(pos + 1, pos + len(center)):
                    tags[i+1] = 'I-C'
            pos = -1
            for c_core in c_cores:
                pos = text.find(c_core, pos + 1)
                if pos == -1: continue
                tags[pos+1] = 'B-CC'
                for i in range(pos + 1, pos + len(c_core)):
                    tags[i+1] = 'I-CC'
            pos = -1
            for c_pred in c_preds:
                pos = text.find(c_pred, pos + 1)
                if pos == -1: continue
                tags[pos+1] = 'B-CP'
                for i in range(pos + 1, pos + len(c_pred)):
                    tags[i+1] = 'I-CP'
            batch_text.append(text)
            batch_tags.append(tags)
        f.close()
    batch_tags = batch_tags2tensor(batch_tags)
    return batch_text, batch_tags


def get_out_tags_id(tags: List[str]):
    ids = [0] * len(tags)
    for ix, tag in enumerate(tags):
        ids[ix] = out_tags[tag]
    return ids


def batch_tags2tensor(batch_tags: List[List[str]]):
    targets = []
    for tags in batch_tags:
        ids = get_out_tags_id(tags)
        targets.append(ids)
    return torch.LongTensor(targets)

# if __name__ == "__main__":
#     seq_len = 144
#     text, targets = generate_tags_from_txt('../debug.txt', seq_len)
#     text = text[0]
#     targets = targets.numpy().tolist()[0]
#     targets = targets[1:len(text) + 1]
#     for i in range(len(text)):
#         print(text[i], targets[i])