#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 18-10-31
import os

import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, train_test_split

_PAD = 0
_UNK = 1


def read_TSVdata(path):
    """
    Read a single chat-detection dataset file.

    Each line is expected to look like "<label>\t<sentence>".
    :param path: path to the dataset file
    :return: (labels, sentences); sentences keep their trailing newline,
             matching the original on-disk format
    """
    labels = []
    sentences = []
    with open(path, 'r', encoding='utf-8') as fp:
        for line in fp:
            # maxsplit=1 so a sentence that itself contains a tab no longer
            # raises "too many values to unpack"
            label, sentence = line.split('\t', 1)
            labels.append(label)
            sentences.append(sentence)
    return labels, sentences


def gather_data(data_dir):
    """
    Merge the ten chat-detection dataset shards into one collection.

    :param data_dir: directory containing dataset0.txt .. dataset9.txt
    :return: (sentences, labels) aggregated across all shards
    """
    all_sent, all_label = [], []
    for idx in range(10):
        shard_path = os.path.join(data_dir, 'dataset%d.txt' % idx)
        shard_labels, shard_sents = read_TSVdata(shard_path)
        all_label += shard_labels
        all_sent += shard_sents
    return all_sent, all_label


def get_kfold_train_dev_test(sents, labels, fold=10):
    """
    Generator over stratified k-fold train/dev/test splits.

    :param sents: dataset sentences
    :param labels: dataset labels
    :param fold: number of folds
    :return: yields (X_train, X_dev, X_test, y_train, y_dev, y_test) per fold
    """
    data = np.array(sents)
    target = np.array(labels)
    splitter = StratifiedKFold(n_splits=fold, shuffle=True, random_state=1024)
    for trainval_idx, test_idx in splitter.split(data, target):
        X_test, y_test = data[test_idx], target[test_idx]
        X_trainval, y_trainval = data[trainval_idx], target[trainval_idx]
        # carve 1/9 of the train+dev portion off as dev -> overall 8:1:1
        X_train, X_dev, y_train, y_dev = train_test_split(
            X_trainval, y_trainval, test_size=1 / 9, random_state=1024)
        yield X_train, X_dev, X_test, y_train, y_dev, y_test


def generate_vocab(sentences):
    """
    Build a vocabulary from space-separated sentences.

    :param sentences: list of texts, e.g. ['aaa bbb ccc', 'ddd eee']
    :return: (vocab, vocab_rev) — vocab is a word list with '_PAD' and '_UNK'
             reserved at indices 0 and 1; vocab_rev maps word -> index
    """
    unique_words = {w for line in sentences for w in line.strip().split(' ')}
    # PAD sits at index 0, which plays nicely with pad_sequences
    vocab = ['_PAD', '_UNK'] + sorted(unique_words)
    vocab_rev = {word: idx for idx, word in enumerate(vocab)}
    return vocab, vocab_rev


def tokenize_sentences(sentences, vocab_rev) -> list:
    """
    Convert sentences into lists of token ids.

    :param sentences: a batch of sentences
    :param vocab_rev: word -> index dictionary
    :return: tokenized sentences (list of lists of ids)
    """
    # words missing from the vocabulary fall back to the _UNK id
    return [
        [vocab_rev[w] if w in vocab_rev else _UNK
         for w in sentence.strip().split(' ')]
        for sentence in sentences
    ]


def read_data2DataFrame(path):
    """
    Load a tab-separated file into a pandas DataFrame whose columns are
    ['label', 'sentence'].

    :param path: path to the TSV file
    :return: the loaded DataFrame
    """
    # passing names= sets the columns at parse time instead of afterwards
    return pd.read_csv(path, header=None, sep='\t', names=['label', 'sentence'])


def group_length(sentence):
    """
    Map a sentence to a length bucket (1..5) for grouping.

    Buckets: <=5 words -> 1, <=10 -> 2, <=15 -> 3, <=20 -> 4, longer -> 5.
    :param sentence: whitespace-separated sentence
    :return: bucket number
    """
    n_words = len(sentence.split())
    for bucket, upper in enumerate((5, 10, 15, 20), start=1):
        if n_words <= upper:
            return bucket
    return 5


def train_dev_test_split(X, y, train_size=0.8):
    """
    Split the data 8:1:1 into train, dev and test sets.

    :param X: samples
    :param y: labels
    :param train_size: fraction kept for training (rest split evenly)
    :return: X_train, X_dev, X_test, y_train, y_dev, y_test
    """
    rng = np.random.RandomState(1024)
    # the same RandomState object is reused, so its advanced internal state
    # seeds the second split — same draws as the original implementation
    X_train, X_rest, y_train, y_rest = train_test_split(
        X, y, train_size=train_size, random_state=rng)
    X_dev, X_test, y_dev, y_test = train_test_split(
        X_rest, y_rest, train_size=0.5, random_state=rng)
    return X_train, X_dev, X_test, y_train, y_dev, y_test


def generate_train_dev_test(path):
    """
    Convenience wrapper.

    Pipeline:
        read the file into a DataFrame
        bucket the rows by sentence length
        split every bucket into train / dev / test (8:1:1)
        concatenate the per-bucket splits
    :param path: chat-detection file path
    :return: X_train, X_dev, X_test, y_train, y_dev, y_test
    """
    X_train, y_train = [], []
    X_dev, y_dev = [], []
    X_test, y_test = [], []

    # read_data2DataFrame already names the columns ['label', 'sentence'];
    # the former redundant re-assignment has been dropped
    df = read_data2DataFrame(path)
    df['length'] = df['sentence'].apply(group_length)
    for _, bucket in df.groupby('length'):
        X_train_, X_dev_, X_test_, y_train_, y_dev_, y_test_ = train_dev_test_split(
            bucket['sentence'], bucket['label'])
        X_train.extend(X_train_)
        X_dev.extend(X_dev_)
        X_test.extend(X_test_)
        y_train.extend(y_train_)
        y_dev.extend(y_dev_)
        y_test.extend(y_test_)
    # sanity check: the three splits cover every row exactly once
    assert len(X_train) + len(X_dev) + len(X_test) == len(df)
    return X_train, X_dev, X_test, y_train, y_dev, y_test


def POS2id(tag_dir: str, POS_dict: dict, target_dir: str):
    """
    Convert string POS tags to ids and store them as .npy files.

    :param tag_dir: directory holding the POS .txt files
    :param POS_dict: POS -> id mapping; unknown tags map to 0
    :param target_dir: directory to write the .npy files into
    :return: None
    """
    for file in os.listdir(tag_dir):
        if file.endswith('.txt'):
            with open(os.path.join(tag_dir, file), 'r', encoding='utf-8') as fp:
                collection = []
                for line in fp:
                    collection.append([POS_dict.get(pos, 0) for pos in line.strip().split()])
            # sentences have different lengths, so the result is ragged; wrap
            # it explicitly as dtype=object — modern numpy (>=1.24) refuses to
            # create a ragged array implicitly (load with allow_pickle=True)
            np.save(os.path.join(target_dir, 'POS' + file.replace('.txt', '')),
                    np.array(collection, dtype=object))


def convert_to_bert_POS(word: str, word_tag: str, bert_char: str):
    """
    Convert word-level POS tags into tags aligned with BERT (char-level) tokens.

    BERT marks word-piece continuations with a '##' prefix and replaces
    out-of-vocabulary pieces with '[UNK]'; both need special handling.
    :param word: word-segmented sentence
    :param word_tag: word-level POS tags, one per word
    :param bert_char: the same sentence tokenized by BERT
    :return: BIO-style tags, one per BERT token
    """
    word, word_tag, bert_char = word.lower().split(), word_tag.split(), bert_char.split()
    char_tag = []
    i = 0  # index into words
    j = 0  # index into BERT tokens
    assert len(word) == len(word_tag)
    word_len = len(word)
    char_len = len(bert_char)
    try:
        # each outer iteration consumes exactly one word
        while i < word_len and j < char_len:
            token = ''      # BERT pieces accumulated for the current word
            c = 0           # number of BERT tokens consumed for this word
            word_p = 0      # characters of the current word covered so far
            while i < word_len and j < char_len and (token in word[i] or bert_char[j] == '[UNK]'):
                if token == word[i]:
                    break
                else:
                    if bert_char[j].startswith('##'):
                        token += bert_char[j][2:]
                        word_p += len(bert_char[j][2:])
                    elif bert_char[j] == '[UNK]':
                        # an [UNK] piece is assumed to cover one more character
                        token = word[i][:word_p + 1]
                        word_p = len(token)
                    else:
                        token += bert_char[j]
                        # bugfix: advance by the length of this piece — the
                        # original added len(bert_char), the token count
                        word_p += len(bert_char[j])
                    c += 1
                    j += 1
            for k in range(c):
                if k == 0:
                    char_tag.append('B-' + word_tag[i])
                else:
                    char_tag.append('I-' + word_tag[i])
            i += 1
        assert len(bert_char) == len(char_tag)
    except Exception:
        # dump the offending sentence for debugging, then re-raise with the
        # original traceback. The previous `e.with_traceback()` (no argument)
        # raised TypeError and masked the real error.
        print(word)
        print(bert_char)
        print(char_tag)
        raise
    return char_tag


def convert_2_bert_POS_file(word_seg_dir, word_tag_dir, bert_seg_dir, dest_dir):
    """
    Entry point: walk the word-segmentation directory and generate POS files
    aligned with the BERT tokenization.

    :param word_seg_dir: directory of word-segmented files (ten folds + test set)
    :param word_tag_dir: directory of word-level POS files (ten folds + test set)
    :param bert_seg_dir: directory of BERT-tokenized (char-level) files
    :param dest_dir: destination directory for the BERT-aligned POS files
    :return: None
    """
    for name in os.listdir(word_seg_dir):
        seg_path = os.path.join(word_seg_dir, name)
        tag_path = os.path.join(word_tag_dir, name)
        # BERT tokenization files follow the '<name>_token.txt' convention
        bert_path = os.path.join(bert_seg_dir, name.replace('.txt', '_token.txt'))
        out_path = os.path.join(dest_dir, name)
        with open(seg_path, 'r', encoding='utf-8') as f_word, \
                open(tag_path, 'r', encoding='utf-8') as f_tag, \
                open(bert_path, 'r', encoding='utf-8') as f_bert:
            converted = [convert_to_bert_POS(w, t, b)
                         for w, t, b in zip(f_word, f_tag, f_bert)]
            with open(out_path, 'w', encoding='utf-8') as out:
                for sent_tags in converted:
                    out.write(' '.join(sent_tags) + '\n')


if __name__ == '__main__':
    # all_sent, all_label = gather_data("data/chat-detection-dataset")
    # for X_train, X_dev, X_test, y_train, y_dev, y_test in get_kfold_train_dev_test(all_sent, all_label):
    #     generate_vocab(X_train)
    # with open(os.path.join(args.data_dir, 'all_sentence.txt'), 'w', encoding='utf-8') as fp:
    #     fp.writelines(all_sent)
    #
    # with open(os.path.join(args.data_dir, 'all_label.txt'), 'w', encoding='utf-8') as fp:
    #     fp.writelines(all_label)
    from utils import load_POS2dict

    USE_BERT = True # precondition: BERT ELMO word vectors and matching tokens already exist

    TAG_DIR = 'data/full_data/tagger'  # directory the POS files are read from
    WORD_SEG_DIR = 'data/full_data/segment'  # directory the word segmentation is read from
    DEST_DIR = 'data/full_data/vector'  # directory the output files are stored in
    BERT_POS_DIR = 'data/full_data/bert_pos'  # directory for the BERT POS files
    BERT_DIR = 'data/full_data/segment_bert_30'  # BERT data directory (contains the BERT tokenization)

    os.makedirs(BERT_POS_DIR, exist_ok=True)

    if USE_BERT:
        d = load_POS2dict([os.path.join(TAG_DIR, 'dev_x0.txt'), os.path.join(TAG_DIR, 'train_x0.txt')],
                          os.path.join(TAG_DIR, 'POS.json'), BERT_mode=True)
        convert_2_bert_POS_file(WORD_SEG_DIR, TAG_DIR, BERT_DIR, BERT_POS_DIR)
        POS2id(BERT_POS_DIR, d, BERT_POS_DIR)
    else:
        d = load_POS2dict([os.path.join(TAG_DIR, 'dev_x0.txt'), os.path.join(TAG_DIR, 'train_x0.txt')],
                          os.path.join(TAG_DIR, 'POS.json'), BERT_mode=False)
        POS2id(TAG_DIR, d, DEST_DIR)
