# -*- coding: utf-8 -*-
# @Time    : 2018/7/25 13:12
# @Author  : seeledu
# @email   : seeledu@bug.moe
# @File    : data_helpers.py
# @Software: PyCharm
"""
用来处理各种数据的
"""
import itertools
import re
from collections import Counter  # 统计单词和词频
import xlrd
import jieba
from pkuseg import pkuseg
import numpy as np


def clean_str(string):
    """
    Tokenization/string cleaning for (mostly English) datasets.

    Strips characters outside a small whitelist, splits off common
    English contractions and punctuation as separate tokens, collapses
    whitespace, and lowercases the result.

    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # (pattern, replacement) pairs applied in order; order matters for
    # the contraction rules, which must run before whitespace collapsing.
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " ( "),
        (r"\)", " ) "),
        (r"\?", " ? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()


def stopwordslist(filepath):
    """
    Build a stopword list from a file with one stopword per line.

    :param filepath: path of the stopword file (UTF-8, one word per line)
    :return: list of stripped stopwords (blank lines yield empty strings,
             matching the original behavior)
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(filepath, 'r', encoding='utf-8') as stopword_file:
        return [line.strip() for line in stopword_file]


def pad_sentences(sentences, padding_word="<PAD/>", sequence_length=20):
    """
    Left-pad every sentence to a fixed length.

    Each sentence is a space-joined string; it is split into tokens,
    left-padded with ``padding_word`` up to ``sequence_length``, and
    truncated (keeping the first ``sequence_length`` tokens) when longer.

    :param sentences: iterable of space-separated sentence strings
    :param padding_word: token used to fill short sentences
    :param sequence_length: fixed output length for every sentence
    :return: list of token lists, each exactly ``sequence_length`` long
    """
    # The original wrapped the padding in a dead try/except TypeError:
    # str.split always returns a list, so the concatenation cannot raise,
    # and on a hypothetical failure the previous sentence would have been
    # appended again (new_sentence lived outside the loop). Removed.
    padded_sentences = []
    for sentence in sentences:
        tokens = sentence.split(" ")
        num_padding = sequence_length - len(tokens)
        # num_padding <= 0 produces no padding; the slice then truncates.
        padded = [padding_word] * num_padding + tokens
        padded_sentences.append(padded[:sequence_length])
    return padded_sentences


def pad_slots(sentences, padding_word="<PAD/>", sequence_length=30):
    """
    Left-pad every slot-tag sequence to a fixed length.

    :param sentences: iterable of slot-tag lists (already tokenized)
    :param padding_word: tag used to fill short sequences
    :param sequence_length: fixed output length for every sequence
    :return: list of tag lists, each exactly ``sequence_length`` long
    """
    # The original caught TypeError and, on failure, silently appended the
    # previous sequence again (new_sentence lived outside the loop). A
    # non-list input is a caller bug and should fail fast, so the except
    # is removed.
    padded_slots = []
    for tags in sentences:
        num_padding = sequence_length - len(tags)
        # num_padding <= 0 produces no padding; the slice then truncates.
        padded = [padding_word] * num_padding + tags
        padded_slots.append(padded[:sequence_length])
    return padded_slots


def build_vocab(sentences, slots):
    """
    Build word<->index mappings over sentence tokens and slot tags.

    :param sentences: list of token lists
    :param slots: list of slot-tag lists (share the same vocabulary)
    :return: [vocabulary (word -> index), vocabulary_inv (index -> word)]
    """
    word_counts = Counter(itertools.chain.from_iterable(sentences + slots))
    # Sort the unique words so index assignment is deterministic.
    vocabulary_inv = sorted(word_counts)
    vocabulary = {word: idx for idx, word in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]


def load_data_and_labels():
    """
    Load raw text, intent labels, and slot annotations.

    Reads the training set from an Excel sheet, the test set and the slot
    annotations from text/CSV files, strips the IOB "B-"/"I-" prefixes
    from the slot tags (so they share the word-embedding vocabulary),
    removes punctuation from every utterance, and segments it with jieba.

    :return: (all_text, labels, slots) where all_text is a list of
             space-joined segmented sentences (train followed by test),
             labels the matching intent labels, and slots the
             prefix-stripped slot-tag lists.
    """
    # Training text (column 0) and intent label (column 1); row 0 is the header.
    xlsfile = r"data/训练集处理.xlsx"
    book = xlrd.open_workbook(xlsfile)
    sheet0 = book.sheet_by_index(0)
    text = sheet0.col_values(0)[1:]
    label = sheet0.col_values(1)[1:]

    with open("data/4528new.txt", "r", encoding='utf8') as input_file:
        test_text = input_file.readlines()
    with open("data/4528_y_new.txt", "r", encoding='utf8') as input_file:
        test_label = input_file.readlines()
    with open("data/slot修正后.csv", "r", encoding='utf8') as input_file:
        slots = input_file.readlines()
    slots = slots[1:]  # drop the CSV header row

    # Strip the "B-"/"I-" prefix from every IOB tag; plain "O" stays as-is.
    for i, line in enumerate(slots):
        slots[i] = line.strip().split(" ")
        for t, tag in enumerate(slots[i]):
            if tag == "O":
                continue
            try:
                slots[i][t] = tag.split("-")[1]
            except IndexError:
                # Malformed tag without a "-" separator: keep it unchanged.
                # (The original used a bare except here, which also hid
                # unrelated errors.)
                pass

    # Strip trailing newlines from the test set; assumes test_text and
    # test_label are line-aligned files of equal length.
    for i in range(len(test_text)):
        test_text[i] = test_text[i].strip()
        test_label[i] = test_label[i].strip()

    # Punctuation (ASCII + common Chinese) deleted before segmentation.
    add_punc = '[’!"#$%&\'()*+,-.，。《》/:;<=>?？@[\\]^_`{|}~ ]+'
    labels = label + test_label

    # NOTE(review): the original also instantiated pkuseg() here but only
    # ever segmented with jieba; the unused (and slow) initialization was
    # removed along with a large block of commented-out experiments.
    all_text = []
    for tmp in text + test_text:
        tmp = re.sub(add_punc, "", tmp)
        tokens = jieba.cut(tmp.strip())
        all_text.append(" ".join(tokens))
    return all_text, labels, slots


def build_input_data(sentences, labels, vocabulary):
    """
    Map sentences and intent labels to index vectors.

    :param sentences: list of equal-length token lists
    :param labels: list of intent-name strings
    :param vocabulary: word -> index mapping
    :return: [x, y] where x is the token-index matrix and y the class ids
    """
    # Intent classes in their fixed id order (position == class id).
    intent_names = [
        'app', 'bus', 'calc', 'cinemas', 'contacts', 'cookbook', 'datetime',
        'email', 'epg', 'flight', 'health', 'lottery', 'map', 'match',
        'message', 'music', 'news', 'novel', 'poetry', 'radio', 'riddle',
        'schedule', 'stock', 'telephone', 'train', 'translation', 'tvchannel',
        'video', 'weather', 'website', 'chat',
    ]
    table = {name: idx for idx, name in enumerate(intent_names)}
    x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
    y = np.array([table[name] for name in labels])
    return [x, y]


def build_slot_data(slots, vocabulary):
    """
    Convert slot-tag sequences into an index array.

    :param slots: padded slot-tag sequences (lists of tag tokens)
    :param vocabulary: word -> index mapping built from the text data
    :return: numpy array of slot index sequences
    """
    indexed = []
    for tag_sequence in slots:
        indexed.append([vocabulary[tag] for tag in tag_sequence])
    return np.array(indexed)


def load_data():
    """
    Load and preprocess the whole dataset.

    :return: [x, y, slots, vocabulary, vocabulary_inv] — token-index
             matrix, class ids, slot-index matrix, and the word<->index
             mappings.
    """
    raw_text, raw_labels, raw_slots = load_data_and_labels()
    padded_text = pad_sentences(raw_text)
    padded_slots = pad_slots(raw_slots)
    # The vocabulary covers padded sentences (including the pad token)
    # plus the unpadded slot tags.
    vocabulary, vocabulary_inv = build_vocab(padded_text, raw_slots)
    x, y = build_input_data(padded_text, raw_labels, vocabulary)
    slot_ids = build_slot_data(padded_slots, vocabulary)
    return [x, y, slot_ids, vocabulary, vocabulary_inv]


def add_dimention(sentNpy, flag, type_name):
    """
    Extend word vectors with a knowledge dimension (size = number of
    knowledge categories).

    Loads the per-split knowledge representation from
    ``data/知识表达_matching/<flag>_<type_name>.npy``; each entry is a
    mapping from a token position (as a string key) to its knowledge
    vector, which overwrites the appended zero block at that position.

    :param sentNpy: per-sentence token embeddings — assumes each
                    ``sentNpy[i][c]`` is a 1-D vector and every sentence
                    has 20 token positions (TODO confirm against caller)
    :param flag: fold index, 0~9
    :param type_name: dataset split name: dev, test, or train
    :return: numpy array of augmented embeddings
    """
    kb_number = 28  # number of knowledge categories; adjust when the KB changes
    zero_block = np.zeros(kb_number)
    kg_represent = np.load('data/知识表达_matching/' + str(flag) + '_' + type_name + '.npy')
    augmented = []
    for sent_idx, kg_item in enumerate(kg_represent):
        # Append the zero knowledge block to each of the 20 token vectors.
        tokens = [np.concatenate((sentNpy[sent_idx][pos], zero_block))
                  for pos in range(20)]
        if len(kg_item) > 0:
            for key, value in kg_item.items():
                pos = int(key)
                # Only positions 0..19 exist; out-of-range keys are ignored
                # (same effect as the original's range(20) scan).
                if 0 <= pos < 20:
                    tokens[pos][-kb_number:] = np.array(value[0])
        augmented.append(np.array(tokens))
    return np.array(augmented)


if __name__ == "__main__":
    # TODO:fix the bug about english_word
    x, y, slots, vocabulary, vocabulary_inv = load_data()
    # print(x[10:])
    # print(slots[10])
    # cut_word2("data/data/callreason_train_data/callreason.train.fj_and_sh.2w", "data/callreason_train_data/")
    # cut_word("data/callreason_train_data/plan1_fusai.txt","data/callreason_train_data/")
    # x, y, vocabulary, vocabulary_inv = load_data()
    # x_test, title = load_test_data(vocabulary)
    # for tmp in x_test:
    #     if len(tmp) != 256:
    #         # print(tmp)
    #         k = 1
    # # print(x)
    # # pad_x = pad_sentences(x)
    # print(len(x_test))
    # print(len(title))
    # print(title[4999])
    # print(x_test[4999])
