import numpy as np
import pandas as pd
from tensorflow.contrib import learn
# NLTK is a well-known natural language processing library for Python.
# It ships with corpora and POS-tagging resources, plus built-in
# classification and tokenization tools.
import nltk
import re
import chinese_text_clf.text_configure as cfg
import jieba
from rel_extact import utils
import tensorflow as tf
from chinese_text_clf import tokenization


# nltk.download('punkt')


def clean_str(text):
    """Lower-case *text* and normalise English contractions and punctuation.

    This is the classic Kaggle-style cleaning pipeline: drop characters
    outside a small whitelist, expand common contractions, space out
    selected punctuation, rewrite "<n>k" as "<n>000", and finally collapse
    whitespace runs.  Returns the stripped result.
    """
    # (pattern, replacement) pairs, applied strictly in this order --
    # later rules operate on the output of earlier ones.
    substitutions = (
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"that's", "that is "),
        (r"there's", "there is "),
        (r"it's", "it is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "can not "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    )
    cleaned = text.lower()
    for pattern, replacement in substitutions:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip()

# Load relation-extraction data (SemEval-2010 task 8 format).
def load_semeval_data_and_labels(path):
    """Load SemEval-style relation-extraction data.

    NOTE(review): this function used to be called ``load_data_and_labels``
    and was silently shadowed by the two-argument function of the same
    name defined later in this module, which made it unreachable; it has
    been renamed so it can actually be called.

    The file is expected to hold 4-line records:
        line 0: <id><TAB>"<sentence with <e1>..</e1> / <e2>..</e2> tags>"
        line 1: the relation label
        lines 2-3: ignored

    Returns:
        x_text: list of cleaned, whitespace-joined token strings.
        labels: uint8 one-hot matrix, one row per sentence.
    """
    data = []
    # Close the file deterministically (the old code leaked the handle).
    with open(path) as f:
        lines = [line.strip() for line in f]
    max_sentence_length = 0
    for idx in range(0, len(lines), 4):
        # Record id ("id" would shadow the builtin).
        sent_id = lines[idx].split("\t")[0]
        # Relation type annotated for this sentence.
        relation = lines[idx + 1]

        # Strip the surrounding quotes, then turn the entity tags into
        # plain tokens: <e1>w</e1> -> _e11_ w _e12_ (likewise _e21_/_e22_).
        sentence = lines[idx].split("\t")[1][1:-1]
        sentence = sentence.replace('<e1>', ' _e11_ ')
        sentence = sentence.replace('</e1>', ' _e12_ ')
        sentence = sentence.replace('<e2>', ' _e21_ ')
        sentence = sentence.replace('</e2>', ' _e22_ ')

        sentence = clean_str(sentence)
        # Tokenize and track the longest sentence seen.
        tokens = nltk.word_tokenize(sentence)
        max_sentence_length = max(max_sentence_length, len(tokens))
        sentence = " ".join(tokens)

        # id / sentence / relation type
        data.append([sent_id, sentence, relation])

    print(path)
    print("max sentence length = {}\n".format(max_sentence_length))

    df = pd.DataFrame(data=data, columns=["id", "sentence", "relation"])

    # Map relation names to integer class labels.
    df['label'] = [utils.class2label[r] for r in df['relation']]

    # Text data
    x_text = df['sentence'].tolist()

    # Label data
    labels_flat = df['label'].values.ravel()
    # Number of distinct classes present in this file.
    labels_count = np.unique(labels_flat).shape[0]

    # Scalar class ids -> one-hot rows, e.g.
    # 0 => [1 0 0 ... 0], 1 => [0 1 0 ... 0], ...
    def dense_to_one_hot(labels_dense, num_classes):
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    labels = dense_to_one_hot(labels_flat, labels_count).astype(np.uint8)

    return x_text, labels

# Waimai (food-delivery review) data.
def load_text_data_label(path, max_sentence_length):
    """Load a review CSV and build shuffled train/dev splits.

    Args:
        path: CSV file with ``review`` (text) and ``label`` (int) columns.
        max_sentence_length: pad/truncate length, in jieba tokens, for
            every review.  BUG FIX: this argument was previously printed
            but ignored in favour of ``cfg.FLAGS.max_sentence_length``.

    Returns:
        (x_train, y_train, vocab_processor, x_dev, y_dev) where x_* are
        integer token-id matrices and y_* are uint8 one-hot labels.
    """
    df = pd.read_csv(path)
    print("max sentence length = {}\n".format(max_sentence_length))
    # Text data
    x_text = df['review'].tolist()

    # Label data
    labels_flat = df['label'].values.ravel()
    # Number of distinct classes.
    labels_count = np.unique(labels_flat).shape[0]

    # Scalar class ids -> one-hot rows, e.g. 1 -> [0 1 0 ... 0].
    def dense_to_one_hot(labels_dense, num_classes):
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    y = dense_to_one_hot(labels_flat, labels_count).astype(np.uint8)

    # Tokenizer for the vocabulary processor: jieba word segmentation.
    def chinese_tokenizer(docs):
        for doc in docs:
            yield list(jieba.cut(doc))

    # BUG FIX: use the max_sentence_length argument here instead of the
    # previously hard-wired cfg.FLAGS.max_sentence_length.
    vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
        max_sentence_length, tokenizer_fn=chinese_tokenizer)
    # Convert each text to a fixed-length id sequence, zero-padded.
    x = np.array(list(vocab_processor.fit_transform(x_text)))
    print("Text Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("x = {0}".format(x.shape))
    print("y = {0}".format(y.shape))
    print("")

    # Shuffle reproducibly before splitting into train/dev.
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # Split train/test set.
    # TODO: This is very crude, should use cross-validation
    dev_sample_index = -1 * int(cfg.FLAGS.dev_sample_percentage * float(len(y)))
    x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
    print("Train/Dev split: {:d}/{:d}\n".format(len(y_train), len(y_dev)))

    return x_train, y_train, vocab_processor, x_dev, y_dev


def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.

    Args:
        data: indexable sequence of examples (converted to a numpy array).
        batch_size: maximum number of examples per yielded batch.
        num_epochs: how many full passes over the data to generate.
        shuffle: reshuffle the data at the start of every epoch.

    Yields numpy array slices of up to ``batch_size`` examples; the last
    batch of an epoch may be shorter.
    """
    data = np.array(data)
    data_size = len(data)
    # Ceiling division.  BUG FIX: the old int((len-1)/batch_size)+1
    # formula yielded one spurious empty batch when data was empty.
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for _ in range(num_epochs):
        if shuffle:
            # Fresh permutation each epoch so batches differ across epochs.
            shuffled_data = data[np.random.permutation(data_size)]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min(start_index + batch_size, data_size)
            yield shuffled_data[start_index:end_index]


def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.

    Args:
        positive_data_file: path to the file of positive examples (one per line).
        negative_data_file: path to the file of negative examples (one per line).

    Returns [x_text, y]: cleaned sentences and a one-hot label matrix
    where positive -> [0, 1] and negative -> [1, 0].
    """
    # Context managers close the handles deterministically (the previous
    # version leaked them until garbage collection).
    with open(positive_data_file, "r", encoding='utf-8') as f:
        positive_examples = [s.strip() for s in f]
    with open(negative_data_file, "r", encoding='utf-8') as f:
        negative_examples = [s.strip() for s in f]
    # Normalise the raw sentences.
    x_text = [clean_str(sent) for sent in positive_examples + negative_examples]
    # Generate one-hot labels in the same (positives first) order.
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]

# Prepare a dataset stored as separate positive/negative files.
def preprocess_p_n_split():
    """Load the pos/neg polarity files, vectorise the text and split train/dev.

    Reads the file paths and the dev split percentage from ``cfg.FLAGS``.
    Returns (x_train, y_train, vocab_processor, x_dev, y_dev).
    """
    # Load the raw sentences (half good / half bad) and one-hot labels.
    print("Loading data...")
    x_text, y = load_data_and_labels(cfg.FLAGS.positive_data_file,
                                     cfg.FLAGS.negative_data_file)

    # Fit a vocabulary (word -> integer id) sized to the longest sentence,
    # then map every sentence to a fixed-length id vector.
    longest = max(len(sentence.split(" ")) for sentence in x_text)
    vocab_processor = learn.preprocessing.VocabularyProcessor(longest)
    x = np.array(list(vocab_processor.fit_transform(x_text)))

    # Fixed seed keeps the shuffle reproducible across runs.
    np.random.seed(10)
    order = np.random.permutation(np.arange(len(y)))
    x_shuffled, y_shuffled = x[order], y[order]

    # Hold out the last dev_sample_percentage of the shuffled data.
    # TODO: This is very crude, should use cross-validation
    split_at = -1 * int(cfg.FLAGS.dev_sample_percentage * float(len(y)))
    x_train, x_dev = x_shuffled[:split_at], x_shuffled[split_at:]
    y_train, y_dev = y_shuffled[:split_at], y_shuffled[split_at:]

    del x, y, x_shuffled, y_shuffled

    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    return x_train, y_train, vocab_processor, x_dev, y_dev

def load_data_zh_comment(path):
    """Load a tab-separated Chinese sentiment file.

    Each line is expected to look like ``<id>\\t<text>\\t<label>``.
    Label '0' maps to [0, 0, 1], '1' to [0, 1, 0], and anything else to
    [1, 0, 0] (same fallback as the original if/elif/else chain).

    Returns:
        (x, y): list of texts and a numpy array of one-hot labels.
    """
    # Label string -> one-hot row; .get() supplies the else-branch default.
    one_hot = {'0': [0, 0, 1], '1': [0, 1, 0]}
    x = []
    y = []
    # Context manager closes the handle (the old code leaked it); the
    # unused train_data/guid/index bookkeeping has been removed.
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.replace("\n", "").split("\t")
            # Column 1: the text to classify.
            x.append(tokenization.convert_to_unicode(str(fields[1])))
            # Column 2: the sentiment label for that text.
            label = str(fields[2])
            y.append(list(one_hot.get(label, [1, 0, 0])))

    return (x, np.array(y))

# Custom loader for the Chinese comment sentiment dataset.
def preprocess_zh_comment():
    """Load the Chinese sentiment train/test files and vectorise them.

    Fits a single vocabulary over train+test so token ids are consistent,
    then returns (x_train, y_train, vocab_processor, x_test, y_test).
    """
    x_train, y_train = load_data_zh_comment('data/comment/train_sentiment.txt')
    x_test, y_test = load_data_zh_comment('data/comment/test_sentiment.txt')

    # Fixed pad length; computing the true maximum is left disabled.
    # max_document_length = max([len(list(jieba.cut(x, cut_all=False))) for x in x_train])
    max_document_length = 120
    print('文本最大长度', max_document_length, 'train len', len(x_train), 'test len', len(x_test))

    # jieba word segmentation, one token list per document.
    def chinese_tokenizer(docs):
        for doc in docs:
            yield list(jieba.cut(doc))

    vocab_processor = learn.preprocessing.VocabularyProcessor(
        max_document_length, tokenizer_fn=chinese_tokenizer)

    # Fit on the concatenation, then split the id matrix back apart.
    combined = x_train + x_test
    ids = np.array(list(vocab_processor.fit_transform(combined)))

    n_train = len(x_train)
    return ids[:n_train], y_train, vocab_processor, ids[n_train:], y_test

if __name__ == "__main__":
    # trainFile = 'SemEval2010_task8_all_data/SemEval2010_task8_training/TRAIN_FILE.TXT'
    # testFile = 'SemEval2010_task8_all_data/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT'
    #
    # load_data_and_labels(testFile)

    # Smoke-test the waimai CSV loader with a 300-token pad length.
    # BUG FIX: load_text_data_label returns a 5-tuple
    # (x_train, y_train, vocab_processor, x_dev, y_dev); the previous
    # 2-target unpacking raised ValueError at runtime.
    x_train, y_train, vocab_processor, x_dev, y_dev = load_text_data_label(
        'data/waimai_10k.csv', 300)

    # x_train, y_train, vocab_processor, x_dev, y_dev = preprocess_p_n_split()

    pass
