
import pandas as pd
import numpy as np
import jieba
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import tokenization
from tensorflow.contrib import learn
import pandas as pd
from sklearn.utils import shuffle

def load_data_own(path='data/waimai_10k.csv'):
    """Load a binary-sentiment CSV with 'review' and 'label' columns.

    Args:
        path: CSV file path. Defaults to the waimai (food-delivery) review
            set, matching the original hard-coded behavior. The hotel
            review set ('data/ChnSentiCorp_htl_all.csv') has the same schema.

    Returns:
        (x, y): x is a list of review strings; y is an (n, 2) int array of
        one-hot labels where label == 1 -> [0, 1] and anything else -> [1, 0].
    """
    df = pd.read_csv(path)

    x = df['review'].to_list()
    # One-hot encode: positive (1) -> [0, 1]; negative/other -> [1, 0].
    y = np.array([[0, 1] if k == 1 else [1, 0] for k in df['label'].to_list()])

    return (x, y)

def load_data_zh_comment(path):
    """Load a tab-separated Chinese sentiment file into texts and labels.

    Each line is expected to have at least three tab-separated fields where
    field 1 is the text and field 2 is the sentiment label
    (NOTE(review): field 0 appears to be an id column — confirm against the
    files produced by build_train_dev()).

    Args:
        path: path to a UTF-8, tab-separated text file.

    Returns:
        (x, y): x is a list of unicode texts; y is an (n, 3) int array of
        one-hot labels: '0' -> [0, 0, 1], '1' -> [0, 1, 0], else [1, 0, 0].
    """
    x = []
    y = []
    # `with` guarantees the handle is closed (the original leaked it); the
    # dead `train_data`/`guid`/`index` locals were dropped (`index` was never
    # incremented, so `guid` was always 'train-0' and unused).
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.replace("\n", "").split("\t")
            # Text to classify.
            x.append(tokenization.convert_to_unicode(str(fields[1])))

            label = str(fields[2])  # sentiment class of the text
            if label == '0':
                y.append([0, 0, 1])
            elif label == '1':
                y.append([0, 1, 0])
            else:
                y.append([1, 0, 0])

    return (x, np.array(y))



# 自定义数据读取 中文评论
def preprocess_zh_comment():
    """Load the Chinese comment train/test sets and map words to ids.

    The vocabulary is fit on train+test together so word ids are consistent
    across both splits; every text is padded/truncated to the length of the
    longest (jieba-segmented) training text.

    Returns:
        (x_train, y_train, vocab_processor, x_test, y_test) where the x
        arrays contain integer word ids.
    """
    x_train, y_train = load_data_zh_comment('data/comment/train_sentiment.txt')
    x_test, y_test = load_data_zh_comment('data/comment/test_sentiment.txt')

    # Word-level length statistics over the training texts (jieba cut).
    len_sen = [len(list(jieba.cut(x, cut_all=False))) for x in x_train]
    max_document_length = max(len_sen)
    print('文本最大长度', max_document_length, '文本平均长度', np.mean(len_sen))
    print('train len', len(x_train), 'test len', len(x_test))

    def chinese_tokenizer(docs):
        # VocabularyProcessor expects a generator of token lists.
        for doc in docs:
            yield list(jieba.cut(doc))

    vocab_processor = learn.preprocessing.VocabularyProcessor(
        max_document_length, tokenizer_fn=chinese_tokenizer)

    # Fit the vocabulary (word -> id) on train and test jointly.
    x_all = x_train + x_test
    x = np.array(list(vocab_processor.fit_transform(x_all)))

    return x[:len(x_train)], y_train, vocab_processor, x[len(x_train):], y_test

# 自定义数据读取
def preprocess_own_train():
    """Build the training split of the waimai review data.

    NOTE: preprocess_own_dev() must use the same random seed (10) and the
    same 10% split fraction so the two functions yield complementary,
    non-overlapping slices of the same shuffled data.

    Returns:
        (x_train, y_train, vocab_processor): padded id sequences, one-hot
        labels, and the fitted VocabularyProcessor.
    """
    print("Loading data...")
    x_text, y = load_data_own()

    # Longest text in words (jieba segmentation) -> padded sequence length.
    max_document_length = max(len(list(jieba.cut(x, cut_all=False))) for x in x_text)
    print('文本最大长度', max_document_length)

    def chinese_tokenizer(docs):
        # VocabularyProcessor expects a generator of token lists.
        for doc in docs:
            yield list(jieba.cut(doc))

    # Map each word to an integer id, padding to max_document_length.
    vocab_processor = learn.preprocessing.VocabularyProcessor(
        max_document_length, tokenizer_fn=chinese_tokenizer)
    x = np.array(list(vocab_processor.fit_transform(x_text)))

    # Fixed seed keeps the shuffle reproducible across runs and, crucially,
    # identical to the shuffle in preprocess_own_dev().
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # Hold out the last 10% as dev; keep the rest for training.
    # TODO: This is very crude, should use cross-validation
    dev_sample_index = -1 * int(0.1 * float(len(y)))
    x_train = x_shuffled[:dev_sample_index]
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]

    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    return x_train, y_train, vocab_processor

# 自定义数据读取
def preprocess_own_dev():
    """Build the dev split of the waimai review data.

    NOTE: mirrors preprocess_own_train() — the same seed (10) and 10% split
    fraction guarantee this returns exactly the samples that function held
    out. Keep the two in sync.

    Returns:
        (x_dev, y_dev): padded id sequences and one-hot labels for the
        held-out 10%.
    """
    print("Loading data...")
    x_text, y = load_data_own()

    # Longest text in words (jieba segmentation) -> padded sequence length.
    max_document_length = max(len(list(jieba.cut(x, cut_all=False))) for x in x_text)
    print('文本最大长度', max_document_length)

    def chinese_tokenizer(docs):
        # VocabularyProcessor expects a generator of token lists.
        for doc in docs:
            yield list(jieba.cut(doc))

    # Map each word to an integer id, padding to max_document_length.
    vocab_processor = learn.preprocessing.VocabularyProcessor(
        max_document_length, tokenizer_fn=chinese_tokenizer)
    x = np.array(list(vocab_processor.fit_transform(x_text)))

    # Same fixed seed as preprocess_own_train() -> identical permutation.
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # The last 10% is the dev set (the complement of the training slice).
    # TODO: This is very crude, should use cross-validation
    dev_sample_index = -1 * int(0.1 * float(len(y)))
    x_dev = x_shuffled[dev_sample_index:]
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]

    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    return x_dev, y_dev

def build_train_dev(src='data/waimai_10k.csv',
                    train_path='data/waimai_train.txt',
                    test_path='data/waimai_test.txt'):
    """Shuffle the waimai CSV and write a 90/10 train/test split as TSV.

    Args:
        src: input CSV path (defaults match the original hard-coded paths).
        train_path: output TSV for the training rows (~90%).
        test_path: output TSV for the test rows (~10%).
    """
    df = pd.read_csv(src)

    # One full shuffle is enough; the original shuffled twice (sklearn
    # shuffle() followed by sample(frac=1.0)), which was redundant.
    df = df.sample(frac=1.0)
    cut_idx = int(round(0.1 * df.shape[0]))
    df_test, df_train = df.iloc[:cut_idx], df.iloc[cut_idx:]
    print(df.shape, df_test.shape, df_train.shape)

    df_train.to_csv(train_path, sep="\t")
    df_test.to_csv(test_path, sep="\t")


def build_train_dev_eng(neg_path='data/rt-polaritydata/rt-polarity.neg',
                        pos_path='data/rt-polaritydata/rt-polarity.pos',
                        train_path='data/emo_eng_train.txt',
                        test_path='data/emo_eng_test.txt'):
    """Build a 90/10 train/test TSV split from the rt-polarity corpus.

    NOTE(review): negative lines get label '1' and positive lines '0' —
    the opposite of the usual convention. Preserved from the original;
    confirm against the downstream label mapping before changing.

    Args:
        neg_path: file of negative reviews, one per line.
        pos_path: file of positive reviews, one per line.
        train_path: output TSV for the training rows (~90%).
        test_path: output TSV for the test rows (~10%).
    """
    data_list = []
    with open(neg_path, 'r', encoding='utf-8') as f:
        for line in f:
            data_list.append([line.replace("\n", ""), '1'])

    with open(pos_path, 'r', encoding='utf-8') as f:
        for line in f:
            data_list.append([line.replace("\n", ""), '0'])

    df = pd.DataFrame(data_list, columns=['text', 'label'])
    print(df.shape)

    # One full shuffle is enough; the original shuffled twice (sklearn
    # shuffle() followed by sample(frac=1.0)), which was redundant.
    df = df.sample(frac=1.0)
    cut_idx = int(round(0.1 * df.shape[0]))
    df_test, df_train = df.iloc[:cut_idx], df.iloc[cut_idx:]
    print(df.shape, df_test.shape, df_train.shape)

    df_train.to_csv(train_path, sep="\t")
    df_test.to_csv(test_path, sep="\t")





if __name__ == '__main__':
    # Entry point: regenerate the English (rt-polarity) train/test split.
    # Other preprocessing steps (preprocess_zh_comment, build_train_dev)
    # can be invoked here as needed.
    build_train_dev_eng()