import pandas as pd
import numpy as np
import datetime
import time
import os
from paddle import fluid


def load_data():
    """
    Load the yoochoose click data.

    On first run, preprocess the raw download (keep the first ~1M rows, drop
    the trailing column, parse timestamps, filter sparse sessions/items) and
    cache the result as a CSV; on later runs just read the cached CSV.

    :return: DataFrame with columns ['session_id', 'time', 'item_id']
    """
    def trans_data(top_num=1000000):
        """
        Copy the first `top_num` lines of the raw file into target_file,
        dropping the last comma-separated field of every line.
        :param top_num: maximum number of lines to keep
        :return: None (writes target_file)
        """
        with open(source_file, 'r') as f:
            data_set = f.readlines()

        # NOTE(review): 'a+' appends — re-running without deleting
        # target_file duplicates its contents; verify this is intended.
        with open(target_file, 'a+') as wf:
            for num, data in enumerate(data_set):
                print('num: {}, rate: {}'.format(num, num / len(data_set)))
                if num > top_num:
                    break

                # Reverse the line, cut everything up to (and including) the
                # first ',' — i.e. remove the line's last field — then
                # reverse back and restore the trailing newline.
                mid_data = data[::-1][data[::-1].index(',') + 1:]
                mid_data = mid_data[::-1] + '\n'

                wf.write(mid_data)
                wf.flush()

    def read_source_data():
        # Read the trimmed raw data (first three columns only) and parse it.
        df = pd.read_csv(target_file, sep=',', header=None, usecols=[0, 1, 2], dtype={0: np.int32, 1: str, 2: np.int64})
        df.columns = ['session_id', 'time_str', 'item_id']
        # Parse 'YYYY-MM-DDTHH:MM:SS.fffZ' strings into POSIX timestamps
        # (float seconds). NOTE(review): .timestamp() treats the naive parse
        # as local time, not UTC, despite the trailing 'Z' — confirm.
        df['time'] = df['time_str'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp())

        return df[['session_id', 'time', 'item_id']]

    def data_filter(df, session_id_thresh=1, item_id_thresh=5):
        """
        Filter out sparse sessions and items.
        :param df: DataFrame with ['session_id', 'time', 'item_id']
        :param session_id_thresh: keep sessions with strictly more clicks than this
        :param item_id_thresh: keep items clicked at least this many times
        :return: the filtered DataFrame
        """
        # Drop sessions with <= session_id_thresh clicks (incl. NaN groups).
        session_size = df.groupby('session_id').size()
        df = df[np.in1d(df['session_id'], session_size[session_size > session_id_thresh].index)]

        # Drop items that occur fewer than item_id_thresh times.
        item_size = df.groupby('item_id').size()
        df = df[np.in1d(df['item_id'], item_size[item_size >= item_id_thresh].index)]

        # Item filtering may have shrunk some sessions below the threshold,
        # so re-apply the session filter.
        session_size = df.groupby('session_id').size()
        return df[np.in1d(df['session_id'], session_size[session_size > session_id_thresh].index)]

    def read_and_pre_treatment():
        """
        Full preprocessing pipeline: trim raw file, parse, filter, save CSV.
        :return: None (writes save_file_name)
        """
        # Take the first ~1M raw lines.
        trans_data()
        # Basic format conversion (timestamp parsing).
        df_data = read_source_data()
        # Filter sparse sessions / items.
        df_filter_data = data_filter(df_data)
        # Persist the preprocessed data.
        df_filter_data.to_csv(save_file_name)

    source_file_path = 'E:\\BaiduNetdiskDownload\\机器学习案例实战\\chp13_基于GRU的Session-based推荐系统\\数据'
    source_file_name = 'yoochoose-clicks.dat'
    target_file_name = 'yoochoose-clicks.txt'
    source_file = os.path.join(source_file_path, source_file_name)
    target_file = os.path.join(source_file_path, target_file_name)

    save_file_name = './data/C/chp9/yoochoose-clicks.csv'
    if not os.path.exists(save_file_name):
        # Run the preprocessing only when the cached CSV is missing.
        read_and_pre_treatment()

    # Re-read the preprocessed (cached) data.
    return pd.read_csv(save_file_name)[['session_id', 'time', 'item_id']]


def split_data(df):
    """
    数据切分：切分为训练集，验证集和测试集
    :return:
    """
    def _split_train_test(df):
        """
        切分数据为训练集和测试集
        :param df:
        :return:
        """
        max_day = df['time'].max()
        # 每个session的最新的日期
        session_id_max_day = df.groupby('session_id')['time'].max()

        # last day为测试集，其他为训练集,86400为24H
        session_id_train = session_id_max_day[session_id_max_day < max_day - 86400].index
        session_id_test = session_id_max_day[session_id_max_day >= max_day - 86400].index

        # np.in1d(a, b)，列出a中存在b的部分，相当于excel的vslookup
        train_data = df[np.in1d(df['session_id'], session_id_train)]
        test_data = df[np.in1d(df['session_id'], session_id_test)]

        # 在测试集中筛选item共同存在的
        test_data = test_data[np.in1d(test_data['item_id'], train_data['item_id'])]
        test_session_id_length = test_data.groupby('session_id').size()
        # 剔除session_id只有1个情况
        test_data = test_data[
            np.in1d(test_data['session_id'], test_session_id_length[test_session_id_length >= 2].index)]
        return train_data, test_data

    train_data, test_data = _split_train_test(df)
    train_data, valid_data = _split_train_test(train_data)

    train_data.to_csv('./data/C/chp9/yoochoose-clicks-train.csv')
    test_data.to_csv('./data/C/chp9/yoochoose-clicks-test.csv')
    valid_data.to_csv('./data/C/chp9/yoochoose-clicks-valid.csv')


def trans_local2paddle(file_name):
    """
    Convert a split CSV into the text format the paddle model consumes:
    one session per line, item_ids space-separated in time order.

    Writes `paddle_<stem>.txt` next to the input file.

    :param file_name: CSV file name (e.g. 'yoochoose-clicks-train.csv')
    :return: None (writes the converted file)
    """
    # BUG FIX: split_data() writes the split CSVs to './data/C/chp9', but
    # this function read from './data/C/chp13', so the pipeline in
    # prepare_data() could never find its inputs. Use the same directory.
    common_path = './data/C/chp9'
    file = os.path.join(common_path, file_name)
    df = pd.read_csv(file)
    df = df.sort_values(['session_id', 'time'])
    df['item_id'] = df['item_id'].astype(str)

    new_file_name = 'paddle_{}.txt'.format(file_name[: file_name.index('.')])
    new_file = os.path.join(common_path, new_file_name)

    with open(new_file, 'w') as wf:
        last_sess = -1
        sign = 1
        i = 0
        for index, row in df.iterrows():
            i += 1
            # NOTE(review): this skips the very first data row; pd.read_csv
            # already consumed the header, so this looks unintentional —
            # kept as-is to preserve existing output, TODO confirm.
            if i == 1:
                continue
            session_id = row['session_id']
            item_id = row['item_id']
            print('file_name: {}, session_id: {}, item_id: {}'.format(file_name, session_id, item_id))
            if int(session_id) != last_sess:
                # New session: start a new line (except before the first one).
                if sign:
                    sign = 0
                    wf.write(item_id + ' ')
                else:
                    wf.write('\n' + item_id + ' ')
                last_sess = int(session_id)
            else:
                wf.write(item_id + ' ')


def prepare_data():
    """
    End-to-end data preparation: load the clicks, split them, and convert
    each split into the paddle text format.
    :return: None
    """
    # Load (and, on first run, preprocess) the click data.
    clicks = load_data()
    # Write the train / test / validation CSVs.
    split_data(clicks)

    # Convert each split into the one-session-per-line model format.
    split_files = (
        'yoochoose-clicks-train.csv',
        'yoochoose-clicks-test.csv',
        'yoochoose-clicks-valid.csv',
    )
    for csv_name in split_files:
        trans_local2paddle(csv_name)


def word_count(msg_file, word_freq=None):
    """
    Accumulate word frequencies from an iterable of lines.

    :param msg_file: iterable of strings (e.g. an open file object)
    :param word_freq: optional existing {word: count} dict to add into
    :return: the (possibly shared) frequency dict, updated in place
    """
    counts = {} if word_freq is None else word_freq
    for raw_line in msg_file:
        for token in raw_line.strip().split(' '):
            counts[token] = counts.get(token, 0) + 1
    return counts


def build_words_dict(train_file='', valid_file='', test_file=''):
    """
    Count word frequencies across the three split files, sort by descending
    frequency (ties broken by ascending word), and return each word's rank.

    :param train_file: path to the training text file
    :param valid_file: path to the validation text file
    :param test_file: path to the test text file
    :return: dict mapping each word to its position in the sorted order
    """
    with open(train_file) as train_in, open(valid_file) as valid_in, \
            open(test_file) as test_in:
        word_freq = word_count(msg_file=test_in)
        # Accumulate the validation split into the same dict.
        word_freq = word_count(msg_file=valid_in, word_freq=word_freq)
        # Accumulate the training split.
        word_freq = word_count(msg_file=train_in, word_freq=word_freq)

    # BUG FIX: in Python 3, sorted() only accepts `key` as a keyword-only
    # argument; passing the lambda positionally raised
    # "TypeError: sorted expected 1 argument, got 2".
    word_freq_sorted = sorted(word_freq.items(), key=lambda x: (-x[1], x[0]))
    words = [x[0] for x in word_freq_sorted]
    # Map each word to its rank (0 = most frequent).
    return dict(zip(words, range(len(words))))


def to_lod_tensor(data, place):
    """
    Pack a batch of variable-length sequences into a paddle LoDTensor.

    :param data: list of sequences, each a list/array of integer ids
    :param place: paddle device place (e.g. fluid.CPUPlace()) holding the data
    :return: fluid.LoDTensor of shape (total_len, 1), dtype int64, with
             level-of-detail offsets describing the sequence boundaries
    """
    seq_length = [len(seq) for seq in data]
    seq_length.insert(0, 0)
    # Cumulative offsets [0, len0, len0+len1, ...] — the LoD format paddle
    # uses to mark where each sequence starts and ends.
    lod = list(np.cumsum(seq_length))

    # Flatten the batch into a (total_len, 1) int64 column vector.
    flatten_data = np.concatenate(data, axis=0).astype(np.int64)
    reshape_data = flatten_data.reshape((len(flatten_data), 1))
    # BUG FIX: the class is fluid.LoDTensor — the original 'fluid.LoDTenSor'
    # raised AttributeError at runtime.
    res = fluid.LoDTensor()
    res.set(reshape_data, place)
    res.set_lod([lod])
    return res


# ---------------------------------------------------------------------------
# GRU session-based recommendation: network definition and training loop.
#
# NOTE(review): vocab_size, hid_size, init_low_bound, init_high_bound,
# base_lr, pass_num and train_render() are not defined in this file — they
# must be supplied by code outside this chunk. 'train_render' may be a typo
# for a reader factory such as 'train_reader' — TODO confirm.
# ---------------------------------------------------------------------------
src = fluid.layers.data(name='src', shape=[1], dtype='int64', lod_level=1)
dst = fluid.layers.data(name='dst', shape=[1], dtype='int64', lod_level=1)
# Per-layer learning-rate multipliers.
embedding_lr = 10.0
gru_lr = 1.0
fc_lr = 1.0
embedding = fluid.layers.embedding(input=src, size=[vocab_size, hid_size],
                                   param_attr=fluid.ParamAttr(initializer=
                                                              fluid.initializer.Uniform(low=init_low_bound,
                                                                                        high=init_high_bound),
                                                              learning_rate=embedding_lr),
                                   is_sparse=True)
# Project the embedding to 3 * hid_size, the input width dynamic_gru expects.
fc = fluid.layers.fc(input=embedding, size=hid_size * 3,
                     param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(low=init_low_bound,
                                                                                      high=init_high_bound),
                                                              learning_rate=fc_lr))
# GRU recommendation model.
gru = fluid.layers.dynamic_gru(input=fc, size=hid_size,
                               param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(low=init_low_bound,
                                                                                                high=init_high_bound),
                                                              learning_rate=fc_lr))
# Softmax prediction over the whole item vocabulary.
predict = fluid.layers.fc(input=gru, size=vocab_size, act='softmax',
                          param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(low=init_low_bound,
                                                                                           high=init_high_bound),
                                                              learning_rate=fc_lr))
# Training cost.
cost = fluid.layers.cross_entropy(input=predict, label=dst)
# Top-20 accuracy (recall@20-style evaluation metric).
acc = fluid.layers.accuracy(input=predict, label=dst, k=20)
avg_cost = fluid.layers.mean(x=cost)

# Optimizer.
optimizer = fluid.optimizer.Adagrad(learning_rate=base_lr)
optimizer.minimize(avg_cost)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# Parallel CPU executor.
train_exe = fluid.ParallelExecutor(use_cuda=False, loss_name=avg_cost.name)


def _save_model(epoch_idx):
    """Persist the inference model for `epoch_idx` to disk."""
    # BUG FIX: the original non-raw string contained '\b' (in '\book-ml'),
    # which Python interprets as a backspace character and silently corrupts
    # the path; a raw string keeps every backslash literal.
    save_dir = r'E:\code\gitee\book-ml\data\C\chp13\model\epoch_{}'.format(epoch_idx)
    feed_var_name = ['src', 'dst']
    fetch_vars = [avg_cost, acc, predict]
    fluid.io.save_inference_model(save_dir, feed_var_name, fetch_vars, exe)
    print('model saved!')


total_time = 0
for pass_idx in range(pass_num):
    epoch_idx = pass_idx + 1
    print('epoch_{} start'.format(epoch_idx))
    t0 = time.time()
    i = 0
    for data in train_render():
        i += 1
        # Each element of `data` is a (source_sequence, target_sequence) pair.
        lod_src = to_lod_tensor([dat[0] for dat in data], place)
        lod_dst = to_lod_tensor([dat[1] for dat in data], place)
        ret = train_exe.run(
            feed={'src': lod_src, 'dst': lod_dst},
            fetch_list=[avg_cost.name]
        )
        print_cost = np.mean(ret[0])
        # Per-batch perplexity.
        ppl = np.mean(np.exp(ret[0]))
        if i % 10 == 0:
            print('step: {}, cost: {}, ppl: {}'.format(i, print_cost, ppl))

        if i == 400:
            # Early stop after 400 steps: checkpoint and quit.
            _save_model(epoch_idx)
            exit(0)

    t1 = time.time()
    total_time += (t1 - t0)
    print('epoch: {}, num_steps: {}'.format(epoch_idx, i))
    _save_model(epoch_idx)

