# -*- coding: utf-8 -*-
# @Time    : 2021/8/9 18:24
# @Author  : huangwei
# @File    : mlp_model.py
# @Software: PyCharm
from method import data_gen
import paddle
import paddle.nn.functional as F
import numpy as np
from visualdl import LogWriter

# VisualDL writer for training curves (view with `visualdl --logdir ./log`).
log_writer = LogWriter("./log")

# Embedding-table vocabulary sizes and widths.
good_num = 100000        # number of distinct goods (item-id vocabulary)
store_num = 600          # number of distinct stores (store-id vocabulary)
mf_emb_size = 64         # MF embedding width (unused by the pure-MLP model below)
mlp_emb_size = 64        # MLP embedding width
batch_size = 1024        # BUGFIX: was assigned twice with the same value; defined once now
params_filepath = 'models/mlp.pdparams'

# Dataset split configuration.
filepath = 'data/price_data2.txt'
test_ratio = 0.2

# Build train/test mini-batch loaders from the raw price file.
train_loader, test_loader = data_gen(filepath, test_ratio, batch_size)


# 只含有 MLP 的模型
# Pure-MLP scoring model: embeds a (store, good) id pair, concatenates the two
# embeddings, runs them through a ReLU MLP tower, and squashes the final scalar
# into [0, 5] via sigmoid * 5.
class MLP(paddle.nn.Layer):
    def __init__( self, mlp_layers_units ):
        """Build the tower.

        mlp_layers_units: list of hidden-layer widths, e.g. [64, 32, 16, 8, 4].
        The first layer consumes the concatenated store+good embeddings
        (2 * mlp_emb_size features).
        """
        super(MLP, self).__init__()
        self.mlp_layers_units = mlp_layers_units
        self.mlp_emb_store = paddle.nn.Embedding(num_embeddings=store_num, embedding_dim=mlp_emb_size)
        self.mlp_emb_good = paddle.nn.Embedding(num_embeddings=good_num, embedding_dim=mlp_emb_size)

        # BUGFIX: sublayers must be held in a paddle.nn.LayerList, not a plain
        # Python list. With a plain list the Linear layers are never registered
        # on the Layer, so their weights are missing from model.parameters()
        # and would never be updated by the optimizer (nor saved/loaded).
        self.mlp_layers = paddle.nn.LayerList()
        for i in range(len(self.mlp_layers_units)):
            # First layer takes the concatenated embeddings; the rest chain on
            # the previous hidden width.
            in_units = mlp_emb_size + mlp_emb_size if i == 0 else self.mlp_layers_units[i - 1]
            self.mlp_layers.append(paddle.nn.Linear(in_features=in_units,
                                                    out_features=self.mlp_layers_units[i]))
            self.mlp_layers.append(paddle.nn.ReLU())

        # Final projection to a single score, then sigmoid scaling in forward().
        self.neu_mf = paddle.nn.Linear(in_features=self.mlp_layers_units[-1], out_features=1)
        self.neu_sig = paddle.nn.Sigmoid()

    def forward( self, store_tensor, good_tensor ):
        """Return a [batch, 1] score in [0, 5] for each (store, good) pair."""
        store_embedding = self.mlp_emb_store(store_tensor)
        good_embedding = self.mlp_emb_good(good_tensor)

        # Concatenate along the feature axis -> [batch, 2 * mlp_emb_size].
        mlp_out = paddle.concat([store_embedding, good_embedding], axis=1)

        for layer in self.mlp_layers:
            mlp_out = layer(mlp_out)

        mlp_out = self.neu_mf(mlp_out)
        # Map the raw logit into the 0-5 target range.
        mlp_out = self.neu_sig(mlp_out) * 5

        return mlp_out


def train( model, lr=0.01, epochs=30 ):
    """Train *model* on the global train_loader with L1 (MAE) loss and Adam.

    The learning rate decays by 5x every 10 epochs. Weights are saved to
    params_filepath when training finishes. `lr` and `epochs` are new keyword
    parameters with the previous hard-coded values as defaults.
    """
    model.train()

    # BUGFIX: create the optimizer ONCE. The original rebuilt Adam at the top
    # of every epoch, which silently discarded Adam's first/second moment
    # estimates each time; now only the learning rate is updated per epoch.
    opt = paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters())

    step = 0
    for epoch in range(epochs):
        # Step-wise decay: lr / 5**(epoch // 10).
        new_lr = lr / (5 ** (epoch // 10))
        opt.set_lr(new_lr)
        print("epoch:{}, learning rate:{}".format(epoch, new_lr))

        for idx, data in enumerate(train_loader()):
            # Loader yields (store_ids, good_ids, <unused>, labels).
            store_arr, good_arr, _, label_arr = data

            store_tensor = paddle.to_tensor(store_arr)
            good_tensor = paddle.to_tensor(good_arr)
            label_tensor = paddle.to_tensor(label_arr, dtype='float32')
            # Match the model's [batch, 1] output shape.
            label_tensor = paddle.reshape(label_tensor, shape=[-1, 1])

            out = model(store_tensor, good_tensor)
            loss = F.l1_loss(out, label_tensor)

            step += 1
            # Log the loss curve to VisualDL every 100 steps.
            if step % 100 == 0:
                log_writer.add_scalar(tag='paddle mlp result', step=step, value=loss.numpy())

            loss.backward()
            opt.step()
            opt.clear_grad()

    paddle.save(model.state_dict(), params_filepath)


def test( model ):
    """Evaluate *model* on the global test_loader and print the mean L1 loss."""
    model.eval()

    loss_set = []
    for idx, data in enumerate(test_loader()):
        # BUGFIX: train() unpacks 4-tuples from loaders built by the same
        # data_gen() call, but this function unpacked 3 — which would raise
        # ValueError at runtime. Accept either layout to be safe.
        if len(data) == 4:
            store_arr, good_arr, _, label_arr = data
        else:
            store_arr, good_arr, label_arr = data
        # NOTE(review): trailing partial batches are skipped, which slightly
        # biases the metric — confirm this is intentional.
        if len(store_arr) == batch_size:
            store_tensor = paddle.to_tensor(store_arr)
            good_tensor = paddle.to_tensor(good_arr)
            label_tensor = paddle.to_tensor(label_arr, dtype='float32')
            label_tensor = paddle.reshape(label_tensor, shape=[-1, 1])

            pred = model(store_tensor, good_tensor)
            loss = F.l1_loss(pred, label_tensor)
            # float(loss) works for both 0-d and 1-element tensors, unlike
            # loss.numpy()[0] which breaks on 0-d results in newer Paddle.
            loss_set.append(float(loss))
    # Guard the empty case: np.mean([]) returns nan with a RuntimeWarning.
    if not loss_set:
        print("test dataset produced no full batches; nothing to evaluate")
        return
    mean_loss = np.mean(loss_set)
    print("test dataset mean loss is:", round(mean_loss, 4))


if __name__ == '__main__':
    # Tower widths: 128-dim concat input -> 64 -> 32 -> 16 -> 8 -> 4 -> 1.
    model = MLP([64, 32, 16, 8, 4])
    # To resume from a previously saved checkpoint, uncomment:
    # model.load_dict(paddle.load(params_filepath))

    train(model)
    test(model)
