# -*- coding: utf-8 -*-
# @Time    : 2021/8/9 17:48
# @Author  : huangwei
# @File    : gmf_model.py
# @Software: PyCharm

from method import data_gen
import paddle
import paddle.nn.functional as F
import numpy as np

from visualdl import LogWriter

# VisualDL writer for the training-loss curve (view with `visualdl --logdir ./log`).
log_writer = LogWriter("./log")

# Embedding-table row counts (vocabulary sizes) used by GMF below.
good_num = 100000
store_num = 600
# Embedding width for the matrix-factorization branch.
mf_emb_size = 64
# NOTE(review): mlp_emb_size appears unused in this file — presumably left over
# from an MLP/NeuMF variant; confirm before removing.
mlp_emb_size = 64
batch_size = 1024
# Destination for the parameters saved by train().
params_filepath = 'models/gmf.pdparams'

# Build the train/test data iterators once at import time.
train_loader, test_loader = data_gen(batch_size)


# GMF-only model: Generalized Matrix Factorization over (store, good) id pairs.
class GMF(paddle.nn.Layer):
    def __init__( self ):
        """Build the two embedding tables and the scalar output projection.

        Table sizes come from the module-level constants ``store_num``,
        ``good_num`` and ``mf_emb_size``.
        """
        super(GMF, self).__init__()
        self.gmf_emb_store = paddle.nn.Embedding(num_embeddings=store_num, embedding_dim=mf_emb_size)
        self.gmf_emb_good = paddle.nn.Embedding(num_embeddings=good_num, embedding_dim=mf_emb_size)

        # Projects the element-wise product of the two embeddings to one score.
        self.mf_layer = paddle.nn.Linear(in_features=mf_emb_size, out_features=1)
        self.mf_sig = paddle.nn.Sigmoid()

    def forward( self, store_tensor, good_tensor ):
        """Predict a score in (0, 5) for each (store, good) id pair.

        Args:
            store_tensor: integer tensor of store ids.
            good_tensor: integer tensor of good ids, same leading shape.

        Returns:
            Float tensor of predictions, sigmoid output scaled by 5.
        """
        store_embedding = self.gmf_emb_store(store_tensor)
        good_embedding = self.gmf_emb_good(good_tensor)

        # Fix: paddle.fluid.layers.elementwise_mul is a deprecated fluid API
        # (removed in Paddle 2.x); paddle.multiply is the supported equivalent.
        gmf_out = paddle.multiply(store_embedding, good_embedding)
        gmf_out = self.mf_layer(gmf_out)
        gmf_out = self.mf_sig(gmf_out) * 5

        return gmf_out


def run( model ):
    """Train *model* with Adam + L1 loss for 20 epochs, logging the running
    training loss every 1000 steps and printing the mean test/train loss
    after each epoch. Nothing is persisted to disk.
    """
    learning_rate = 0.01
    num_epochs = 20
    optimizer = paddle.optimizer.Adam(learning_rate=learning_rate,
                                      parameters=model.parameters())

    step = 0
    for epoch in range(num_epochs):
        model.train()
        for batch in train_loader():
            stores, goods, _sorts, labels = batch
            stores_t = paddle.to_tensor(stores)
            goods_t = paddle.to_tensor(goods)
            labels_t = paddle.reshape(
                paddle.to_tensor(labels, dtype='float32'), shape=[-1, 1])

            prediction = model(stores_t, goods_t)
            loss = F.l1_loss(prediction, labels_t)

            step += 1
            # Record the training curve every 1000 steps.
            if step % 1000 == 0:
                log_writer.add_scalar(tag='gmf train loss', step=step, value=loss.numpy())
                print("gmf train loss:", loss.numpy())

            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

        # Per-epoch evaluation on both splits.
        print("test loss:", test(model, test_loader()))
        print("train loss:", test(model, train_loader()))


def train( model ):
    """Train *model* for 20 epochs with Adam + L1 loss and save the final
    parameters to ``params_filepath``.

    Logs the training loss to VisualDL every 1000 steps.
    """
    lr = 0.01
    epochs = 20
    model.train()

    # Fix: create the optimizer ONCE. The original rebuilt it every epoch,
    # which silently reset Adam's moment estimates at each epoch boundary.
    # NOTE(review): removed commented-out code hinted at step-wise lr decay;
    # if that is wanted, attach a paddle.optimizer.lr scheduler instead of
    # recreating the optimizer.
    opt = paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters())

    step = 0
    for epoch in range(epochs):
        print("epoch:", epoch)

        for idx, data in enumerate(train_loader()):
            store_arr, good_arr, sort_arr, label_arr = data
            store_tensor = paddle.to_tensor(store_arr)
            good_tensor = paddle.to_tensor(good_arr)
            label_tensor = paddle.to_tensor(label_arr, dtype='float32')
            label_tensor = paddle.reshape(label_tensor, shape=[-1, 1])

            out = model(store_tensor, good_tensor)
            loss = F.l1_loss(out, label_tensor)

            step += 1
            # Record the training curve every 1000 steps.
            if step % 1000 == 0:
                log_writer.add_scalar(tag='gmf train loss', step=step, value=loss.numpy())
                print("epoch:{}, loss:{}".format(epoch, loss.numpy()))

            loss.backward()
            opt.step()
            opt.clear_grad()

    paddle.save(model.state_dict(), params_filepath)


def test( model, data_loader ):
    """Return the mean L1 loss of *model* over *data_loader*, rounded to
    4 decimal places.

    Switches the model to eval mode (callers re-enable train mode).
    Incomplete trailing batches (fewer than ``batch_size`` samples) are
    skipped so every batch contributes equally to the mean.

    Returns NaN when the loader yields no complete batch.
    """
    model.eval()

    loss_set = []
    for data in data_loader:
        store_arr, good_arr, sort_arr, label_arr = data
        # Skip the (possibly smaller) final partial batch.
        if len(store_arr) == batch_size:
            store_tensor = paddle.to_tensor(store_arr)
            good_tensor = paddle.to_tensor(good_arr)
            label_tensor = paddle.to_tensor(label_arr, dtype='float32')
            label_tensor = paddle.reshape(label_tensor, shape=[-1, 1])

            pred = model(store_tensor, good_tensor)
            loss = F.l1_loss(pred, label_tensor)
            # Fix: .item() handles both 0-d and 1-element arrays; the original
            # `[0]` indexing breaks when newer Paddle returns a 0-d scalar.
            loss_set.append(loss.numpy().item())

    if not loss_set:
        # np.mean([]) would emit a RuntimeWarning and return NaN; be explicit.
        return float('nan')
    return round(float(np.mean(loss_set)), 4)


# Script entry: build a fresh GMF model and run the train-and-evaluate loop.
# NOTE(review): this runs at import time — consider wrapping in an
# `if __name__ == "__main__":` guard.
model = GMF()
run(model)
# Alternative flow: restore previously saved parameters instead of training fresh.
# model_state_dict = paddle.load(params_filepath)
# model.load_dict(model_state_dict)

# NOTE(review): `test` takes (model, data_loader) — the commented call below
# is stale and would raise TypeError if re-enabled as written.
# train(model)
# test(model)
