# -*- coding: utf-8 -*-
# @Time    : 2021/9/22 9:49
# @Author  : huangwei
# @File    : mix_model2.py
# @Software: PyCharm
import paddle
import paddle.nn.functional as F
import numpy as np
from method import data_gen

from visualdl import LogWriter

# VisualDL writer used by train() to log the loss curve.
log_writer = LogWriter("./log")

# Path where trained parameters are saved (train) and restored (below).
params_filepath = 'models/mix.pdparams'

# Split the dataset into train / test loaders.
filepath = 'data/price_data2.txt'
test_ratio = 0.2
batch_size = 1024

train_loader, test_loader = data_gen(filepath, test_ratio, batch_size)

# Vocabulary sizes and embedding widths for the two embedding pairs;
# alpha blends the NCF and per-category GMF outputs in Model.forward.
good_num = 100000
store_num = 600
mf_emb_size = 64
mlp_emb_size = 64
alpha = 0.5


class Model(paddle.nn.Layer):
    """NeuMF-style price model blending two branches.

    Branch 1 (ncf_out): GMF (element-wise product of store/good embeddings)
    concatenated with an MLP tower over a second embedding pair, then a
    linear + sigmoid head scaled to [0, 5].

    Branch 2 (out2): the store embedding gated by the 3-way one-hot
    sort_tensor, scored by three category-specific heads, also scaled to
    [0, 5].

    The branches are blended with the module-level constant `alpha`.
    """

    def __init__( self, mlp_layers_units ):
        """mlp_layers_units: output sizes of the MLP tower, e.g. [256, 128, 64, 32, 16]."""
        super(Model, self).__init__()
        self.mlp_layers_units = mlp_layers_units
        self.gmf_emb_store = paddle.nn.Embedding(num_embeddings=store_num, embedding_dim=mf_emb_size)
        self.gmf_emb_good = paddle.nn.Embedding(num_embeddings=good_num, embedding_dim=mf_emb_size)
        self.mlp_emb_store = paddle.nn.Embedding(num_embeddings=store_num, embedding_dim=mlp_emb_size)
        self.mlp_emb_good = paddle.nn.Embedding(num_embeddings=good_num, embedding_dim=mlp_emb_size)

        # BUG FIX: sublayers stored in a plain Python list are NOT registered
        # with paddle.nn.Layer, so the MLP weights were excluded from
        # model.parameters() (never optimized) and from state_dict() (never
        # saved). LayerList registers them properly.
        self.mlp_layers = paddle.nn.LayerList()
        for i in range(len(self.mlp_layers_units)):
            # First layer consumes the concatenated store+good MLP embeddings.
            in_features = mlp_emb_size * 2 if i == 0 else self.mlp_layers_units[i - 1]
            self.mlp_layers.append(paddle.nn.Linear(in_features=in_features,
                                                    out_features=self.mlp_layers_units[i]))
            self.mlp_layers.append(paddle.nn.ReLU())

        # Fusion head over [GMF product | MLP tower output].
        self.neu_layer = paddle.nn.Linear(in_features=self.mlp_layers_units[-1] + mf_emb_size, out_features=1)
        self.neu_sig = paddle.nn.Sigmoid()

        # GMF branch: one scoring head per category slot (3 slots).
        self.linear1 = paddle.nn.Linear(in_features=mf_emb_size, out_features=1)
        self.linear2 = paddle.nn.Linear(in_features=mf_emb_size, out_features=1)
        self.linear3 = paddle.nn.Linear(in_features=mf_emb_size, out_features=1)
        self.sig = paddle.nn.Sigmoid()

    def forward( self, store_tensor, good_tensor, sort_tensor ):
        """store_tensor / good_tensor: integer id tensors (assumed shape [B]
        — confirm against data_gen); sort_tensor: [B, 3, 1] float one-hot
        category gate (reshaped by callers). Returns [B, 1] scores in [0, 5].
        """
        gmf_store_embedding = self.gmf_emb_store(store_tensor)
        gmf_good_embedding = self.gmf_emb_good(good_tensor)
        mlp_store_embedding = self.mlp_emb_store(store_tensor)
        mlp_good_embedding = self.mlp_emb_good(good_tensor)

        # NCF branch. paddle.multiply replaces the deprecated
        # paddle.fluid.layers.elementwise_mul (same broadcast semantics).
        gmf_out = paddle.multiply(gmf_store_embedding, gmf_good_embedding)

        mlp_out = paddle.concat([mlp_store_embedding, mlp_good_embedding], axis=1)
        for layer in self.mlp_layers:
            mlp_out = layer(mlp_out)

        concat_out = paddle.concat([gmf_out, mlp_out], axis=1)
        ncf_out = self.neu_layer(concat_out)
        ncf_out = self.neu_sig(ncf_out) * 5

        # GMF branch: broadcast-gate the store embedding by the one-hot
        # ([B,3,1] * [B,1,E] -> [B,3,E]), then flatten to [B, 3*E].
        store_reshape = paddle.reshape(gmf_store_embedding, shape=[-1, 1, mf_emb_size])
        temp = paddle.multiply(sort_tensor, store_reshape)
        temp = paddle.reshape(temp, shape=[-1, mf_emb_size * 3])

        # Slice per-slot views; bounds derive from mf_emb_size instead of the
        # previous hard-coded 64/128 so a width change cannot silently break this.
        flip1 = temp[:, :mf_emb_size]
        flip2 = temp[:, mf_emb_size:2 * mf_emb_size]
        flip3 = temp[:, 2 * mf_emb_size:]
        gmf_out1 = paddle.multiply(flip1, gmf_good_embedding)
        gmf_out2 = paddle.multiply(flip2, gmf_good_embedding)
        gmf_out3 = paddle.multiply(flip3, gmf_good_embedding)

        gmf_out1 = self.sig(self.linear1(gmf_out1))
        gmf_out2 = self.sig(self.linear2(gmf_out2))
        gmf_out3 = self.sig(self.linear3(gmf_out3))

        # Keep only the active slot's score via the one-hot gate; scale to [0, 5].
        gmf_concat = paddle.concat([gmf_out1, gmf_out2, gmf_out3], axis=1)
        out2 = paddle.multiply(gmf_concat, paddle.reshape(sort_tensor, shape=[-1, 3]))
        out2 = paddle.sum(out2, axis=1) * 5
        out2 = paddle.reshape(out2, shape=[-1, 1])

        # Blend the two branches with the module-level mixing weight.
        out = ncf_out * alpha + out2 * (1 - alpha)
        return out


def train( model ):
    """Run 15 epochs of Adam + L2 decay over train_loader, logging L1 loss
    to VisualDL every 100 steps and saving parameters after each epoch.

    model: a Model instance (mutated in place; parameters also written to
    params_filepath).
    """
    from paddle.regularizer import L2Decay

    lr = 0.01
    epochs = 15
    model.train()

    # BUG FIX: the optimizer used to be re-created inside the epoch loop,
    # discarding Adam's accumulated moment estimates every epoch. Create it
    # once so the adaptive state persists across epochs.
    opt = paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters(),
                                weight_decay=L2Decay())

    step = 0
    for epoch in range(epochs):
        for idx, data in enumerate(train_loader()):
            store_arr, good_arr, sort_arr, label_arr = data
            store_tensor = paddle.to_tensor(store_arr)
            good_tensor = paddle.to_tensor(good_arr)
            sort_tensor = paddle.to_tensor(sort_arr, dtype='float32')
            # Model.forward expects the one-hot gate as [B, 3, 1].
            sort_tensor = paddle.reshape(sort_tensor, shape=[-1, 3, 1])
            label_tensor = paddle.to_tensor(label_arr, dtype='float32')
            label_tensor = paddle.reshape(label_tensor, shape=[-1, 1])

            out = model(store_tensor, good_tensor, sort_tensor)
            loss = F.l1_loss(out, label_tensor)

            step += 1
            # Log + print together (previously two identical modulo checks).
            if step % 100 == 0:
                log_writer.add_scalar(tag='mix loss', step=step, value=loss.numpy())
                print("epoch:{}, loss:{}".format(epoch, loss.numpy()))

            loss.backward()
            opt.step()
            opt.clear_grad()

        # Checkpoint at the end of every epoch.
        paddle.save(model.state_dict(), params_filepath)


def test( model ):
    """Report the mean L1 loss of `model` over test_loader.

    Only full batches (len == batch_size) are evaluated; the trailing
    partial batch is skipped, exactly as in the original implementation.
    """
    model.eval()

    losses = []
    for idx, batch in enumerate(test_loader()):
        store_arr, good_arr, sort_arr, label_arr = batch
        # Guard clause: ignore the trailing partial batch.
        if len(store_arr) != batch_size:
            continue

        store_tensor = paddle.to_tensor(store_arr)
        good_tensor = paddle.to_tensor(good_arr)
        sort_tensor = paddle.reshape(
            paddle.to_tensor(sort_arr, dtype='float32'), shape=[-1, 3, 1])
        label_tensor = paddle.reshape(
            paddle.to_tensor(label_arr, dtype='float32'), shape=[-1, 1])

        pred = model(store_tensor, good_tensor, sort_tensor)
        batch_loss = F.l1_loss(pred, label_tensor)
        losses.append(batch_loss.numpy()[0])

    mean_loss = round(np.mean(losses), 4)
    print("test dataset mean loss is:", mean_loss)


# Build the model and restore previously trained parameters from disk.
model = Model([256, 128, 64, 32, 16])
model_state_dict = paddle.load(params_filepath)
model.load_dict(model_state_dict)

# test(model)
# train(model)
# test(model)
# input_data = ['store_2', 7790427005179.0, 1.7]
from data_info import store_dict, good_dict, sort_list
from method import get_one_hot

# Per-good price range used by predict() to map the model's [0, 5] score
# back to a real price. File format: good_id,min_price,max_price per line.
min_max_dict = {}
with open('data/min_max2.txt') as f:
    lines = f.readlines()
    for line in lines:
        line = line.strip().split(',')
        good_id, min_price, max_price = float(line[0]), float(line[1]), float(line[2])
        if good_id not in min_max_dict:
            min_max_dict[good_id] = [min_price, max_price]
        else:
            # Duplicate good id in the file; the first entry wins.
            # (Message means "already exists".)
            print("已经存在了")


def predict( input_data ):
    """Print the denormalized price predicted for one (store, good) pair.

    input_data: [store key, good barcode string]; keys must exist in the
    module-level store_dict / good_dict / min_max_dict mappings.
    """
    raw_store, raw_good = input_data
    # Price range for this good, keyed by the numeric barcode.
    price_min, price_max = min_max_dict[float(raw_good)]

    store_idx = store_dict[raw_store]
    good_idx = good_dict[raw_good]
    # Category one-hot is looked up by the mapped store index.
    sort_one_hot = get_one_hot(sort_list[store_idx])

    store_tensor = paddle.to_tensor([store_idx])
    good_tensor = paddle.to_tensor([good_idx])
    sort_tensor = paddle.reshape(
        paddle.to_tensor([sort_one_hot], dtype='float32'), shape=[-1, 3, 1])

    pred = model(store_tensor, good_tensor, sort_tensor)
    score = pred.numpy()[0][0]

    # The model outputs in [0, 5]; rescale into the good's real price range.
    true_value = (price_max - price_min) * (score / 5) + price_min
    print("predict value is:", true_value)


# Script entry: run a single sample prediction (store key, good barcode).
predict(['store_2', '7790427005179.0'])
