# coding: utf-8
# 2021/7/1 @ tongshiwei
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F

from .base import Module
from .MCD import MCDNet


class TwoLinear(Module):
    """Two-tower (user, item) scorer.

    Each id is embedded into a 10-d vector, projected to 3 features, and the
    two 3-d towers are concatenated and scored by a final linear layer.
    Returns raw (pre-sigmoid) logits.
    """

    def __init__(self, user_num, item_num):
        super(TwoLinear, self).__init__()
        self.user_num = user_num
        self.item_num = item_num

        # 10-d id embeddings, each projected down to 3 features.
        self.user_embedding = torch.nn.Embedding(user_num, 10)
        self.user_mlp = nn.Linear(10, 3)
        self.item_embedding = torch.nn.Embedding(item_num, 10)
        self.item_mlp = nn.Linear(10, 3)
        # Final scorer over the concatenated (3 + 3)-d user/item features.
        self.mlp = nn.Linear(6, 1)
        # Kept for interface compatibility; forward() does not apply it.
        self.activation = nn.Sigmoid()
        self.relu = nn.LeakyReLU()

        self.init_embedding(0)

    def init_embedding(self, init):
        """Kaiming-normal-initialize all embedding and linear weights.

        ``init`` is forwarded as the negative-slope parameter ``a`` of the
        assumed leaky-ReLU nonlinearity (0 == plain ReLU assumption).
        """
        nn.init.kaiming_normal_(self.user_embedding.weight, mode="fan_out", a=init)
        nn.init.kaiming_normal_(self.item_embedding.weight, mode="fan_out", a=init)
        nn.init.kaiming_normal_(self.mlp.weight, mode="fan_out", a=init)
        nn.init.kaiming_normal_(self.user_mlp.weight, mode="fan_out", a=init)
        nn.init.kaiming_normal_(self.item_mlp.weight, mode="fan_out", a=init)

    def forward(self, users, items):
        """Score (user, item) id pairs.

        ``users`` / ``items`` are integer id tensors of the same shape;
        returns a logit tensor of that shape (trailing feature dim removed).
        """
        u_feat = self.relu(self.user_mlp(self.user_embedding(users)))
        i_feat = self.relu(self.item_mlp(self.item_embedding(items)))
        preds = self.mlp(torch.cat([u_feat, i_feat], dim=-1))
        # BUG FIX: squeeze only the trailing singleton output dim. A bare
        # .squeeze() would also collapse a batch of size 1 to a 0-d tensor.
        return preds.squeeze(dim=-1)


class MCDDebiasWrapper(nn.Module):
    """Wraps two MCDDebias models plus learned user/item transforms
    (``b1``/``b2``) for debiased training with a one-step lookahead clone.

    NOTE(review): ``MCDDebias`` is referenced but not imported in this file
    (only ``MCDNet`` is) — confirm it is defined elsewhere in the module, or
    whether ``MCDNet`` was intended.
    """

    def __init__(
        self,
        user_num,
        item_num,
        latent_dim,
        random_dataset=None,
        mcd_optim=torch.optim.Adam,
        lr=0.001,
    ):
        # BUG FIX: nn.Module.__init__ must run before any submodule is
        # assigned; without it, assigning self.b1 below raises AttributeError.
        super(MCDDebiasWrapper, self).__init__()

        self.user_num = user_num
        self.item_num = item_num
        self.latent_dim = latent_dim
        self.random_dataset = random_dataset

        # NOTE(review): b1/b2 are nn.Embedding layers but forward() applies
        # them to float embedding vectors; nn.Embedding expects integer index
        # tensors — nn.Linear(latent_dim, latent_dim) was likely intended.
        self.b1 = nn.Embedding(self.latent_dim, self.latent_dim)
        self.b2 = nn.Embedding(self.latent_dim, self.latent_dim)

        self.optim_b1 = torch.optim.Adam(self.b1.parameters(), lr=0.001)
        self.optim_b2 = torch.optim.Adam(self.b2.parameters(), lr=0.001)

        self.loss_fn = nn.BCELoss()

        # Lookahead clone, re-synced from self.mcd each training forward pass.
        self.one_step_model = MCDDebias(user_num, item_num, latent_dim)
        self.mcd = MCDDebias(user_num, item_num, latent_dim)

        self.one_step_optim = mcd_optim(self.one_step_model.parameters(), lr=lr)
        self.optim = mcd_optim(self.mcd.parameters(), lr=lr)

    def forward(self, user_id, item_id, y_train=None):
        """Predict interaction probabilities for (user_id, item_id) pairs.

        In training mode ``y_train`` is required; the lookahead clone is
        synced and its loss computed (NOTE(review): ``loss_all`` is currently
        unused here — presumably the surrounding training loop was meant to
        consume it). Returns sigmoid probabilities with the trailing
        singleton dim squeezed.
        """
        if self.training:
            assert y_train is not None

            self.b1.train()
            self.b2.train()
            self.mcd.train()
            self.one_step_model.train()

            # Sync the lookahead model, then evaluate its loss on the batch.
            self.one_step_model.load_state_dict(self.mcd.state_dict())
            y_pred_all = self.one_step_model(user_id, item_id)
            loss_all = self.loss_fn(y_pred_all, y_train)

            user = self.b1(self.mcd.user_embedding(user_id))
            item = self.b2(self.mcd.item_embedding(item_id))
        else:
            self.b1.eval()
            self.b2.eval()
            # BUG FIX: the eval path previously left ``user``/``item``
            # undefined, raising NameError at the return below. Mirror the
            # training-path feature computation without the lookahead
            # bookkeeping.
            user = self.b1(self.mcd.user_embedding(user_id))
            item = self.b2(self.mcd.item_embedding(item_id))

        return torch.squeeze(
            torch.sigmoid(self.mcd.response(torch.cat([user, item], dim=-1))), dim=-1
        )
