import numpy as np
import torch
from torch.nn import Embedding, Linear

from exp.modules import DotProductClickPredictor
from exp.news_encoder import NewsEncoder
from exp.user_encoder import UserEncoder
from general.dnn_predict import DNNClickPredictor
from general.activate_unit import Attention

# Prefer the first CUDA GPU when available; otherwise fall back to CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")


class MyModel(torch.nn.Module):
    """News-recommendation model: a news encoder plus a user encoder.

    Candidate and clicked news items are encoded into vectors, a user
    vector is built from the click history, and each candidate is scored
    either by a dot-product predictor (optionally gated through an
    attention/activation unit) or by a DNN predictor over an attention
    context, depending on ``config.inner_product_click_predict``.
    """

    def __init__(self, config, pretrained_word_embedding=None, pretrained_entity_embedding=None):
        """Build all sub-modules from the given config.

        Args:
            config: hyper-parameter object; must provide
                ``word_embedding_dim``, ``num_users``, ``activate_unit``
                and ``inner_product_click_predict``.
            pretrained_word_embedding: optional pretrained word vectors,
                forwarded to the news encoder.
            pretrained_entity_embedding: optional pretrained entity vectors,
                forwarded to the news encoder.
        """
        super().__init__()
        self.config = config
        self.news_embed_dim = config.word_embedding_dim

        # Shared encoders for individual news items and for the user built
        # from the click history.
        self.news_encoder = NewsEncoder(
            config=config,
            pretrained_word_embedding=pretrained_word_embedding,
            pretrained_entity_embedding=pretrained_entity_embedding,
        )
        self.user_encoder = UserEncoder(config=config)

        # Long-term user-interest embedding. Its dimension matches the
        # short-term user representation so the two can be concatenated.
        # NOTE(review): not used in forward() at the moment (the lookup is
        # commented out there), but kept so checkpoints stay compatible.
        self.user_embedding = Embedding(
            num_embeddings=config.num_users,
            embedding_dim=self.news_embed_dim,
            padding_idx=0,
        )

        self.click_predictor = DotProductClickPredictor()
        self.dnn_predict = DNNClickPredictor(input_size=config.word_embedding_dim)
        self.interest_activate = Attention()

        # True -> score with the inner-product predictor,
        # False -> score with the DNN predictor.
        self.product_product = config.inner_product_click_predict

    def forward(self, user, candidate_news, clicked_news):
        """Score each candidate news item for the given user.

        Args:
            user: user ids, shape (batch_size,); the long-term interest id.
            candidate_news: sequence of candidate-news inputs (e.g. length 3).
            clicked_news: sequence of clicked-news inputs (e.g. length 50).

        Returns:
            Click probabilities, one per candidate news item.
        """
        # Long-term profile lookup was disabled in the original code:
        # user_profile_embed = self.user_embedding(user.to(device))

        # Encode candidates: (batch_size, candidate_num, embed_dim), e.g. 128 x 3 x 300.
        encoded_candidates = [self.news_encoder(item) for item in candidate_news]
        candidate_news_vector = torch.stack(encoded_candidates, dim=1)

        # Encode history: (batch_size, history_len, embed_dim), e.g. 128 x 50 x 300.
        encoded_history = [self.news_encoder(item) for item in clicked_news]
        clicked_news_vector = torch.stack(encoded_history, dim=1)

        # Short-term user representation: (batch_size, news_embed_dim).
        user_vector = self.user_encoder(user, clicked_news_vector)

        if not self.product_product:
            # TODO: original author noted a bug in this branch; a fix was
            # attempted but did not improve results, so it is left as-is.
            context, att = self.interest_activate(candidate_news_vector, user_vector, user_vector)
            return self.dnn_predict(context)

        if self.config.activate_unit:
            # Gate the user vector through the activation/attention unit
            # before the inner-product scoring.
            gated = self.interest_activate([candidate_news_vector, user_vector, user_vector])
            return self.click_predictor(candidate_news_vector, gated)

        # Plain inner product: (batch_size, candidate_num).
        return self.click_predictor(candidate_news_vector, user_vector)

    def get_news_vector(self, news):
        """Encode one batch of news items into vectors."""
        return self.news_encoder(news)

    def get_user_vector(self, user, clicked_news_vector):
        """Build the user representation from ids and encoded click history."""
        return self.user_encoder(user, clicked_news_vector)

    def get_prediction(self, news_vector, user_vector):
        """Score pre-computed news/user vectors for a single sample."""
        batched_news = news_vector.unsqueeze(dim=0)
        batched_user = user_vector.unsqueeze(dim=0)
        return self.click_predictor(batched_news, batched_user).squeeze(dim=0)
