import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class FM(nn.Module):
    """Factorization-machine style scorer for (user, item) pairs.

    The score is:  global_bias + user_bias + item_bias + <u_e, i_e>,
    where the embeddings ``u_e`` / ``i_e`` are supplied by the caller
    (this module only owns the bias terms).
    """

    def __init__(self,
                 embedding_size,
                 user_total,
                 item_total,
                 ):
        """
        Args:
            embedding_size: dimensionality of the user/item embeddings
                passed to ``forward`` (stored for reference only).
            user_total: number of distinct users (rows of the user bias table).
            item_total: number of distinct items (rows of the item bias table).
        """
        super(FM, self).__init__()
        self.embedding_size = embedding_size
        self.user_total = user_total
        self.item_total = item_total

        # Per-user and per-item scalar bias terms, initialised to zero.
        self.user_bias = nn.Embedding(self.user_total, 1)
        self.item_bias = nn.Embedding(self.item_total, 1)
        nn.init.zeros_(self.user_bias.weight)
        nn.init.zeros_(self.item_bias.weight)

        # Global offset shared by every prediction.
        self.bias = nn.Parameter(torch.FloatTensor([0.0]))

    def forward(self, u_ids, i_ids, u_e, i_e):
        """Score a batch of user/item pairs.

        Args:
            u_ids: LongTensor of user indices, shape (B,).
            i_ids: LongTensor of item indices, shape (B,).
            u_e: user embeddings, shape (B, embedding_size).
            i_e: item embeddings, shape (B, embedding_size).

        Returns:
            FloatTensor of scores, shape (B,).
        """
        batch_size = len(u_ids)
        # Explicit .view(batch_size) instead of an unqualified .squeeze():
        # squeeze() would also drop the batch dim when batch_size == 1.
        u_b = self.user_bias(u_ids).view(batch_size)
        i_b = self.item_bias(i_ids).view(batch_size)
        # Row-wise dot product <u_e, i_e>: (B,1,E) @ (B,E,1) -> (B,1,1) -> (B,).
        interaction = torch.bmm(u_e.unsqueeze(1), i_e.unsqueeze(2)).view(batch_size)
        y = self.bias.expand(batch_size) + u_b + i_b + interaction
        return y


class image_cast(nn.Module):
    """Project a raw 2048-d image feature vector down to 100 dimensions."""

    def __init__(self):
        super(image_cast, self).__init__()
        # Single affine projection from the CNN feature space (2048)
        # into the shared 100-d representation space.
        self.input = nn.Linear(2048, 100)

    def forward(self, img_feature):
        """Return the 100-d projection of ``img_feature`` (..., 2048) -> (..., 100)."""
        return self.input(img_feature)


class feature_combine(nn.Module):
    """Fuse image and text feature vectors into a single output vector.

    Pipeline: concatenate -> learnable per-dimension input gate
    (text dims start down-weighted) -> linear mix -> learnable
    per-dimension output gate.
    """

    def __init__(self, pic_feature_size=100, text_feature_size=100, text_weight=0.01, output=100):
        """
        Args:
            pic_feature_size: dimensionality of the image feature input.
            text_feature_size: dimensionality of the text feature input.
            text_weight: initial gate value for the text dimensions
                (image dimensions start at 1.0).
            output: dimensionality of the fused output vector.
        """
        super(feature_combine, self).__init__()
        # Learnable elementwise gate over the concatenated [pic, text] input.
        pic_scale = torch.ones(pic_feature_size)
        text_scale = text_weight * torch.ones(text_feature_size)
        self.input_layer = torch.nn.Parameter(torch.cat([pic_scale, text_scale]))
        self.comb = torch.nn.Linear(pic_feature_size + text_feature_size, output)
        # BUG FIX: was torch.rand(100), which ignored `output` and broke
        # (or mis-broadcast) whenever output != 100.
        self.output = torch.nn.Parameter(torch.rand(output))

    def forward(self, pic, text, dim=-1):
        """Combine ``pic`` and ``text`` features.

        Args:
            pic: image features, shape (..., pic_feature_size).
            text: text features, shape (..., text_feature_size).
            dim: dimension along which to concatenate (default: last).

        Returns:
            Fused features, shape (..., output).
        """
        f = torch.cat([pic, text], dim=dim)
        f = self.input_layer * f
        f = self.comb(f)
        f = self.output * f
        return f
