import torch
import torch.nn as nn
import torch.nn.functional as F
from models.Modules import Embedding
from models.config_class import BaseConfig

class DIN(nn.Module):
    """Deep Interest Network (DIN) for CTR prediction.

    Scores each item in a user's behavior history against a candidate
    (target) item with a small attention MLP, pools the history into a
    user-interest vector, and feeds [interest ; target] through an MLP
    that outputs a click probability.
    """

    def __init__(self, config: BaseConfig):
        """
        DIN model initialization.

        Args:
            config: configuration object providing
                vocab_size_dic (dict): vocabulary size per feature field
                id_embed_dim (int): embedding dim for ID-type features
                simple_embed_dim (int): embedding dim for simple features
                att_hidden_units (list[int]): hidden sizes of the attention net, e.g. [80, 40]
                mlp_hidden_units (list[int]): hidden sizes of the prediction MLP, e.g. [200, 80]
        """
        super(DIN, self).__init__()
        # Shared embedding for both the behavior history and the target item.
        self.embedding = Embedding(config.vocab_size_dic, config.id_embed_dim, config.simple_embed_dim)
        # Total embedding width per item — assumes Embedding exposes its two
        # partial widths as .h and .c (TODO confirm against models.Modules.Embedding).
        self.embed_dim = self.embedding.h + self.embedding.c

        # Attention network: maps [history_emb ; target_emb] -> scalar logit.
        att_layers = []
        input_dim = 2 * self.embed_dim  # width after concatenation
        for units in config.att_hidden_units:
            att_layers.append(nn.Linear(input_dim, units))
            att_layers.append(nn.ReLU())
            input_dim = units
        att_layers.append(nn.Linear(input_dim, 1))  # attention score
        self.attention_net = nn.Sequential(*att_layers)

        # Prediction MLP over [user_interest ; target_emb].
        mlp_layers = []
        input_dim = 2 * self.embed_dim
        for units in config.mlp_hidden_units:
            mlp_layers.append(nn.Linear(input_dim, units))
            mlp_layers.append(nn.ReLU())
            input_dim = units
        mlp_layers.append(nn.Linear(input_dim, 1))
        mlp_layers.append(nn.Sigmoid())  # click probability in (0, 1)
        self.mlp = nn.Sequential(*mlp_layers)

    def forward(self, behaviors, target_item):
        """
        Args:
            behaviors: (batch_size, seq_len, n_fields) int tensor of history
                feature ids; id 0 in the first field marks padding.
            target_item: (batch_size, n_fields) int tensor for the candidate item.

        Returns:
            (batch_size, 2) tensor: [:, 1] is the click probability,
            [:, 0] = 1 - [:, 1].
        """
        # 1. Sequence mask (0 marks padding positions).
        mask = (behaviors[:, :, 0] != 0).float()  # (batch_size, seq_len)
        # 2. Embed the behavior history.
        his_emb = self.embedding(behaviors)  # (batch_size, seq_len, embed_dim)
        # 3. Embed the target item.
        target_emb = self.embedding(target_item)  # (batch_size, embed_dim)
        # 4. Attention between each history item and the target.
        seq_len = his_emb.size(1)
        target_expanded = target_emb.unsqueeze(1).expand(-1, seq_len, -1)  # broadcast target along seq
        att_input = torch.cat([his_emb, target_expanded],
                              dim=-1)  # (batch_size, seq_len, 2*embed_dim)

        att_logits = self.attention_net(att_input).squeeze(-1)  # (batch_size, seq_len)
        # Mask padding before softmax so padded slots receive ~0 weight.
        att_logits = att_logits.masked_fill(mask == 0, -1e9)
        att_weights = F.softmax(att_logits, dim=1)  # normalized weights
        # 5. Weighted sum of history -> user interest representation.
        user_interest = torch.sum(his_emb * att_weights.unsqueeze(-1), dim=1)  # (batch_size, embed_dim)

        # 6. Concatenate user interest with the target embedding.
        combined = torch.cat([user_interest, target_emb], dim=-1)  # (batch_size, 2*embed_dim)

        # 7. MLP CTR prediction. BUG FIX: the original did
        #    pred = self.mlp(combined).repeat(1, 2); pred[:, 0] = 1 - pred[:, 1]
        # — an in-place write into a tensor on the autograd graph, which can
        # raise "a variable needed for gradient computation has been modified
        # by an inplace operation" during backward(). Build the two-column
        # output out-of-place instead; values are identical.
        p_click = self.mlp(combined)  # (batch_size, 1)
        return torch.cat([1 - p_click, p_click], dim=-1)  # (batch_size, 2)


# Test code
if __name__ == '__main__':
    from types import SimpleNamespace

    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }
    # Behavior history: 10 users x 500 steps x 4 feature fields.
    behaviors = [torch.randint(0, 99, (10, 500, 1)),
                 torch.randint(0, 19, (10, 500, 1)),
                 torch.randint(0, 9, (10, 500, 1)),
                 torch.randint(0, 49, (10, 500, 1))]
    behaviors = torch.concat(behaviors, dim=-1)

    # Candidate item: same 4 feature fields, one per user.
    target_item = [
        torch.randint(0, 99, (10, 1)),
        torch.randint(0, 19, (10, 1)),
        torch.randint(0, 9, (10, 1)),
        torch.randint(0, 49, (10, 1))
    ]
    target_item = torch.concat(target_item, dim=-1)

    # BUG FIX: DIN.__init__ reads attribute-style config fields
    # (config.vocab_size_dic, config.id_embed_dim, config.att_hidden_units, ...),
    # so passing the raw dict raised AttributeError. Wrap the test settings in a
    # lightweight namespace standing in for BaseConfig.
    # NOTE(review): embed dims / hidden units below are plausible defaults —
    # confirm against BaseConfig's actual values.
    config = SimpleNamespace(
        vocab_size_dic=vocab_size_dic,
        id_embed_dim=64,
        simple_embed_dim=8,
        att_hidden_units=[80, 40],
        mlp_hidden_units=[200, 80],
    )
    model = DIN(config)
    output = model(behaviors, target_item)
    print(output)