import torch
import torch.nn as nn
import torch.nn.functional as F
from models.Modules import Embedding
from models.config_class import BaseConfig

class SIM(nn.Module):
    """Search-based Interest Model (SIM) for CTR prediction.

    Scores a target item against a user's behavior sequence, either by
    soft attention over the full sequence ('soft' mode) or by averaging
    the top-k most similar behaviors ('hard' mode), then predicts a
    click-through probability from [user_rep ; target_emb].
    """

    def __init__(self, config: "BaseConfig"):
        """Build the SIM model.

        Args:
            config: configuration object providing
                - vocab_size_dic: per-feature vocabulary sizes for Embedding
                - id_embed_dim / simple_embed_dim: embedding widths
                - k: number of behaviors kept in 'hard' mode
                - sim_mode: 'soft' or 'hard'
        """
        super().__init__()
        self.embedding = Embedding(config.vocab_size_dic, config.id_embed_dim, config.simple_embed_dim)
        # Total per-item embedding width (id part h + simple part c).
        self.emb_dim = self.embedding.h + self.embedding.c
        self.topk = config.k
        self.mode = config.sim_mode

        # NOTE(review): self.W is never used in forward(); kept so existing
        # checkpoints keep loading — confirm whether it can be removed.
        self.W = nn.Linear(self.emb_dim, self.emb_dim, bias=False)

        # Scoring head: [user_rep ; target_emb] -> sigmoid CTR in (0, 1).
        self.predict_layer = nn.Sequential(
            nn.Linear(self.emb_dim * 2, self.emb_dim),
            nn.ReLU(),
            nn.Linear(self.emb_dim, 1),
            nn.Sigmoid(),
        )

        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize all Linear/Embedding weights; zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Embedding):
                nn.init.xavier_normal_(m.weight)

    def forward(self, behaviors, target_item):
        """Predict CTR for a target item given a behavior sequence.

        Args:
            behaviors: int tensor [batch, seq_len, n_features] of behavior
                feature ids; position is treated as padding when the first
                feature column (assumed to be the item id — confirm against
                the data pipeline) is 0.
            target_item: int tensor [batch, n_features] for the target item.

        Returns:
            Tensor [batch, 2] with columns [P(no click), P(click)];
            each row sums to 1.
        """
        batch_size, seq_len, _ = behaviors.size()

        his_emb = self.embedding(behaviors)       # [b, s, d]
        target_emb = self.embedding(target_item)  # [b, d]

        # Dot-product similarity between each behavior and the target
        # (broadcasting replaces the explicit expand of the original).
        sim_scores = torch.sum(his_emb * target_emb.unsqueeze(1), dim=-1)  # [b, s]

        # Mask padding positions by pushing their scores to -1e9.
        mask = (behaviors[:, :, 0] != 0).float()  # [b, s]
        sim_scores = sim_scores * mask - 1e9 * (1 - mask)

        if self.mode == 'soft':
            # Soft mode: attention-weighted average over the sequence.
            attn_weights = F.softmax(sim_scores, dim=-1)  # [b, s]
            user_rep = torch.sum(his_emb * attn_weights.unsqueeze(-1), dim=1)  # [b, d]
        elif self.mode == 'hard':
            # Hard mode: mean-pool the top-k most similar behaviors.
            # Clamp k so sequences shorter than topk don't crash topk().
            k = min(self.topk, seq_len)
            _, topk_indices = torch.topk(sim_scores, k=k, dim=-1)  # [b, k]
            # gather keeps everything on the input's device; the original
            # arange-based fancy indexing built CPU indices and broke on GPU.
            gather_idx = topk_indices.unsqueeze(-1).expand(-1, -1, his_emb.size(-1))
            topk_emb = torch.gather(his_emb, 1, gather_idx)  # [b, k, d]
            user_rep = torch.mean(topk_emb, dim=1)  # [b, d]
        else:
            raise ValueError("模式必须是'soft'或'hard'")

        # Concatenate user representation with the target representation.
        combined = torch.cat([user_rep, target_emb], dim=-1)  # [b, 2d]

        pos = self.predict_layer(combined)  # [b, 1]
        # Build [P(neg), P(pos)] functionally; the original repeat(1, 2)
        # plus in-place column write risked autograd in-place errors.
        return torch.cat([1 - pos, pos], dim=-1)  # [b, 2]


# Usage example
if __name__ == "__main__":
    from types import SimpleNamespace

    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }

    # Minimal duck-typed stand-in for BaseConfig with the attributes
    # SIM.__init__ actually reads. The embedding widths are illustrative —
    # TODO confirm against the real BaseConfig defaults.
    config = SimpleNamespace(
        vocab_size_dic=vocab_size_dic,
        id_embed_dim=64,
        simple_embed_dim=8,
        k=10,
        sim_mode="soft",
    )

    # One column per feature: item_id, author, play_time, date.
    behaviors = [torch.randint(0, 99, (10, 500, 1)),
                 torch.randint(0, 19, (10, 500, 1)),
                 torch.randint(0, 9, (10, 500, 1)),
                 torch.randint(0, 49, (10, 500, 1))]
    behaviors = torch.concat(behaviors, dim=-1)

    target_item = [
        torch.randint(0, 99, (10, 1)),
        torch.randint(0, 19, (10, 1)),
        torch.randint(0, 9, (10, 1)),
        torch.randint(0, 49, (10, 1))
    ]
    target_item = torch.concat(target_item, dim=-1)

    # Original code called SIM(vocab_size_dic, mode="soft"), which does not
    # match SIM.__init__(self, config) and raised a TypeError.
    model = SIM(config)
    output = model(behaviors, target_item)
    print(output)