import torch
import torch.nn as nn
import torch.nn.functional as F
from models.Modules import Embedding
from models.config_class import BaseConfig


class AUGRUCell(nn.Module):
    """GRU cell with an Attentional Update gate (AUGRU).

    Used in DIEN's interest-evolution layer: the update gate is rescaled by
    a per-step attention score so that low-attention behaviors barely move
    the hidden state, while high-attention behaviors drive the evolution
    (Zhou et al., "Deep Interest Evolution Network", AAAI 2019).
    """

    def __init__(self, input_size, hidden_size):
        super(AUGRUCell, self).__init__()
        # Stacked weights/biases for the reset (r) and update (z) gates.
        self.weight_ih = nn.Parameter(torch.Tensor(2 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.Tensor(2 * hidden_size, hidden_size))
        self.bias_ih = nn.Parameter(torch.Tensor(2 * hidden_size))
        self.bias_hh = nn.Parameter(torch.Tensor(2 * hidden_size))
        # Weights/biases for the candidate hidden state.
        self.weight_ih_c = nn.Parameter(torch.Tensor(hidden_size, input_size))
        self.weight_hh_c = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
        self.bias_ih_c = nn.Parameter(torch.Tensor(hidden_size))
        self.bias_hh_c = nn.Parameter(torch.Tensor(hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize weight matrices; zero-initialize biases."""
        nn.init.xavier_uniform_(self.weight_ih)
        nn.init.xavier_uniform_(self.weight_hh)
        nn.init.xavier_uniform_(self.weight_ih_c)
        nn.init.xavier_uniform_(self.weight_hh_c)
        nn.init.zeros_(self.bias_ih)
        nn.init.zeros_(self.bias_hh)
        nn.init.zeros_(self.bias_ih_c)
        nn.init.zeros_(self.bias_hh_c)

    def forward(self, input, hx, att_score):
        """One AUGRU step.

        Args:
            input: (batch, input_size) current-step input.
            hx: (batch, hidden_size) previous hidden state.
            att_score: (batch,) attention score for this step.

        Returns:
            (batch, hidden_size) new hidden state.
        """
        # Reset (r) and update (z) gates from the input and previous state.
        gates = F.linear(input, self.weight_ih, self.bias_ih) + \
                F.linear(hx, self.weight_hh, self.bias_hh)
        r_gate, z_gate = gates.chunk(2, 1)
        r_gate = torch.sigmoid(r_gate)
        z_gate = torch.sigmoid(z_gate)

        # Attentional update gate: u'_t = a_t * u_t.
        z_gate = z_gate * att_score.unsqueeze(1)

        # Candidate hidden state.
        n_gate = F.linear(input, self.weight_ih_c, self.bias_ih_c) + \
                 F.linear(hx * r_gate, self.weight_hh_c, self.bias_hh_c)
        n_gate = torch.tanh(n_gate)

        # DIEN AUGRU update: h_t = (1 - u'_t) * h_{t-1} + u'_t * h~_t.
        # With att_score == 0 the previous state is kept unchanged.
        # (The original had hx and the candidate swapped, so zero attention
        # fully overwrote the state with the candidate — the opposite of
        # the intended attentional behavior.)
        hy = (1 - z_gate) * hx + z_gate * n_gate
        return hy


class AUGRU(nn.Module):
    """Unrolls an AUGRUCell over a whole behavior sequence."""

    def __init__(self, input_size, hidden_size):
        super(AUGRU, self).__init__()
        self.cell = AUGRUCell(input_size, hidden_size)
        self.hidden_size = hidden_size

    def forward(self, input, att_scores):
        """Run the cell step-by-step from a zero initial state.

        Args:
            input: (batch, seq_len, input_size) sequence of inputs.
            att_scores: (batch, seq_len) per-step attention scores.

        Returns:
            Tuple of (all hidden states (batch, seq_len, hidden_size),
            final hidden state (batch, hidden_size)).
        """
        batch_size, seq_len, _ = input.size()
        state = torch.zeros(batch_size, self.hidden_size, device=input.device)
        hidden_states = []

        for step in range(seq_len):
            state = self.cell(input[:, step, :], state, att_scores[:, step])
            hidden_states.append(state)

        return torch.stack(hidden_states, dim=1), state


class AttentionLayer(nn.Module):
    """Scores each step of a behavior sequence against a target item."""

    def __init__(self, embedding_dim):
        super(AttentionLayer, self).__init__()
        self.linear = nn.Linear(embedding_dim, embedding_dim)
        self.target_proj = nn.Linear(embedding_dim, 1, bias=False)

    def forward(self, seq_emb, target_emb):
        """Compute softmax attention weights over the sequence dimension.

        Args:
            seq_emb: (batch, seq_len, emb_dim) sequence embeddings.
            target_emb: (batch, emb_dim) target-item embedding.

        Returns:
            (batch, seq_len) attention weights summing to 1 per row.
        """
        # Nonlinear transform of the sequence embeddings.
        projected = torch.tanh(self.linear(seq_emb))  # (batch, seq_len, emb_dim)

        # Elementwise interaction with the broadcast target, reduced to a
        # scalar score per step.
        interaction = projected * target_emb.unsqueeze(1)
        logits = self.target_proj(interaction).squeeze(-1)  # (batch, seq_len)

        return F.softmax(logits, dim=1)


class DIEN(nn.Module):
    """Deep Interest Evolution Network for CTR prediction.

    Pipeline: embed behaviors -> GRU interest extractor -> target attention
    -> AUGRU interest evolution -> MLP classifier.
    """

    def __init__(self, config: BaseConfig):
        super(DIEN, self).__init__()
        # Feature embedding layer (shared by history and target item).
        self.embedding = Embedding(vocab_size_dic=config.vocab_size_dic,
                                   id_embedding_dim=config.id_embed_dim,
                                   simple_embedding_dim=config.simple_embed_dim)
        self.emb_dim = self.embedding.h + self.embedding.c

        # Interest-extraction layer (plain GRU over the behavior sequence).
        self.gru_extractor = nn.GRU(
            input_size=self.emb_dim,
            hidden_size=self.emb_dim,
            batch_first=True
        )

        # Target-aware attention over the extracted interests.
        self.attention = AttentionLayer(self.emb_dim)

        # Interest-evolution layer (AUGRU).
        self.augru = AUGRU(
            input_size=self.emb_dim,
            hidden_size=self.emb_dim
        )

        # Prediction MLP over [final interest, target, interaction].
        self.fc = nn.Sequential(
            nn.Linear(3 * self.emb_dim, 128),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1),
            nn.Sigmoid()
        )

    def forward(self, behaviors, target_item):
        """Score a target item against a user's behavior history.

        Args:
            behaviors: (batch, seq_len, num_fields) historical feature ids.
            target_item: (batch, num_fields) target-item feature ids.

        Returns:
            (batch, 2) tensor of [P(no click), P(click)] per sample.
        """
        # Embed the history and the target item.
        his_emb = self.embedding(behaviors)      # (batch, seq, emb_dim)
        target_emb = self.embedding(target_item)  # (batch, emb_dim)

        # Interest extraction (GRU).
        gru_out, _ = self.gru_extractor(his_emb)  # (batch, seq, emb_dim)

        # Target-aware attention weights.
        att_weights = self.attention(gru_out, target_emb)  # (batch, seq)

        # Interest evolution (AUGRU); keep only the final state.
        _, final_hidden = self.augru(gru_out, att_weights)  # (batch, emb_dim)

        # Concatenate final interest, target, and their elementwise
        # interaction as extra cross features.
        combined = torch.cat([
            final_hidden,
            target_emb,
            final_hidden * target_emb
        ], dim=-1)

        # CTR prediction. Build the two-class output functionally instead
        # of the original repeat + in-place indexed write, which read and
        # wrote the same tensor and is fragile under autograd.
        p_click = self.fc(combined)  # (batch, 1)
        return torch.cat([1 - p_click, p_click], dim=-1)  # (batch, 2)

if __name__ == '__main__':
    from types import SimpleNamespace

    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }

    # Batch of 10 users, each with a 500-step history; one column per
    # feature field, concatenated on the last axis -> (10, 500, 4).
    behaviors = torch.concat([
        torch.randint(0, 99, (10, 500, 1)),
        torch.randint(0, 19, (10, 500, 1)),
        torch.randint(0, 9, (10, 500, 1)),
        torch.randint(0, 49, (10, 500, 1)),
    ], dim=-1)

    # Matching target-item features -> (10, 4).
    target_item = torch.concat([
        torch.randint(0, 99, (10, 1)),
        torch.randint(0, 19, (10, 1)),
        torch.randint(0, 9, (10, 1)),
        torch.randint(0, 49, (10, 1)),
    ], dim=-1)

    # DIEN.__init__ reads config.vocab_size_dic, config.id_embed_dim and
    # config.simple_embed_dim. The original passed the raw dict, which has
    # none of those attributes and crashed with AttributeError. A light
    # namespace duck-types the config here.
    # NOTE(review): embed dims are demo values — confirm against the real
    # BaseConfig defaults.
    config = SimpleNamespace(
        vocab_size_dic=vocab_size_dic,
        id_embed_dim=8,
        simple_embed_dim=4,
    )

    model = DIEN(config)
    output = model(behaviors, target_item)
    print(output)