import torch
import torch.nn as nn
from Modules import *
from config_class import BaseConfig

class END4Rec(nn.Module):
    """END4Rec: sequential recommendation model.

    Encodes a user's behavior sequence with an EBM encoder, combines it with
    the target item's embedding, down-weights the score with hard and soft
    noise weights, and returns a two-class probability ``[P(neg), P(pos)]``.
    """

    def __init__(self, config: BaseConfig):
        super().__init__()
        # Per-field embedding; its total output width is the sum of the two
        # internal widths it exposes (h + c).
        self.embedding = Embedding(config.vocab_size_dic, config.id_embed_dim, config.simple_embed_dim)
        self.embedding_dim = self.embedding.h + self.embedding.c
        self.positional_encoding = PositionalEncoding(self.embedding_dim, max_len=config.seq_len)

        self.ebm = EBMLayer(self.embedding_dim, config.heads, config.num_blocks)
        self.hard_noise_eliminator = HardNoiseEliminator(self.embedding_dim)
        self.soft_noise_filter = SoftNoiseFilter(self.embedding_dim)

        # Scores the [sequence summary ; target embedding] concatenation.
        self.fc = nn.Linear(self.embedding_dim * 2, 1)

    def forward(self, behaviors, target_item):
        """Score a batch of (behavior sequence, target item) pairs.

        Args:
            behaviors: integer feature ids for the behavior sequence;
                assumed shape (batch, seq_len, n_fields) — TODO confirm
                against Embedding's contract.
            target_item: integer feature ids for the candidate item;
                assumed shape (batch, n_fields).

        Returns:
            Tensor of shape (batch, 2) with columns [P(neg), P(pos)].
        """
        # Embed the sequence and add positional information.
        sequence_emb = self.positional_encoding(self.embedding(behaviors))
        target_emb = self.embedding(target_item)

        # EBM summarizes the whole sequence into one vector per example.
        sequence_emb_ebm = self.ebm(sequence_emb)  # (batch, embed_dim)

        # Combine sequence summary with the target item.
        combined_emb = torch.cat([sequence_emb_ebm, target_emb], dim=1)  # (batch, 2 * embed_dim)

        # Noise weights: hard weights come from sequence vs. target attention,
        # soft weights from the combined representation.
        noise_weights_hard = self.hard_noise_eliminator(sequence_emb, target_emb.unsqueeze(1))
        noise_weights_soft = self.soft_noise_filter(combined_emb)

        # Weighted logit -> positive-class probability.
        logit = self.fc(combined_emb) * noise_weights_hard * noise_weights_soft  # (batch, 1)
        pos = torch.sigmoid(logit)

        # Build [P(neg), P(pos)] without in-place writes. The original used
        # .repeat(1, 2) followed by an in-place slice assignment, which is
        # fragile under autograd; torch.cat is equivalent and safe.
        return torch.cat([1.0 - pos, pos], dim=1)

if __name__ == '__main__':
    # Smoke test: per-field vocabulary sizes — two inherent item features
    # and two cross features.
    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }
    # Synthetic behavior sequences: batch of 10, 500 steps, one column per
    # feature field (torch.randint's high bound is exclusive, so all ids
    # fall inside each field's vocabulary).
    behaviors = [torch.randint(0, 99, (10, 500, 1)),
                 torch.randint(0, 19, (10, 500, 1)),
                 torch.randint(0, 9, (10, 500, 1)),
                 torch.randint(0, 49, (10, 500, 1))]
    behaviors = torch.concat(behaviors, dim=-1)  # (10, 500, 4)


    # Synthetic target item: one id per feature field.
    target_item = [
        torch.randint(0, 99, (10, 1)),
        torch.randint(0, 19, (10, 1)),
        torch.randint(0, 9, (10, 1)),
        torch.randint(0, 49, (10, 1))
    ]
    target_item = torch.concat(target_item, dim=-1)  # (10, 4)


    # NOTE(review): END4Rec.__init__ takes a single BaseConfig argument, so
    # this call (raw dict plus an unexpected `seq_len` kwarg) raises
    # TypeError as written. It should build a BaseConfig (with
    # vocab_size_dic, seq_len=500, embed dims, heads, num_blocks) and pass
    # that — confirm BaseConfig's constructor and fix.
    model = END4Rec(vocab_size_dic, seq_len=500)
    output = model(behaviors, target_item)
    print(output)