import torch
from torch.nn import Module, Embedding
from config import DatasetsConfig
class EmbeddingLayer(Module):
    """Embedding lookup for the discrete (categorical) feature fields.

    One ``Embedding`` table is built per field. Tables are held in a
    ``torch.nn.ModuleDict`` so they are registered as sub-modules — visible to
    ``parameters()`` (and hence the optimizer), ``state_dict()`` and ``.to()``.
    """

    # Order matters: the fields after "uid"/"item_id" must match the column
    # order of `context_discrete` / `behavior_discrete_tensor` in `forward`.
    FIELDS = ("uid", "item_id", "author_id", "item_city", "channel", "music_id")

    def __init__(self, cfg, device):
        """
        Args:
            cfg: config mapping with a "Base" section (providing
                'datasets_info') and a "Train" section (providing one
                '<field>_embedding_size' entry per field).
            device: torch device each embedding table is moved to.
        """
        super().__init__()
        base_dict = cfg["Base"]
        train_dict = cfg["Train"]
        datasets_dict = DatasetsConfig(base_dict['datasets_info'])
        # BUG FIX: previously a plain dict — the Embedding modules were
        # invisible to nn.Module bookkeeping (not trained, not checkpointed).
        # ModuleDict registers them while keeping dict-style access intact.
        self.embedding = torch.nn.ModuleDict({
            field: Embedding(
                datasets_dict.max_val(field) + 1,  # +1: ids run 0..max inclusive
                train_dict[field + "_embedding_size"],
            ).to(device)
            for field in self.FIELDS
        })

    def forward(self, u_item_id, context_discrete, behavior_discrete_tensor):
        """Embed user/item ids plus the remaining discrete feature columns.

        Args:
            u_item_id: (batch, 2) long tensor — column 0 is uid, column 1 item_id.
            context_discrete: (batch, F) long tensor, one column per non-uid/item
                field, in FIELDS order.
            behavior_discrete_tensor: (batch, seq, F) long tensor with the same
                column layout as `context_discrete`.

        Returns:
            Tuple of (uid embeddings, item_id embeddings,
            stacked context embeddings (batch, F, dim),
            stacked behavior embeddings (batch, seq, F, dim)).
        """
        uid_tensor = self.embedding['uid'](u_item_id[:, 0])
        item_id_tensor = self.embedding['item_id'](u_item_id[:, 1])
        discrete = []
        behavior_discrete = []
        # Consume input columns in table-insertion order, skipping the two
        # id tables handled above.
        col = 0
        for key, emb in self.embedding.items():
            if key in ("uid", "item_id"):
                continue
            discrete.append(emb(context_discrete[:, col]))
            behavior_discrete.append(emb(behavior_discrete_tensor[:, :, col]))
            col += 1

        return (
            uid_tensor,
            item_id_tensor,
            torch.stack(discrete, dim=1),
            torch.stack(behavior_discrete, dim=2),
        )
    
