import torch
import torch.nn as nn


class NCFModel(nn.Module):
    """Neural Collaborative Filtering (NCF).

    Combines two branches:
      * GMF  — element-wise product of user/item embeddings (hidden_dim wide).
      * MLP  — a tower over concatenated user/item embeddings whose width is
               halved at every layer until it reaches hidden_dim.
    The two hidden_dim-wide branch outputs are concatenated and mapped to a
    single probability by a linear layer + sigmoid.
    """

    def __init__(self, user_num, item_num, hidden_dim, mlp_layer_num, dropout):
        """
        Args:
            user_num: number of distinct users (rows of the user embeddings).
            item_num: number of distinct items (rows of the item embeddings).
            hidden_dim: width of the GMF embeddings and of the MLP output.
            mlp_layer_num: number of halving Linear layers in the MLP tower.
            dropout: dropout probability applied after each MLP Linear.
        """
        super().__init__()
        self.hidden_dim = hidden_dim
        self.user_num = user_num
        self.item_num = item_num
        self.mlp_layer_num = mlp_layer_num
        self.dropout = dropout

        # MLP embeddings are sized so that after `mlp_layer_num` halvings the
        # tower lands exactly on hidden_dim (e.g. 64 * 2**(3-1) = 256).
        mlp_emb_dim = hidden_dim * (2 ** (mlp_layer_num - 1))
        self.mlp_user_embedding = nn.Embedding(user_num, mlp_emb_dim)
        self.mlp_item_embedding = nn.Embedding(item_num, mlp_emb_dim)
        self.gmf_user_embedding = nn.Embedding(user_num, hidden_dim)
        self.gmf_item_embedding = nn.Embedding(item_num, hidden_dim)

        # MLP tower. Concatenating user+item doubles the width, so the tower
        # starts at hidden_dim * 2**mlp_layer_num and halves per layer
        # (e.g. 512 -> 256 -> 128 -> 64). Integer division keeps sizes exact.
        mlp_layers = []
        input_size = hidden_dim * (2 ** mlp_layer_num)
        for _ in range(mlp_layer_num):
            mlp_layers.append(nn.Linear(input_size, input_size // 2))
            mlp_layers.append(nn.Dropout(dropout))
            mlp_layers.append(nn.ReLU())
            input_size //= 2
        self.mlp_layers = nn.Sequential(*mlp_layers)

        # Fuses the GMF vector and the MLP vector (each hidden_dim wide).
        self.output_layer = nn.Linear(2 * self.hidden_dim, 1)

    def forward(self, user, item):
        """Predict interaction probabilities.

        Args:
            user: LongTensor of user indices, shape (batch,).
            item: LongTensor of item indices, shape (batch,).

        Returns:
            FloatTensor of probabilities in (0, 1), shape (batch,).
        """
        # GMF pathway: element-wise product of the two embeddings.
        user_gmf_embedding = self.gmf_user_embedding(user)
        item_gmf_embedding = self.gmf_item_embedding(item)
        gmf_output = user_gmf_embedding * item_gmf_embedding

        # MLP pathway: concatenate, then run through the halving tower.
        user_mlp_embedding = self.mlp_user_embedding(user)
        item_mlp_embedding = self.mlp_item_embedding(item)
        mlp_input = torch.cat([user_mlp_embedding, item_mlp_embedding], dim=-1)
        mlp_output = self.mlp_layers(mlp_input)

        # BUG FIX: the original applied .squeeze(-1) to the concatenated
        # (batch, 2*hidden_dim) features *before* the output layer, where it
        # is a no-op. Squeeze the (batch, 1) logits instead, so callers get a
        # flat (batch,) probability vector.
        logits = self.output_layer(torch.cat([gmf_output, mlp_output], dim=-1))
        return torch.sigmoid(logits.squeeze(-1))


import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset


# Example dataset class
class ExampleDataset(Dataset):
    """Minimal in-memory dataset yielding (user, item, label) triples."""

    def __init__(self, user_tensor, item_tensor, label_tensor):
        # The three sequences are expected to be index-aligned and equally long.
        self.user_tensor = user_tensor
        self.item_tensor = item_tensor
        self.label_tensor = label_tensor

    def __getitem__(self, idx):
        # One aligned (user, item, label) sample.
        return (
            self.user_tensor[idx],
            self.item_tensor[idx],
            self.label_tensor[idx],
        )

    def __len__(self):
        return len(self.user_tensor)


# ---------------------------------------------------------------------------
# Demo driver: trains NCFModel on randomly generated (user, item, label) data.
# NOTE(review): this runs at import time — consider guarding it with
# `if __name__ == "__main__":` so the module can be imported without training.
# ---------------------------------------------------------------------------

# Hyper-parameters for the model and the training run.
user_num = 1000  # Number of unique users
item_num = 500  # Number of unique items
hidden_dim = 64
mlp_layer_num = 3
dropout = 0.2
learning_rate = 0.001
epochs = 10
batch_size = 32

# Create dummy data for demonstration: random user/item index pairs with
# random binary labels (BCELoss needs float targets, hence float32).
num_samples = 10000
user_tensor = torch.randint(0, user_num, (num_samples,))
item_tensor = torch.randint(0, item_num, (num_samples,))
label_tensor = torch.randint(0, 2, (num_samples,), dtype=torch.float32)

# Create datasets and dataloaders (shuffled each epoch).
dataset = ExampleDataset(user_tensor, item_tensor, label_tensor)
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Initialize the model.
model = NCFModel(user_num, item_num, hidden_dim, mlp_layer_num, dropout)

# Binary cross-entropy on sigmoid outputs, optimized with Adam.
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Standard training loop: one optimizer step per mini-batch.
for epoch in range(epochs):
    model.train()
    total_loss = 0
    for user, item, label in train_loader:
        optimizer.zero_grad()
        output = model(user, item)
        # squeeze() collapses a possible (batch, 1) output to (batch,) so it
        # matches the label shape expected by BCELoss.
        loss = criterion(output.squeeze(), label)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

    # Mean batch loss for the epoch.
    print(f"Epoch {epoch + 1}/{epochs}, Loss: {total_loss / len(train_loader)}")

# Evaluation (if you have a test set)
# test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# model.eval()
# with torch.no_grad():
#     for user, item, label in test_loader:
#         output = model(user, item)
#         # Evaluate the output (e.g., accuracy, AUC, etc.)

print("Training complete.")

import torch
import torch.nn as nn


class NCFModel(nn.Module):
    """Neural Collaborative Filtering.

    Two parallel branches share nothing but the final layer:
      * GMF — element-wise product of dedicated user/item embeddings.
      * MLP — a tower over the concatenation of a second pair of embeddings,
        halving its width down to hidden_dim.
    Their outputs are concatenated and passed through a linear layer with a
    sigmoid, yielding one probability per (user, item) pair.
    """

    def __init__(self, user_num, item_num, hidden_dim, mlp_layer_num, dropout):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.user_num = user_num
        self.item_num = item_num
        self.mlp_layer_num = mlp_layer_num
        self.dropout = dropout

        # One embedding table per branch and entity type, all hidden_dim wide.
        self.mlp_user_embedding = nn.Embedding(user_num, hidden_dim)
        self.mlp_item_embedding = nn.Embedding(item_num, hidden_dim)
        self.gmf_user_embedding = nn.Embedding(user_num, hidden_dim)
        self.gmf_item_embedding = nn.Embedding(item_num, hidden_dim)

        # MLP tower: width starts at 2*hidden_dim (user+item concatenated) and
        # halves for each of the first (mlp_layer_num - 1) steps; the final
        # step maps whatever width remains back to hidden_dim.
        tower = []
        width = 2 * hidden_dim
        for _ in range(mlp_layer_num - 1):
            tower += [nn.Linear(width, width // 2), nn.Dropout(dropout), nn.ReLU()]
            width //= 2
        tower += [nn.Linear(width, hidden_dim), nn.Dropout(dropout), nn.ReLU()]
        self.mlp_layers = nn.Sequential(*tower)

        # Maps the concatenated GMF + MLP features to a single logit.
        self.output_layer = nn.Linear(2 * hidden_dim, 1)

    def forward(self, user, item):
        """Return predicted interaction probabilities, shape (batch, 1)."""
        # GMF branch: element-wise interaction of the embeddings.
        gmf_feat = self.gmf_user_embedding(user) * self.gmf_item_embedding(item)

        # MLP branch: concatenate, then run the halving tower.
        mlp_in = torch.cat(
            (self.mlp_user_embedding(user), self.mlp_item_embedding(item)), dim=-1
        )
        mlp_feat = self.mlp_layers(mlp_in)

        # Fuse both branches and squash to (0, 1).
        fused = torch.cat((gmf_feat, mlp_feat), dim=-1)
        return torch.sigmoid(self.output_layer(fused))

