import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt

# Load MovieLens-100k triples (user_id, item_id, rating) and shift the
# 1-based user/item ids down to 0-based array indices.
data = np.loadtxt('../data/movielens_100k.csv', delimiter=',', dtype=int)
data[:, :2] -= 1

# Collect the distinct user and item ids to size the factor matrices.
users = set(data[:, 0].tolist())
items = set(data[:, 1].tolist())
user_num = len(users)
item_num = len(items)

# Split into training and test sets (80/20) after a seeded shuffle.
np.random.seed(0)
ratio = 0.8
split = int(ratio * len(data))
np.random.shuffle(data)
train, test = data[:split], data[split:]
user_train, user_test = train[:, 0], test[:, 0]
item_train, item_test = train[:, 1], test[:, 1]
y_train, y_test = train[:, 2], test[:, 2]


class MF:
    """FunkSVD-style matrix factorization trained by mini-batch gradient
    descent with L2 regularization.

    Attributes:
        user_params: (N, d) user latent-factor matrix P.
        item_params: (M, d) item latent-factor matrix Q.
    """

    def __init__(self, N, M, d):
        """N: number of users, M: number of items, d: latent dimension.

        NOTE(review): all-ones init makes every user (and every item) row
        start identical; a small random init is more usual — kept as-is to
        preserve the original behavior.
        """
        self.user_params = np.ones((N, d))  # P
        self.item_params = np.ones((M, d))  # Q

    def pred(self, user_id, item_id):
        """Predict ratings for aligned index arrays.

        Returns a 1-D array of row-wise dot products P[user_id] . Q[item_id].
        """
        rating_pred = np.sum(self.item_params[item_id] * self.user_params[user_id], axis=1)
        return rating_pred

    def update(self, user_grad, item_grad, lr=None):
        """Apply one gradient-descent step to both factor matrices.

        Fix: the original read the module-level global ``learning_rate``;
        ``lr`` is now an explicit parameter that falls back to that global
        when omitted, so existing two-argument callers keep working.
        """
        if lr is None:
            lr = learning_rate  # backward-compatible global fallback
        self.user_params -= lr * user_grad
        self.item_params -= lr * item_grad

    def batch_generator(self, x, y, z, batch_size, shuffle=True):
        """Yield aligned (x, y, z) mini-batches of at most ``batch_size``.

        When ``shuffle`` is True the three arrays are permuted with one
        shared index so rows stay aligned; fancy indexing copies, so the
        caller's arrays are never mutated.
        """
        if shuffle:
            idx = np.random.permutation(len(x))
            x, y, z = x[idx], y[idx], z[idx]
        for start in range(0, len(x), batch_size):
            end = min(start + batch_size, len(x))
            yield x[start:end], y[start:end], z[start:end]

    def train(self, max_training_step, batch_size, lbd, learning_rate):
        """Train on the module-level train split; evaluate on the test split.

        Args:
            max_training_step: number of epochs.
            batch_size: mini-batch size.
            lbd: L2 regularization coefficient.
            learning_rate: gradient-descent step size.

        Returns:
            (train_losses, test_losses): per-epoch RMSE lists.
        """
        # Fix: compute the progress-bar total arithmetically instead of
        # exhausting a generator per epoch just to count its batches.
        batches_per_epoch = (len(user_train) + batch_size - 1) // batch_size
        pbar = tqdm(total=max_training_step * batches_per_epoch, desc="Training Process")

        train_losses = []
        test_losses = []

        for epoch in range(max_training_step):
            sq_err_sum = 0.0  # running sum of squared errors this epoch
            gen = self.batch_generator(user_train, item_train, y_train, batch_size=batch_size, shuffle=False)
            for user_batch, item_batch, y_batch in gen:
                P = self.user_params
                Q = self.item_params
                P_grad = np.zeros_like(P)
                Q_grad = np.zeros_like(Q)
                errs = self.pred(user_id=user_batch, item_id=item_batch) - y_batch
                # Accumulate per-sample gradients; duplicate ids within a
                # batch must add up, hence the explicit accumulation buffers.
                for user, item, err in zip(user_batch, item_batch, errs):
                    P_grad[user] += err * Q[item] + lbd * P[user]
                    Q_grad[item] += err * P[user] + lbd * Q[item]
                self.user_params = P - learning_rate * P_grad / len(user_batch)
                self.item_params = Q - learning_rate * Q_grad / len(item_batch)

                # Fix: sum squared errors directly; the original summed
                # per-batch means, which over-weights a short final batch
                # and makes the sqrt(sum/N) below mathematically wrong.
                sq_err_sum += np.sum(errs ** 2)

                pbar.update(1)
                pbar.set_postfix({
                    'Epoch': f'{epoch + 1}/{max_training_step}'
                })
            train_losses.append(np.sqrt(sq_err_sum / len(user_train)))
            # Fix: use self.pred (the original referenced the module-level
            # global `model`), and take the square root so the recorded test
            # loss is RMSE — consistent with the train loss and plot labels.
            y_test_pred = self.pred(user_test, item_test)
            test_losses.append(np.sqrt(np.mean((y_test_pred - y_test) ** 2)))
        pbar.close()  # fix: release the progress bar when training ends
        return train_losses, test_losses


# Hyperparameters.
max_training_step = 30  # epochs
batch_size = 64
lbd = 1e-4              # L2 regularization strength
learning_rate = 0.1
N = user_num            # number of users
M = item_num            # number of items
d = 16                  # latent-factor dimension

model = MF(N, M, d)
train_losses, test_losses = model.train(max_training_step, batch_size, lbd, learning_rate)
print(train_losses)
print(test_losses)

# Plot the per-epoch loss curves.
plt.figure(figsize=(8, 5))
for curve, curve_label in ((train_losses, 'Train RMSE'), (test_losses, 'Test RMSE')):
    plt.plot(curve, label=curve_label)
plt.xlabel('Epoch')
plt.ylabel('RMSE')
plt.title('Training and Test RMSE over Epochs')
plt.legend()
plt.grid(True)
plt.show()

# Inspect raw predictions on the held-out test set.
y_test_pred = model.pred(user_test, item_test)
print(y_test_pred)
print(y_test)