import numpy as np
import tensorflow as tf

class RBM(object):
    """Restricted Boltzmann Machine for collaborative-filtering style data.

    Ratings are assumed to be one-hot encoded per item, so the visible layer
    has ``numItems * ratingValues`` units and ``visibleDimensions`` must be a
    multiple of ``ratingValues``.  Training uses a single step of contrastive
    divergence (CD-1) per mini-batch, driven through a GradientTape on an MSE
    surrogate between the positive and negative associations.
    """

    def __init__(self, visibleDimensions, epochs=20, hiddenDimensions=50, ratingValues=10, learningRate=0.001,
                 batchSize=100):
        # Visible layer dimension (items * one-hot rating slots)
        self.visibleDimensions = visibleDimensions
        self.epochs = epochs
        # Hidden layer dimension - number of hidden units
        self.hiddenDimensions = hiddenDimensions
        # Number of one-hot slots per item (e.g. 10 for half-star ratings 0.5-5.0)
        self.ratingValues = ratingValues
        self.learningRate = learningRate
        self.batchSize = batchSize

        # Build the model parameters
        self.weights = self.build_weights()
        self.hiddenBias = self.build_hidden_bias()
        self.visibleBias = self.build_visible_bias()

    def build_weights(self):
        """Create the visible-to-hidden weight matrix.

        Uses a Xavier/Glorot-style uniform range of
        +/- 4*sqrt(6/(fan_in+fan_out)).

        BUG FIX: the bound must be positive.  The original code used
        ``-4.0 * sqrt(...)``, which handed ``tf.random.uniform`` a reversed
        interval (minval > maxval).
        """
        maxWeight = 4.0 * np.sqrt(6.0 / (self.hiddenDimensions + self.visibleDimensions))
        return tf.Variable(
            tf.random.uniform([self.visibleDimensions, self.hiddenDimensions], minval=-maxWeight, maxval=maxWeight),
            dtype=tf.float32, name="weights")

    def build_hidden_bias(self):
        """Create the hidden-layer bias vector, initialized to zeros."""
        return tf.Variable(tf.zeros([self.hiddenDimensions], dtype=tf.float32), name="hiddenBias")

    def build_visible_bias(self):
        """Create the visible-layer bias vector, initialized to zeros."""
        return tf.Variable(tf.zeros([self.visibleDimensions], dtype=tf.float32), name="visibleBias")

    def Train(self, X):
        """Train the RBM on ``X``.

        X: array-like of shape (numUsers, visibleDimensions), one-hot rating
        rows.  A float32 copy is made, so the caller's data is not shuffled
        in place.
        """
        # Cast explicitly: the tf.float32 variables reject int/float64 inputs
        # in tf.matmul.  np.array also copies, so the shuffle below is local.
        X = np.array(X, dtype=np.float32)

        for epoch in range(self.epochs):
            np.random.shuffle(X)

            # Mini-batch pass over the (shuffled) training set.
            for i in range(0, X.shape[0], self.batchSize):
                batch = X[i:i + self.batchSize]
                self.update_weights(batch)

            print("Trained epoch ", epoch)

    def update_weights(self, batch):
        """Apply one CD-1 parameter update for a single mini-batch.

        NOTE(review): the update is driven by autodiff on an MSE surrogate
        rather than the classic delta = lr * (forward - backward) rule, and
        tf.sign has a zero gradient, so no gradient flows through the hidden
        sampling step — only through the reconstruction path.  Kept as-is to
        preserve the original algorithm; confirm this matches intent.
        """
        with tf.GradientTape() as tape:
            forward, backward = self.contrastive_divergence(batch)
            loss = self.compute_loss(forward, backward)

        gradients = tape.gradient(loss, [self.weights, self.hiddenBias, self.visibleBias])
        self.weights.assign_sub(self.learningRate * gradients[0])
        self.hiddenBias.assign_sub(self.learningRate * gradients[1])
        self.visibleBias.assign_sub(self.learningRate * gradients[2])

    def compute_loss(self, forward, backward):
        """Mean squared difference between positive and negative associations."""
        return tf.reduce_mean(tf.square(forward - backward))

    def contrastive_divergence(self, batch):
        """Run one Gibbs step and return the (forward, backward) associations.

        forward  = v0^T . h0_sample  (positive phase)
        backward = v1^T . h1_prob    (negative phase after one reconstruction)
        """
        # Positive phase: hidden probabilities, then a Bernoulli sample
        # (relu(sign(p - u)) is 1 where u < p, else 0).
        hProb0 = tf.nn.sigmoid(tf.matmul(batch, self.weights) + self.hiddenBias)
        hSample = tf.nn.relu(tf.sign(hProb0 - tf.random.uniform(tf.shape(hProb0))))
        forward = tf.matmul(tf.transpose(batch), hSample)

        # Negative phase: reconstruct visible activations from the sample.
        v = tf.matmul(hSample, tf.transpose(self.weights)) + self.visibleBias

        # Mask out items the user never rated: a one-hot group is all zeros
        # in the input, so its max over the rating axis is 0, else 1.
        vMask = tf.sign(batch)  # Make sure everything is 0 or 1
        vMask3D = tf.reshape(vMask, [tf.shape(v)[0], -1, self.ratingValues])
        vMask3D = tf.reduce_max(vMask3D, axis=[2], keepdims=True)

        # Softmax over each item's group of ratingValues slots, so each rated
        # item's reconstruction is a proper distribution over rating values.
        v = tf.reshape(v, [tf.shape(v)[0], -1, self.ratingValues])
        vProb = tf.nn.softmax(v * vMask3D)
        vProb = tf.reshape(vProb, [tf.shape(v)[0], -1])

        hProb1 = tf.nn.sigmoid(tf.matmul(vProb, self.weights) + self.hiddenBias)
        backward = tf.matmul(tf.transpose(vProb), hProb1)

        return forward, backward

    def GetRecommendations(self, inputUser):
        """Reconstruct visible activations for one user.

        inputUser: array-like of shape (visibleDimensions,) or
        (1, visibleDimensions).  Returns a 1-D numpy array of reconstructed
        visible-unit activations.
        """
        # atleast_2d: tf.matmul needs rank 2; a 1-D user vector would crash.
        # float32 cast matches the tf.float32 parameters.
        inputUser = np.atleast_2d(np.asarray(inputUser, dtype=np.float32))
        hidden = tf.nn.sigmoid(tf.matmul(inputUser, self.weights) + self.hiddenBias)
        visible = tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(self.weights)) + self.visibleBias)
        return visible.numpy()[0]

# Example usage:
# visibleDimensions = 1000  # Example number of items
# rbm = RBM(visibleDimensions)
# rbm.Train(X)  # X is your training data
# recommendations = rbm.GetRecommendations(inputUser)  # inputUser is a single user's data
