import numpy as np
from tqdm import tqdm
from sklearn import metrics
import matplotlib.pyplot as plt

data = np.loadtxt('../data/fm_dataset.csv', delimiter=',')

# Train/test split.
# Seed the RNG for reproducibility (this also fixes the FM factor-matrix
# initialization below), then shuffle so the 80/20 split is not biased by
# any ordering in the CSV (e.g. rows sorted by label).
np.random.seed(0)
np.random.shuffle(data)
ratio = 0.8
split = int(len(data) * ratio)
# Last column is the binary label; all other columns are features.
x_train, y_train = data[:split, :-1], data[:split, -1]
x_test, y_test = data[split:, :-1], data[split:, -1]
feature_number = x_train.shape[1]  # d: number of input features


class FM:
    """Degree-2 Factorization Machine for binary classification.

    Model:
        P(y=1 | x) = sigmoid(theta0 + <theta, x>
                             + 0.5 * sum_k ((x @ v)_k^2 - (x^2 @ v^2)_k))
    """

    def __init__(self, feature_number, vector_dim):
        # Bias, linear weights (d,), and latent factor matrix V (d, k).
        self.theta0 = 0.0
        self.theta = np.zeros(feature_number)
        self.v = np.random.normal(size=(feature_number, vector_dim))
        # Clipping margin keeping predictions away from 0/1 so that
        # log-loss terms and their gradients stay finite.
        self.eps = 1e-6

    def logistic(self, x):
        """Numerically stable sigmoid.

        Uses the identity 1/(1+exp(-x)) == 0.5*(1+tanh(x/2)); unlike the
        naive form, np.tanh never overflows for large |x|.
        """
        return 0.5 * (1.0 + np.tanh(0.5 * x))

    def predict(self, x):
        """Return P(y=1 | x) for a batch x of shape (n, d) as shape (n,)."""
        # Linear part.
        linear_term = self.theta0 + x @ self.theta
        # Pairwise-interaction part: 0.5 * sum_k ((xV)_k^2 - (x^2 V^2)_k),
        # the standard O(d*k) reformulation of the bilinear FM term.
        square_of_sum = np.square(x @ self.v)
        sum_of_square = np.square(x) @ np.square(self.v)
        bilinear_term = 0.5 * np.sum(square_of_sum - sum_of_square, axis=1)
        y_pred = self.logistic(linear_term + bilinear_term)
        return np.clip(y_pred, self.eps, 1 - self.eps)

    def update(self, grad0, grad_theta, grad_v, learning_rate):
        """Apply one SGD step to all parameters."""
        self.theta0 -= grad0 * learning_rate
        self.theta -= grad_theta * learning_rate
        self.v -= grad_v * learning_rate


# Hyperparameters for the FM model and its SGD training run.
vector_dim = 16          # latent factor dimension k
max_training_step = 200  # number of epochs over the training set
batch_size = 32          # mini-batch size for SGD
lbd = 0.05               # L2 regularization coefficient
learning_rate = 0.01

model = FM(feature_number=feature_number, vector_dim=vector_dim)

# Metric histories, appended to once per epoch by the training loop.
train_acc, test_acc = [], []
train_auc, test_auc = [], []

for epoch in tqdm(range(max_training_step)):
    # Mini-batch SGD over the training set.
    st = 0
    while st < len(x_train):
        ed = min(st + batch_size, len(x_train))
        X = x_train[st:ed]
        Y = y_train[st:ed]
        st += batch_size

        y_pred = model.predict(X)
        err = y_pred - Y  # dL/dz for logistic loss, shape (batch,)

        # Bias gradient: mean residual plus L2 term.
        grad0 = np.mean(err) + lbd * model.theta0
        # Linear-weight gradient: X^T err / batch plus L2 term.
        grad_theta = err @ X / len(X) + lbd * model.theta
        # Factor-matrix gradient, fully vectorized. Per sample:
        #   d y_i / d v[s, k] = x_i[s] * (x_i @ v)[k] - x_i[s]^2 * v[s, k]
        # Summed over the batch this is
        #   X^T (err[:, None] * (X @ v)) - (err @ X^2)[:, None] * v.
        grad_v = (X.T @ (err.reshape(-1, 1) * (X @ model.v))
                  - (err @ np.square(X)).reshape(-1, 1) * model.v) / len(X)
        grad_v += lbd * model.v
        model.update(grad0, grad_theta, grad_v, learning_rate=learning_rate)

    # Evaluate once per epoch. Accuracy uses 0.5-thresholded labels, but
    # ROC-AUC must be computed from the raw probabilities: feeding it
    # binary predictions collapses the ROC curve to a single point and
    # misreports the model's ranking quality.
    y_test_prob = model.predict(x_test)
    test_acc.append(np.mean((y_test_prob >= 0.5) == y_test))
    test_auc.append(metrics.roc_auc_score(y_test, y_test_prob))

    y_train_prob = model.predict(x_train)
    train_acc.append(np.mean((y_train_prob >= 0.5) == y_train))
    train_auc.append(metrics.roc_auc_score(y_train, y_train_prob))


# Plot the per-epoch accuracy and AUC curves, train vs. test side by side.
plt.figure(figsize=(9, 4))

panels = [
    (1, train_acc, test_acc, 'acc', 'Accuracy'),
    (2, train_auc, test_auc, 'auc', 'AUC'),
]
for pos, tr_curve, te_curve, ylab, title in panels:
    plt.subplot(1, 2, pos)
    plt.plot(tr_curve, label=f'train {ylab}')
    plt.plot(te_curve, label=f'test {ylab}')
    plt.legend()
    plt.title(title)
    plt.xlabel('epoch')
    plt.ylabel(ylab)

plt.tight_layout()
plt.show()
