import torch
import pandas as pd
# import tensorflow as tf
import numpy as np
import torch.utils.data as Data


def load_dataset(root1):
    """Load the MovieLens-100k dataset and build train/test tensors.

    User and item attributes are one-hot encoded and concatenated into the
    feature vector. The binary label is "rating == 5", returned as a
    two-column one-hot matrix.

    :param root1: path prefix of the ml-100k directory (must end with a
        separator, e.g. './data/ml-100k/'); reads u.user, u.item,
        ua.base and ua.test from it
    :return: (train_features, train_labels, test_features, test_labels)
        as float tensors of shapes (90570, 53), (90570, 2), (9430, 53),
        (9430, 2)
    """
    # --- user features ---
    header = ['user_id', 'age', 'gender', 'occupation', 'zip_code']
    url = root1 + 'u.user'
    df_user = pd.read_csv(url, sep='|', names=header)
    # bucket ages into decade bands so they one-hot encode cleanly
    df_user['age'] = pd.cut(df_user['age'], [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                            labels=['0-10', '10-20', '20-30', '30-40', '40-50', '50-60', '60-70', '70-80', '80-90',
                                    '90-100'])
    df_user = pd.get_dummies(df_user, columns=['gender', 'occupation', 'age'])  # one-hot encode
    df_user = df_user.drop(columns=['zip_code'])
    user_features = df_user.columns.values.tolist()  # user feature column names as a list

    # --- movie (item) features ---
    header = ['item_id', 'title', 'release_date', 'video_release_date', 'IMDb_URL', 'unknown', 'Action', 'Adventure',
              'Animation', 'Children',
              'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery',
              'Romance', 'Sci-Fi',
              'Thriller', 'War', 'Western']
    url = root1 + 'u.item'
    df_item = pd.read_csv(url, sep='|', names=header, encoding="ISO-8859-1")
    # genre flags are already 0/1; drop the non-feature text columns
    df_item = df_item.drop(columns=['title', 'release_date', 'video_release_date', 'IMDb_URL', 'unknown'])
    movie_features = df_item.columns.values.tolist()  # movie feature column names as a list

    # final feature column order: user columns then movie columns, ids excluded
    cols = user_features + movie_features
    cols.remove('user_id')
    cols.remove('item_id')

    # --- training set ---
    header = ['user_id', 'item_id', 'rating', 'timestamp']
    url = root1 + 'ua.base'
    df_train = pd.read_csv(url, sep='\t', names=header)
    # binarize the label: 1 only for a 5-star rating
    df_train['rating'] = df_train.rating.apply(lambda x: 1 if int(x) == 5 else 0)
    df_train = df_train.merge(df_user, on='user_id', how='left')
    df_train = df_train.merge(df_item, on='item_id', how='left')

    # --- test set ---
    url = root1 + 'ua.test'
    df_test = pd.read_csv(url, sep='\t', names=header)
    df_test['rating'] = df_test.rating.apply(lambda x: 1 if int(x) == 5 else 0)
    df_test = df_test.merge(df_user, on='user_id', how='left')
    df_test = df_test.merge(df_item, on='item_id', how='left')
    # labels as two one-hot columns [rating_0, rating_1]
    train_labels = pd.get_dummies(df_train, columns=['rating'], dtype=np.float32)  # one-hot encode
    train_labels = train_labels[['rating_0', 'rating_1']]
    test_labels = pd.get_dummies(df_test, columns=['rating'], dtype=np.float32)  # one-hot encode
    test_labels = test_labels[['rating_0', 'rating_1']]
    # feature matrices: df_train[cols] is (90570, 53), df_test[cols] is (9430, 53)
    # label matrices: (90570, 2) and (9430, 2)

    df_train = torch.from_numpy(df_train[cols].values).float()
    train_labels = torch.from_numpy(train_labels.values).float()
    df_test = torch.from_numpy(df_test[cols].values).float()
    test_labels = torch.from_numpy(test_labels.values).float()
    return df_train, train_labels, df_test, test_labels


def fun_normalization(input_tensor):
    """Min-max scale a tensor so its values span the [0, 1] range."""
    lo = input_tensor.min()
    hi = input_tensor.max()
    return (input_tensor - lo) / (hi - lo)


# mini-batch loading helper
def get_data_loader(data, label, batch_size=128, shuffle=True, num_workers=2):
    """Wrap paired (data, label) tensors in a mini-batch DataLoader.

    The defaults reproduce the original hard-coded behavior; the batch
    size, shuffling and worker count are now configurable.

    :param data: feature tensor, one row per sample
    :param label: label tensor aligned row-for-row with ``data``
    :param batch_size: samples per mini-batch (default 128)
    :param shuffle: reshuffle every epoch (default True)
    :param num_workers: loader worker processes (default 2)
    :return: a ``torch.utils.data.DataLoader`` over the paired tensors
    """
    torch_dataset = Data.TensorDataset(data, label)
    return Data.DataLoader(
        dataset=torch_dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
    )


# linear (first-order) part of the FM
class linearlayer(torch.nn.Module):
    # computes y = Wx + b
    """
    A pytorch implementation of the linear/logistic-regression term.
    """

    def __init__(self, field_dims, output_dim=2):
        """
        :param field_dims: number of input features per sample
        :param output_dim: number of output scores per sample (default 2)
        """
        super().__init__()
        self.linear = torch.nn.Linear(field_dims, output_dim)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: Float tensor of size ``(batch_size, output_dim)``
        """
        # BUG FIX: the original cast the result back to long, which both
        # truncated the scores to integers and detached them from the
        # autograd graph, so the linear weights never received gradients.
        return self.linear(x.float())


# second-order (pairwise interaction) part of the FM
class FactorizationMachine(torch.nn.Module):
    """Pairwise-interaction term of a Factorization Machine.

    Uses the identity 0.5 * ((sum_f v_f)^2 - sum_f v_f^2) over the field
    axis, optionally summed over the embedding axis.
    """

    def __init__(self, reduce_sum=True):
        super().__init__()
        self.reduce_sum = reduce_sum

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        :return: ``(batch_size, 1)`` if ``reduce_sum`` else ``(batch_size, embed_dim)``
        """
        summed = x.sum(dim=1)                      # (batch_size, embed_dim)
        square_of_sum = summed * summed            # (sum over fields)^2
        sum_of_square = (x * x).sum(dim=1)         # sum over fields of squares
        interaction = square_of_sum - sum_of_square
        if self.reduce_sum:
            interaction = interaction.sum(dim=1, keepdim=True)
        return 0.5 * interaction


# embedding part of the FM
class FeaturesEmbedding(torch.nn.Module):
    """Lookup table mapping integer feature indices to dense latent vectors."""

    def __init__(self, field_dims, embed_dim):
        """
        :param field_dims: size of the index vocabulary
        :param embed_dim: dimension of each latent vector
        """
        super().__init__()
        self.embedding = torch.nn.Embedding(field_dims, embed_dim)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        """
        return self.embedding(x)


class FM_Model(torch.nn.Module):
    """
    A pytorch implementation of Factorization Machine.

    Reference:
        S Rendle, Factorization Machines, 2010.
    """

    def __init__(self, field_dims, embed_dim=16):
        """
        :param field_dims: number of input features (num_fields)
        :param embed_dim: size of each feature's latent vector
        """
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = linearlayer(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: Float tensor of size ``(batch_size, 2)`` of sigmoid scores
        """
        # Embed once and reuse: the original computed the embedding twice
        # (the first result was bound to an unused variable), doubling the
        # embedding cost of every forward pass.
        embedded = self.embedding(x)  # (batch_size, num_fields, embed_dim)
        # first-order (linear) term + second-order interaction term
        out = self.linear(x) + self.fm(embedded)
        # NOTE(review): applying sigmoid before CrossEntropyLoss (see the
        # training script) is unusual -- CrossEntropyLoss expects raw
        # logits. Left unchanged here to preserve existing behavior;
        # consider returning `out.squeeze(1)` directly and confirm.
        return torch.sigmoid(out.squeeze(1))


def train(model, optimizer, data_loader, loss_func):
    """Run one training epoch over ``data_loader``.

    :param model: module mapping a Long feature tensor (batch, num_fields)
        to per-class scores of shape (batch, 2)
    :param optimizer: torch optimizer over ``model.parameters()``
    :param data_loader: yields (features, one_hot_labels) batches
    :param loss_func: criterion comparing model output to the labels
    :return: (total_loss, total_correct) summed over all batches;
        ``total_loss`` is a plain float, ``total_correct`` the sum of
        per-batch accuracies
    """
    model.train()  # switch to training mode
    total_loss = 0.0
    total_correct = 0.0
    for batch_x, batch_y in data_loader:
        # min-max scale, then cast to long indices for the embedding layer
        batch_x = fun_normalization(batch_x).long()
        output = model(batch_x)
        loss = loss_func(output, batch_y)
        model.zero_grad()
        loss.backward()
        optimizer.step()
        # BUG FIX: accumulating the loss tensor itself kept every batch's
        # autograd graph alive for the whole epoch; .item() detaches it.
        total_loss += loss.item()
        correct = np.mean((torch.argmax(output, 1) == torch.argmax(batch_y, 1)).numpy())
        total_correct += correct
    return total_loss, total_correct


def test(model, data_loader, loss_func=None):
    """Evaluate the model over one pass of ``data_loader`` (no gradients).

    :param model: module mapping a Long feature tensor (batch, num_fields)
        to per-class scores of shape (batch, 2)
    :param data_loader: yields (features, one_hot_labels) batches
    :param loss_func: criterion; defaults to ``CrossEntropyLoss`` (the
        original read an undeclared global ``loss_func``, which raised
        NameError when this module was imported rather than run as a
        script; the global was always CrossEntropyLoss)
    :return: (total_loss, total_correct) summed over all batches;
        ``total_loss`` is a plain float, ``total_correct`` the sum of
        per-batch accuracies
    """
    model.eval()  # switch to evaluation mode
    if loss_func is None:
        loss_func = torch.nn.CrossEntropyLoss()
    total_loss = 0.0
    total_correct = 0.0
    with torch.no_grad():
        for batch_x, batch_y in data_loader:
            # same preprocessing as training: min-max scale, cast to long
            batch_x = fun_normalization(batch_x).long()
            output = model(batch_x)
            loss = loss_func(output, batch_y)
            total_loss += loss.item()
            correct = np.mean((torch.argmax(output, 1) == torch.argmax(batch_y, 1)).numpy())
            total_correct += correct
    return total_loss, total_correct




if __name__ == '__main__':
    # load MovieLens-100k and build the train/test tensors
    root1 = './data/ml-100k/'
    x_train, y_train, x_test, y_test = load_dataset(root1)

    feature_count = x_train.shape[1]  # number of input features (53)
    train_loader = get_data_loader(x_train, y_train)
    test_loader = get_data_loader(x_test, y_test)

    model = FM_Model(feature_count, embed_dim=16)
    # NOTE: `loss_func` is read as a global by test(); keep the name
    loss_func = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=0.001, weight_decay=0.00001)

    # per-epoch averages are reported per (approximate) batch count
    train_batches = x_train.shape[0] / 128
    test_batches = x_test.shape[0] / 128
    for epoch in range(30):
        loss_train, correcttrain = train(model, optimizer, train_loader, loss_func)
        loss_test, correcttest = test(model, test_loader)
        print("loss_train", loss_train / train_batches, "loss_test", loss_test / test_batches)
        print("correcttrain", correcttrain / train_batches, "correcttest",
              correcttest / test_batches)
