import numpy
import torch
import csv
from itertools import islice

from torch import nn
import heapq

# Hyperparameters for the AutoRec model and training loop.
e = 0.5  # fill value for unobserved user-item entries (used by load_train_data)
lambda_ = 1  # L2 regularization strength in train()
hidden_neuron = 660  # width of the autoencoder's hidden layer
train_epoch = 50  # SGD epochs spent on each item vector in train()
learning_rate = 0.001  # SGD learning rate


def get_user_id(train_data_path):
    """Collect the distinct user ids of the training CSV, in order of appearance.

    Assumes rows are grouped by user id and that ids effectively start at 0:
    the result is seeded with user 0 and a new id is appended whenever the
    id column changes from the previously seen value.

    Args:
        train_data_path: path to a CSV whose first column is the user id
            (first row is a header and is skipped).
    Returns:
        List of user ids; the position of an id is its row index in the
        rating matrix built by load_train_data.
    """
    train_user_id_list = [0]
    user_id = 0
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(train_data_path, "r", encoding="utf-8") as fh:
        reader = csv.reader(fh)
        next(reader, None)  # skip the header row
        for line in reader:
            if int(line[0]) != user_id:
                user_id = int(line[0])
                train_user_id_list.append(user_id)
    return train_user_id_list

def load_train_data(data_path, m, n, fill_value=None):
    """Build the (m x n) user-item co-occurrence matrices from the training CSV.

    Rows must be grouped by user id (same ordering as get_user_id).

    Args:
        data_path: CSV with columns (user_id, item_id); first row is a header.
        m: number of users (rows).
        n: number of items (columns).
        fill_value: score assigned to unobserved entries; defaults to the
            module-level constant `e` so existing callers are unchanged.
    Returns:
        user_like_list: (m, n) tensor, `fill_value` for unobserved entries
            and 1.0 for observed ones.
        user_mask_r: (m, n) 0/1 tensor marking observed entries.
    """
    if fill_value is None:
        fill_value = e  # module-level default fill for unobserved entries

    user_like_list = torch.full((m, n), fill_value)
    user_mask_r = torch.zeros((m, n))

    user_id = 0
    user_index = 0
    with open(data_path, "r", encoding="utf-8") as fh:
        reader = csv.reader(fh)
        next(reader, None)  # skip header
        for line in reader:
            if int(line[0]) != user_id:
                user_id = int(line[0])
                user_index += 1
            # BUG FIX: the original only recorded the row when the user id was
            # unchanged, silently dropping the FIRST rating of every new user.
            # Record the current row unconditionally.
            item = int(line[1])
            user_like_list[user_index][item] = 1
            user_mask_r[user_index][item] = 1

    return user_like_list, user_mask_r

class AutoRec(nn.Module):
    """Item-based AutoRec: a one-hidden-layer autoencoder over rating vectors.

    Encoder: Linear(m -> hidden) + Sigmoid.
    Decoder: Linear(hidden -> m), deliberately left linear so reconstructed
    scores are unbounded (the Sigmoid was commented out in the original).
    """

    def __init__(self, m, hidden_dim=None):
        """Args:
            m: length of one input rating vector (number of users when
               trained item-by-item, as train() does).
            hidden_dim: width of the hidden layer; defaults to the
               module-level `hidden_neuron` so existing callers are unchanged.
        """
        super(AutoRec, self).__init__()
        self.m = m
        if hidden_dim is None:
            hidden_dim = hidden_neuron
        self._encoder = nn.Sequential(
            nn.Linear(self.m, hidden_dim),
            nn.Sigmoid()
        )
        self._decoder = nn.Sequential(
            nn.Linear(hidden_dim, self.m),
        )

    def forward(self, input):
        """Reconstruct the input rating vector(s): decoder(encoder(input))."""
        return self._decoder(self._encoder(input))


def train(train_data, m, n, mask_r):
    """Train an AutoRec model item-by-item and return reconstructed scores.

    Args:
        train_data: (m, n) rating tensor (users x items).
        m: number of users.
        n: number of items.
        mask_r: (m, n) 0/1 tensor marking observed ratings.
    Returns:
        n x m nested list: train_res[i] holds every user's reconstructed
        score for item i.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoRec(m).to(device)
    train_data = train_data.to(device)
    mask_r = mask_r.to(device)
    optim = torch.optim.SGD(model.parameters(), lr=learning_rate)
    train_res = []
    for i in range(n):
        # Column i of the rating matrix: all m users' scores for item i,
        # reshaped to (1, m) to match the model's input layer. This replaces
        # the original torch.gather construction, which built an (m, 1) index
        # tensor just to extract the same column.
        ri = train_data[:, i].reshape(1, m)
        mask = mask_r[:, i].reshape(1, m)
        for _ in range(train_epoch):
            output = model(ri)
            # Reconstruction error over observed entries only.
            result_mse = ((ri - output) * mask).pow(2).sum()
            # L2 penalty on the weight matrices (dim == 2 excludes biases).
            # BUG FIX: the original summed over `param.data`, which is
            # detached from the autograd graph, so the regularizer never
            # contributed a gradient and lambda_ had no effect.
            reg = 0
            for group in optim.param_groups:
                for param in group['params']:
                    if param.dim() == 2:
                        reg = reg + param.pow(2).sum()
            cost = result_mse + 0.5 * lambda_ * reg
            optim.zero_grad()
            cost.backward()
            optim.step()
        train_res.append(model(ri).detach().cpu().numpy().tolist()[0])
        if i % 200 == 0:
            print("训练进度：{:.2f}%".format((i+1) / n * 100))
    return train_res

def load_test_data(data_path):
    """Read the test CSV and return its user-id column as a list of ints.

    Duplicates are preserved: one entry per data row, header skipped.

    Args:
        data_path: CSV whose first column is the user id; the first row is
            a header and is skipped.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(data_path, "r", encoding="utf-8") as fh:
        rows = csv.reader(fh)
        next(rows, None)  # skip header
        return [int(row[0]) for row in rows]

def recommendation(train_res, train_user_id_list, test_user_id_list, k):
    """Build top-k item recommendations for every test user.

    Args:
        train_res: n x m nested list from train() (items x users).
        train_user_id_list: user ids in the row order of the rating matrix.
        test_user_id_list: users to produce recommendations for.
        k: number of items to recommend per user.
    Returns:
        Dict mapping user_id -> list of k item indices, best score first.
    """
    user_recommendation_list = {}
    scores_by_user = numpy.array(train_res).T  # transpose to users x items
    for done, user_id in enumerate(test_user_id_list, start=1):
        row = scores_by_user[train_user_id_list.index(user_id)].tolist()
        # BUG FIX: the original mapped the top-k SCORES back to positions with
        # scores.index(), which returns the same position repeatedly whenever
        # scores tie. Rank the indices directly instead.
        top_k = heapq.nlargest(k, range(len(row)), key=row.__getitem__)
        user_recommendation_list[user_id] = top_k
        if done % 200 == 0:
            print("当前推荐进度: {:.2f}%".format(done / len(test_user_id_list) * 100))
    return user_recommendation_list

def write_res_to_csv(user_recommendation_res):
    """Write (user_id, item_id) pairs to submission_auto_rec.csv in cwd.

    Args:
        user_recommendation_res: dict mapping user_id -> list of item ids;
            one CSV row is emitted per (user, item) pair, after a header.
    """
    # `with` closes the file even if a write raises (the original only closed
    # it on the happy path).
    with open("submission_auto_rec.csv", "w", encoding="utf-8", newline="") as file:
        csv_writer = csv.writer(file)
        csv_writer.writerow(["user_id", "item_id"])
        for user_id, recommendation_list in user_recommendation_res.items():
            for item_id in recommendation_list:
                csv_writer.writerow([str(user_id), str(item_id)])
if __name__ == "__main__":
    train_data_path = "./dataset/book_train_dataset.csv"
    #train_data_path = "mini_train.csv"
    test_data_path = "./dataset/book_test_dataset.csv"
    #test_data_path = "mini_test.csv"
    k = 10  # recommendations per user
    n = 10000  # number of items

    train_user_id_list = get_user_id(train_data_path)
    m = len(train_user_id_list)  # number of users

    # BUG FIX: removed the leftover debug lookup
    # `index = train_user_id_list.index(13488)` — its result was unused and
    # it raised ValueError whenever user 13488 was absent from the dataset.

    train_data, mask_r = load_train_data(train_data_path, m, n)
    print("训练集加载完成...")
    test_user_id_list = load_test_data(test_data_path)
    print("测试user_id读取完成...")

    train_res = train(train_data, m, n, mask_r)
    print("完成训练...")
    user_recommendation_res = recommendation(train_res, train_user_id_list, test_user_id_list, k)
    print("完成推荐，正在写入文件...")
    write_res_to_csv(user_recommendation_res)
    print("submission文件生成.")