# Persisting data
import pickle as cPickle
import pandas as pd
import itertools

# Parsing event/time strings
import datetime

import numpy as np
import scipy.io as sio
import scipy.sparse as ss

# Similarity / distance measures
import scipy.spatial.distance as ssd

from collections import defaultdict
from sklearn.preprocessing import normalize

# Collect the distinct user IDs and event IDs seen in train + test.
uniqueUsers = set()
uniqueEvents = set()

# Inverted indexes (filled later from train.csv only):
#   eventsForUser[user_idx]  -> set of event indexes the user responded to
#   usersForEvent[event_idx] -> set of user indexes that responded to the event
eventsForUser = defaultdict(set)
usersForEvent = defaultdict(set)

for filename in ["train.csv", "test.csv"]:
    # 'with' guarantees the handle is closed even if a record is malformed
    # (the original open/close pair leaked the handle on exception).
    with open(filename, 'r') as f:
        f.readline()  # skip the header row (column names)

        for line in f:  # one CSV record per line
            cols = line.strip().split(",")
            uniqueUsers.add(cols[0])   # column 0: user ID
            uniqueEvents.add(cols[1])  # column 1: event ID

# uniqueUsers is like {'4222590338', '1651844876', '1728055600', ...}
# uniqueEvents is like {'447655182', '3801216482', '1235652015', ...}
n_uniqueUsers = len(uniqueUsers)
n_uniqueEvents = len(uniqueEvents)

print("number of uniqueUsers :%d" % n_uniqueUsers)
print("number of uniqueEvents :%d" % n_uniqueEvents)

# User-event relation matrix R, usable later as input to LFM / SVD++.
# Sparse matrix recording each user's interest score for each event.
userEventScores = ss.dok_matrix((n_uniqueUsers, n_uniqueEvents))
userIndex = dict()
eventIndex = dict()

# Re-encode raw user IDs to dense 0-based row indexes.
for i, u in enumerate(uniqueUsers):
    userIndex[u] = i

# Re-encode raw event IDs to dense 0-based column indexes.
for i, e in enumerate(uniqueEvents):
    eventIndex[e] = i

with open("train.csv", 'r') as ftrain:
    ftrain.readline()  # skip header

    for line in ftrain:
        cols = line.strip().split(",")
        i = userIndex[cols[0]]   # user row index
        j = eventIndex[cols[1]]  # event column index

        eventsForUser[i].add(j)  # this user responded to this event
        usersForEvent[j].add(i)  # this event was responded to by this user

        # Column 4 is "interested" (0/1).  An alternative scoring was
        # interested - not_interested (cols[4] - cols[5]); note that a 0
        # score is indistinguishable from "no entry" in a sparse matrix
        # (the original considered mapping 0 -> -1 for that reason).
        userEventScores[i, j] = int(cols[4])

# eventIndex is like {'2312345356': 0, '1866219535': 1, '1238211380': 2, ...}
# eventsForUser is like defaultdict(<class 'set'>, {1772: {1888, 1799, ...}, ...}
# userEventScores is like
#   (1772, 5392)	1.0
#   (1066, 12941)	1.0
#   (1066, 9347)	1.0
#   (1066, 748)	1.0

# Persist events-per-user; later used to propagate friends' events to a user.
with open("PE_eventsForUser.pkl", 'wb') as fout:
    cPickle.dump(eventsForUser, fout)
# Persist users-per-event.
with open("PE_usersForEvent.pkl", 'wb') as fout:
    cPickle.dump(usersForEvent, fout)

# Persist the user-event relation matrix R for later use.
sio.mmwrite("PE_userEventScores", userEventScores)

# Persist the user index table.
with open("PE_userIndex.pkl", 'wb') as fout:
    cPickle.dump(userIndex, fout)
# Persist the event index table.
with open("PE_eventIndex.pkl", 'wb') as fout:
    cPickle.dump(eventIndex, fout)

# To avoid unnecessary computation later, keep only "associated" pairs:
#   associated users  = a pair of users who acted on at least one common event
#   associated events = a pair of events acted on by at least one common user
uniqueUserPairs = set()
uniqueEventPairs = set()
# example: itertools.combinations({1888, 1799, 11534}, 2) yields
#   (1888, 11534), (1888, 1799), (11534, 1799)
for event in uniqueEvents:
    i = eventIndex[event]
    users = usersForEvent[i]
    # BUG FIX: the original tested `len(users) > 2`, silently dropping events
    # with exactly two attendees even though they form a valid pair; any set
    # of 2+ users yields at least one co-attendance pair.
    if len(users) >= 2:
        uniqueUserPairs.update(itertools.combinations(users, 2))

for user in uniqueUsers:
    u = userIndex[user]
    events = eventsForUser[u]
    if len(events) >= 2:  # same off-by-one fix as above
        uniqueEventPairs.update(itertools.combinations(events, 2))

# Persist the associated user-pair / event-pair sets.
# uniqueEventPairs is like {(1294, 1131), (12559, 3192), (9290, 2379), ...}
# uniqueUserPairs is like {(584, 430), (1172, 311), (1642, 45), ...}
# NOTE(review): the "FE_" prefix below is inconsistent with the "PE_" prefix
# used for every other output file — likely a typo, but downstream loaders may
# depend on the existing name; confirm before renaming.
with open("FE_uniqueUserPairs.pkl", 'wb') as fout:
    cPickle.dump(uniqueUserPairs, fout)
with open("PE_uniqueEventPairs.pkl", 'wb') as fout:
    cPickle.dump(uniqueEventPairs, fout)
