from pyspark import SparkConf, SparkContext
from pyspark.sql import HiveContext

#python packages
import numpy as np
import time

import pandas as pd
from pyspark.sql.types import *
from pyspark.ml.recommendation import ALS
from pyspark.sql.functions import col
import numpy as np
import json
from operator import itemgetter


def get_rating(data):
    """
    Build a user-news rating table from raw action logs.

    Action codes emitted by the upstream app:
        1  browse article list
        2  view article detail
        3  comment on article
        4  bookmark article
        5  leave article detail
        6  open app
        7  hide app
        8  un-bookmark article

    :param data: list of (user_id, news_id, action, timestamp) tuples;
                 user_id/news_id arrive as strings from read_file()
    :return: Spark DataFrame with columns (user_id: str, news_id: int,
             rating: float)
    """
    # TODO: tune the time-decay weighting; incorporate article
    # category/tags, dwell time, and user/article side features.

    current_time = time.time()

    # Only these actions contribute to the score; un-bookmark (8) is a
    # penalty. Other actions (list browse, open/hide app, ...) are ignored.
    action_score = {2: 5.0, 3: 10.0, 4: 20.0, 8: -2}

    rating_dict = {}
    for user_id, news_id, action, timestamp in data:
        # Skip unscored actions and records with no article id.
        # BUG FIX: news_id is a *string* when produced by read_file(),
        # so the old `news_id == 0` test never matched "0".
        if action not in action_score or news_id in (0, "0"):
            continue

        # Exponential time decay: a score halves roughly every 6.6 days
        # (0.9 ** 6.58 ~= 0.5).
        time_weight = 0.9 ** ((current_time - timestamp) / 86400)

        # Tuple key instead of "user,news" string: avoids broken parsing
        # if a user_id ever contains a comma.
        key = (user_id, news_id)
        rating_dict[key] = rating_dict.get(key, 0.0) + action_score[action] * time_weight

    # dict -> pandas DataFrame -> Spark DataFrame (simple and adequate here).
    new_rating_dict = {"user_id": [], "news_id": [], "rating": []}
    for (user_id, news_id), rating in rating_dict.items():
        new_rating_dict["user_id"].append(str(user_id))
        new_rating_dict["news_id"].append(int(news_id))
        new_rating_dict["rating"].append(rating)

    ratings = pd.DataFrame(new_rating_dict)

    mySchema = StructType([StructField("user_id", StringType(), True),
                           StructField("news_id", IntegerType(), True),
                           StructField("rating", DoubleType(), True)])
    # NOTE(review): relies on a module-level `spark` session that is not
    # created anywhere in this file — confirm it exists before this runs.
    ratings = spark.createDataFrame(ratings, schema=mySchema)

    return ratings


def read_file(filename):
    """
    Parse a pipe-delimited action log file.

    Each valid line has the form "user_id|news_id|action|timestamp".
    Malformed lines (wrong field count) are printed and skipped.

    :param filename: path to the log file
    :return: list of (user_id, news_id, action, timestamp) tuples, where
             user_id and news_id are strings and action/timestamp are ints
             (int() tolerates the trailing newline on the last field)
    """
    records = []
    with open(filename, "r") as fh:
        for raw_line in fh:
            fields = raw_line.split("|")
            if len(fields) != 4:
                # Surface bad rows for inspection instead of aborting.
                print(fields)
                continue
            records.append(
                (str(fields[0]), str(fields[1]), int(fields[2]), int(fields[3]))
            )
    return records


# Minimum number of co-rated items two users must share before a
# similarity score is computed between them.
min_common_item_bar = 1

# 1. user-based CF: pairwise user similarity.
# Pairs below the common-item bar yield None (the old "<5" note here did
# not match the bar of 1 above — TODO confirm the intended threshold).
def user_sim_func(xi):
    """
    Cosine similarity between two users' rating vectors over their
    commonly-rated items.

    :param xi: pair ((user_a, [(item, rating), ...]),
                     (user_b, [(item, rating), ...]))
    :return: (user_a, (user_b, similarity)) with similarity rounded to
             5 decimal places, or None when the users share fewer than
             `min_common_item_bar` items.
    """
    user_a, ratings_a = xi[0]
    user_b, ratings_b = xi[1]

    # Dict lookup also deduplicates any repeated item ids (last one wins).
    dict_a = dict(ratings_a)
    dict_b = dict(ratings_b)
    common_items = dict_a.keys() & dict_b.keys()
    if len(common_items) < min_common_item_bar:
        return None

    # BUG FIX: build both vectors in the SAME item order. The original
    # filtered each list in its own order, so when the two users' rating
    # lists were ordered differently, ratings were paired against the
    # wrong items and the cosine similarity was wrong.
    ordered_items = sorted(common_items)
    vector_a = [dict_a[item] for item in ordered_items]
    vector_b = [dict_b[item] for item in ordered_items]
    cos = np.around(cos_sim(vector_a, vector_b), 5)
    return (user_a, (user_b, cos))


def cos_sim(vector_a, vector_b):
    """
    Compute the cosine similarity between two equal-length numeric vectors.

    :param vector_a: sequence of numbers
    :param vector_b: sequence of numbers
    :return: similarity in [-1, 1] (nan/inf if either vector has zero norm,
             matching the previous behavior)
    """
    # np.mat / np.matrix is deprecated; a plain dot product on 1-D arrays
    # is the equivalent (and simpler) computation.
    a = np.asarray(vector_a, dtype=float)
    b = np.asarray(vector_b, dtype=float)
    num = float(np.dot(a, b))
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return num / denom


# Expand one joined record (one similar-user -> target-user edge) into rows.
def prepare_rating(x):
    """
    Turn a joined similarity/rating record into per-article rating rows.

    :param x: (similar_user, ((target_user, similarity), articles)) where
              articles is an iterable of (news_id, rating) pairs
    :return: list of (target_user, news_id, rating, similarity) tuples
    """
    (target_user, similarity), articles = x[1]
    sim = float(similarity)
    return [(target_user, news_id, rating, sim) for news_id, rating in articles]



# --- Driver script: user-based CF pipeline ---------------------------------
# NOTE(review): this script uses a module-level `spark` session (see
# toDF/spark.sql below) that is never created in this file — confirm it is
# provided by the execution environment (e.g. spark-submit shell).
filename = "/Users/huyi/Desktop/action.json"
data = read_file(filename)
print("file read finished!")

ratings = get_rating(data)
print("ratings done!")


ratings = ratings.select('user_id','news_id','rating')
# Group ratings by user: user_id -> [(news_id, rating), ...]
rating_by_user_rdd = ratings.rdd.map(lambda x: (x.user_id, (x.news_id, x.rating))).groupByKey().mapValues(list)

# Drop users with too few ratings. NOTE(review): the original comment said
# "fewer than 3" but the code keeps users with >= 2 — confirm intent.
rating_by_user_rdd = rating_by_user_rdd.filter(lambda kv: len(kv[1])>=2)
# Cartesian product of user rating lists:
# xi: ((user1, [(n1,r1),(n2,r2)]), (user2, [(n1,r1),(n2,r2)]))
rating_cross = rating_by_user_rdd.cartesian(rating_by_user_rdd)

# Compute pairwise similarities; user_sim_func returns None for pairs
# below the common-item bar, so filter those out before grouping.
user_sim_rdd = rating_cross.map(user_sim_func).filter(lambda x: x is not None).groupByKey().mapValues(list)



# Keep only users that have at least 5 similar users.
user_sim_rdd = user_sim_rdd.filter(lambda x: len(x[1]) >= 5)

# Within each row, sort neighbors by similarity, descending.
user_sim_rdd = user_sim_rdd.mapValues(lambda x: sorted(x, key = itemgetter(1), reverse = True))


# Re-key by the similar user: (similar_user, (target_user, similarity)).
def f(x): return x  # identity, used to flatten grouped values below
temp = user_sim_rdd.flatMapValues(f).map(lambda x:(x[1][0], (x[0], x[1][1])))

# Join each similar user with that user's own news ratings.
temp2 = temp.join(rating_by_user_rdd)


# Expand to (target_user, news_id, rating, similarity) rows.
temp3 = temp2.map(prepare_rating).flatMap(f)

# RDD -> DataFrame.
mySchema = StructType([ StructField("user_id", StringType(), True) ,StructField("news_id", IntegerType(), True), StructField("rating", DoubleType(), True), StructField("similarity", DoubleType(), True)])
for_rating = temp3.toDF(schema=mySchema)

# Similarity-weighted average rating per (user, news) pair.
for_rating.createOrReplaceTempView("for_rating")
predict_rating = spark.sql("SELECT user_id, news_id, SUM(rating*similarity)/SUM(similarity) AS weighted_rating FROM for_rating GROUP BY user_id, news_id")

predict_rating.show(10)

