
import time
import pandas as pd
from pyspark.ml.recommendation import ALS
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType


def recommend(ratings):
    """
    Train an ALS collaborative-filtering model and produce the top-10
    news recommendations for every user present in the ratings.

    input: spark dataframe ratings (columns: user_id, news_id, rating)
    output: spark dataframe of per-user recommendations
    """
    als_estimator = ALS(
        userCol="user_id",
        itemCol="news_id",
        ratingCol="rating",
        regParam=0.01,
        rank=20,
        seed=12,
    )
    fitted_model = als_estimator.fit(ratings)
    # Recommend only for users that actually appear in the input.
    user_subset = ratings.select(als_estimator.getUserCol())
    return fitted_model.recommendForUserSubset(user_subset, 10)


def get_rating(data):
    """
    Build a user-news rating matrix from raw user-action logs.

    Action codes:
    1  browse article list
    2  view article detail
    3  comment on article
    4  bookmark article
    5  leave article detail
    6  open app
    7  hide app
    8  remove bookmark

    input: list of tuples (user_id, news_id, action, timestamp)
    output: (spark dataframe with columns user_id/news_id/rating,
             dict mapping the generated int user ids back to the original ids)

    NOTE(review): relies on a global/ambient `spark` SparkSession being in
    scope — presumably provided by the calling notebook/driver; verify.
    """
    # TODO: tune the time-decay weighting
    # TODO: refine the rating computation
    # TODO: incorporate article category / tags
    # TODO: consider dwell time and other behavioral features
    # TODO: consider intrinsic user / article features

    current_time = time.time()

    rating_dict = dict()  # (user_id, news_id) -> accumulated score
    # Scores for the actions we rate; all other actions are ignored.
    action_score = {1: -1.0, 2: 1.0, 3: 2.0, 4: 2.0, 8: -2.0}

    user_id_map = dict()  # original user id -> new sequential int id
    user_id_base = 0

    for item in data:

        orig_user_id = item[0]
        news_id = item[1]
        action = item[2]
        timestamp = item[3]

        # Skip unscored actions and placeholder news ids.
        # BUGFIX: news ids arrive as strings from read_file, so the old
        # `news_id == 0` check could never match; compare both forms.
        if action not in action_score or news_id in (0, "0"):
            continue

        # Map the original user id (string) to a dense int id in order of
        # first appearance.
        # BUGFIX: the map must be keyed by the ORIGINAL id — the previous
        # code overwrote user_id before inserting, so repeat users got a
        # fresh id on every action and the reverse map was meaningless.
        if orig_user_id in user_id_map:
            user_id = user_id_map[orig_user_id]
        else:
            user_id = user_id_base
            user_id_map[orig_user_id] = user_id_base
            user_id_base += 1

        # Time-decay weight: 1.0 for an action happening right now,
        # decaying toward 0 as the action ages (age measured in days).
        time_weight = 2 / (1 + 1.5 ** ((current_time - timestamp) / 86400))

        # Tuple key instead of "uid,nid" string — robust even if an id
        # were ever to contain a comma.
        key = (user_id, news_id)
        rating_dict[key] = rating_dict.get(key, 0.0) + action_score[action] * time_weight

    # For now: dict -> pandas dataframe -> spark dataframe.
    new_rating_dict = {"user_id": [], "news_id": [], "rating": []}

    for (user_id, news_id), score in rating_dict.items():
        new_rating_dict["user_id"].append(int(user_id))
        new_rating_dict["news_id"].append(int(news_id))
        new_rating_dict["rating"].append(score)

    ratings = pd.DataFrame(new_rating_dict)

    mySchema = StructType([StructField("user_id", IntegerType(), True), StructField("news_id", IntegerType(), True),
                           StructField("rating", DoubleType(), True)])
    ratings = spark.createDataFrame(ratings, schema=mySchema)

    # Reverse map lets callers translate the dense int ids back to the
    # original user ids.
    return ratings, {v: k for k, v in user_id_map.items()}


def read_file(filename):
    """
    Parse a pipe-delimited user-action log file.

    Each line is expected to look like: user_id|news_id|action|timestamp
    Malformed lines (wrong field count, or a non-integer action/timestamp)
    are reported and skipped instead of aborting the whole load.

    input: path to the log file
    output: list of tuples (user_id: str, news_id: str, action: int, timestamp: int)
    """
    data = []
    with open(filename, "r") as f:
        for line in f:
            # Strip the trailing newline explicitly instead of relying on
            # int() tolerating surrounding whitespace.
            arr = line.rstrip("\n").split("|")
            if len(arr) != 4:
                print(arr)
                continue
            try:
                record = (arr[0], arr[1], int(arr[2]), int(arr[3]))
            except ValueError:
                # ROBUSTNESS: a non-numeric action/timestamp previously
                # raised and killed the entire load; treat it like any
                # other malformed line.
                print(arr)
                continue
            data.append(record)

    return data
