# -*- coding: utf-8 -*-
"""
基于Item-CF相似推荐
"""
import json

from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql import Window
from utils.DataBase import DataBase
import os

os.environ["PYSPARK_PYTHON"] = '/opt/module/python3.7/bin/python3.7'
os.environ["PYSPARK_DRIVER_PYTHON"] = '/opt/module/python3.7/bin/python3.7'
os.environ['JAVA_HOME'] = "/opt/module/jdk1.8.0_212"


class ItemCFRecommender:
    """Item-based collaborative filtering.

    Builds a product-to-product co-occurrence similarity matrix from user
    ratings with Spark and stores the top-N most similar products per
    product as JSON rows in the ``itemCF_product_recs`` MySQL table.
    """

    def __init__(self):
        self.db = DataBase()
        # One Spark session per recommender instance.
        self.spark = SparkSession.builder.appName("ItemCFRecommender").getOrCreate()
        # Ratings DataFrame with columns (user_id, product_id, score);
        # populated by loadData().
        self.df = None
        self.sc = self.spark.sparkContext
        # Keep the driver output quiet: log fatal Spark errors only.
        self.sc.setLogLevel("FATAL")

    def loadData(self, sql):
        """Run *sql* against the database and cache the result as a Spark
        DataFrame with columns (user_id, product_id, score)."""
        data = self.db.select(sql)
        self.df = self.spark.createDataFrame(data, ["user_id", "product_id", "score"]).cache()

    def commend(self, similarity_n=6):
        """Compute and persist the top *similarity_n* similar products.

        Similarity(p1, p2) = |users who rated both p1 and p2|
                             / sqrt(|raters of p1| * |raters of p2|)

        Side effects: stops the SparkContext after collecting results,
        truncates ``itemCF_product_recs`` and bulk-inserts the new rows.
        """
        # 1. Number of ratings each product has received.
        product_counts = self.df.groupBy("product_id").agg(F.count("score").alias("num_ratings"))
        # 2. Attach the per-product rating count to every rating row.
        #    Use a local variable instead of reassigning self.df (the original
        #    mutated self.df, so a second commend() call would stack an extra
        #    count column and break the toDF rename below).
        rated = self.df.join(product_counts, "product_id", "left")
        # 3. Pair products rated by the same user (self-join on user_id).
        #    Column order after the join is (user_id, left cols, right cols),
        #    which toDF() relies on to rename the 7 columns.
        joinedDF = rated.join(rated, "user_id") \
            .toDF("user_id", "product1", "score1", "count1", "product2", "score2", "count2") \
            .select("user_id", "product1", "count1", "product2", "count2") \
            .filter(F.col("product1") != F.col("product2"))
        # 4. Count distinct co-raters per (product1, product2) pair.
        #    count1/count2 are constant within a group, so F.first keeps them.
        groupedDF = joinedDF.groupBy("product1", "product2") \
            .agg(
            F.countDistinct("user_id").alias("cocount"),
            F.first("count1").alias("count1"),
            F.first("count2").alias("count2")
        )
        # 5. Co-occurrence similarity score for each ordered pair.
        similarityDF = groupedDF.withColumn("score", F.col("cocount") / F.sqrt(F.col("count1") * F.col("count2"))) \
            .select("product1", "product2", "score")

        # 6. Rank neighbours per product1 by similarity and keep the top N.
        windowSpec = Window.partitionBy("product1").orderBy(F.desc("score"))
        topNDF = similarityDF.withColumn("rank", F.row_number().over(windowSpec)) \
            .filter(F.col("rank") <= similarity_n) \
            .select("product1", "product2", "score")

        # 7. Collect (product_id, JSON-encoded recommendation list) pairs
        #    onto the driver for the database insert.
        itemCFProductRecs_list = topNDF.groupBy("product1").agg(
            F.collect_list(F.struct("product2", "score")).alias("recommendations")) \
            .rdd \
            .map(
            lambda row: (row['product1'], json.dumps(
                [{"product_id": i['product2'], "score": i['score']} for i in row['recommendations']]))
        ) \
            .collect()

        # Spark work is finished; release cluster resources before the
        # database writes.
        self.sc.stop()
        # NOTE(review): the table is truncated before the insert, so a
        # failed insert leaves it empty — acceptable for a batch rebuild.
        self.db.delete("TRUNCATE TABLE itemCF_product_recs;")
        try:
            self.db.insert_many("insert into itemCF_product_recs(product_id, recommendations) values(%s,%s);",
                                itemCFProductRecs_list)
            print("itemCF_product_recs数据插入成功")
        except Exception as e:
            # Top-level boundary: report the failure and continue, matching
            # the job's best-effort style.
            print(f"发生错误：{e}")


def main():
    """Batch entry point: load ratings, build and store the item
    similarity matrix, then shut Spark down."""
    recommender = ItemCFRecommender()

    recommender.loadData("select user_id,product_id,rating from ratings;")
    print("数据加载成功！")

    recommender.commend()
    print("获取到商品相似矩阵！")

    # Stop the Spark session (commend() already stopped the underlying
    # context, so this is effectively a final cleanup).
    recommender.spark.stop()


# Run the batch job only when executed directly as a script.
if __name__ == '__main__':
    main()
