# -*- coding: utf-8 -*-
"""
基于内容推荐
"""
from pyspark import SparkConf
from pyspark.ml.feature import HashingTF, IDF
from pyspark.sql.functions import collect_list, struct, col
import json
import pyspark.sql.functions as F
from pyspark.sql import SparkSession, Window
from utils.DataBase import DataBase
from pyspark.sql.functions import udf
from pyspark.sql.types import FloatType
import os

os.environ["PYSPARK_PYTHON"] = '/opt/module/allPythonVenv/algorithmVenv/bin/python3.7'
os.environ["PYSPARK_DRIVER_PYTHON"] = '/opt/module/allPythonVenv/algorithmVenv/bin/python3.7'
os.environ['JAVA_HOME'] = "/opt/module/jdk1.8.0_212"

def main():
    """Run the full pipeline: load tags, fit TF-IDF, persist top-6 similar products."""
    engine = ContentBasedRecommender()

    # Load the product dataset from the database.
    engine.loadTagsDF("select product_id, product_name, product_ugc_tag from product;")
    print("数据加载成功！")

    # Fit the TF-IDF model on the loaded tags.
    engine.idf_train()
    print("模型训练成功！")

    # Compute feature vectors and store the 6 most similar products per product.
    engine.recommend(6)
    print("获取到内容相似商品矩阵！")

    # Shut down the Spark session.
    engine.spark.stop()


class ContentBasedRecommender:
    """Content-based product recommender.

    Builds TF-IDF feature vectors from '|'-separated product tags, computes
    pairwise cosine similarity between products, and stores the top-N most
    similar products per product in the ``content_based_product_recs`` table.
    """

    def __init__(self, num_features=800):
        """Initialize database access and the Spark session.

        :param num_features: number of hash buckets for HashingTF
            (default 800, preserving the original behavior).
        """
        # Project-local database helper (select / delete / insert_many).
        self.db = DataBase()
        # SparkSession used for all DataFrame work.
        self.spark = SparkSession.builder.appName("ContentBasedRecommender").getOrCreate()
        # Hash-bucket count used when vectorizing tags.
        self.num_features = num_features
        # DataFrame of (productId, name, tags); populated by loadTagsDF().
        self.productTagsDF = None
        # DataFrame with raw term-frequency vectors; populated by idf_train().
        self.featurizedDataDF = None
        # Fitted IDF model; populated by idf_train().
        self.idfModel = None

    def loadTagsDF(self, sql):
        """Load product tag rows via *sql* and cache them as a DataFrame.

        ``product_ugc_tag`` is a '|'-separated string of tags; it is split
        into a list of whole-tag tokens for HashingTF.
        (Fix: the original iterated the string character-by-character,
        feeding single characters — not tags — into TF-IDF.)
        """
        result_df = self.db.select(sql)
        self.productTagsDF = self.spark.createDataFrame(result_df, ["product_id", "product_name", "product_ugc_tag"]) \
            .rdd \
            .map(lambda x: (x.product_id, x.product_name, x.product_ugc_tag.split('|'))) \
            .toDF(["productId", "name", "tags"]) \
            .cache()

    def idf_train(self):
        """Compute term frequencies for the tags and fit the IDF model."""
        # Hash each tag token into a fixed-size term-frequency vector.
        hashingTF = HashingTF(inputCol="tags", outputCol="rawFeatures", numFeatures=self.num_features)
        self.featurizedDataDF = hashingTF.transform(self.productTagsDF)
        # Fit IDF on the raw frequencies; transform happens in recommend().
        idf = IDF(inputCol="rawFeatures", outputCol="features")
        self.idfModel = idf.fit(self.featurizedDataDF)

    def recommend(self, similarity_n):
        """Compute the top *similarity_n* similar products per product and persist them.

        :param similarity_n: how many similar products to keep per product.
        """
        # Add the TF-IDF "features" column.
        rescaledDataDF = self.idfModel.transform(self.featurizedDataDF)

        def _cosine(v1, v2):
            # Guard against zero vectors (a product with no tags) so we
            # return 0.0 instead of raising ZeroDivisionError.
            denom = v1.norm(2) * v2.norm(2)
            return float(v1.dot(v2) / denom) if denom else 0.0

        cosine_similarity_udf = udf(_cosine, FloatType())

        # Pairwise similarity over the full cross join, excluding self-pairs.
        # NOTE: this is O(n^2) in the number of products by construction.
        similarityDF = rescaledDataDF.alias("p1").join(rescaledDataDF.alias("p2"),
                                                       col("p1.productId") != col("p2.productId")) \
            .withColumn("similarity", cosine_similarity_udf("p1.features", "p2.features")) \
            .select(col("p1.productId").alias("product1"), col("p2.productId").alias("product2"), "similarity")

        # Rank matches per product by descending similarity and keep the top N.
        windowSpec = Window.partitionBy("product1").orderBy(F.desc("similarity"))
        topNDF = similarityDF.withColumn("rank", F.row_number().over(windowSpec)) \
            .filter(F.col("rank") <= similarity_n) \
            .select("product1", "product2", "similarity")

        # Collapse each product's matches into one JSON string for storage.
        productRecs_list = topNDF \
            .groupBy("product1") \
            .agg(collect_list(struct("product2", "similarity")).alias("recommendations")) \
            .rdd \
            .map(lambda x: (
                x["product1"],
                json.dumps([{"product_id": rec[0], "score": rec[1]} for rec in x["recommendations"]])
            )).collect()

        # Replace the previous recommendation table wholesale.
        self.db.delete("TRUNCATE TABLE content_based_product_recs;")
        try:
            self.db.insert_many("insert into content_based_product_recs(product_id, recommendations) values(%s,%s);",
                                productRecs_list)
            print("content_based_product_recs数据插入成功")
        except Exception as e:
            print(f"发生错误：{e}")


# Script entry point: run the full recommendation pipeline.
if __name__ == '__main__':
    main()
