# -*- coding:utf-8 -*-
# @Author: shenyuyu
# @Time: 2023/6/28 20:20
# @File: h_2_2.py
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StringType, IntegerType
from pyspark.sql import functions as F

if __name__ == '__main__':
    # Local Spark session for the MovieLens u.data rating exercises.
    spark = SparkSession.builder.appName("a").master("local[*]").getOrCreate()
    # u.data is tab-separated with no header: user id, movie id, rating (1-5), timestamp.
    df = spark.read.format("csv")\
        .option("header", False)\
        .option("sep", "\t")\
        .option("encoding", "utf-8")\
        .schema("user Int, movie Int, score Int, time String")\
        .load("file:///tmp/pycharm_project_161/data/sql/u.data")
    df.createTempView("movie")

    # todo: average rating per user
    # df.groupby("user").avg("score")\
    #     .withColumnRenamed("avg(score)", "score_avg")\
    #     .withColumn("score_avg", F.round("score_avg", 2))\
    #     .orderBy("score_avg", ascending=False).show()
    # todo: average rating per movie
    # df.groupby("movie").avg("score")\
    #     .withColumnRenamed("avg(score)", "score_avg")\
    #     .withColumn("score_avg", F.round("score_avg", 2))\
    #     .orderBy("score_avg", ascending=False).show()
    # todo: count the ratings above the global average score
    # Compute the global average ONCE and reuse it — the original re-ran the
    # whole aggregation job for both show() and count().
    global_avg = df.select(F.avg("score").alias("avg_score")).first()["avg_score"]
    # Build the filtered frame once as well, then trigger the two actions on it.
    above_avg = df.where(df["score"] > global_avg)
    above_avg.show()
    print(above_avg.count())
    # todo: among users who rated high-score movies (>3), find the one with the
    #       most ratings and that user's average rating

    # todo: per-user average, minimum and maximum rating

    # todo: movies rated more than 100 times — top 10 by average rating