from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import col, expr, concat, countDistinct, to_timestamp, year, min, max, month, avg, \
    weekofyear, count, dense_rank
from pyspark.sql.types import StructType, ArrayType, StringType, StructField, IntegerType, FloatType, BooleanType
from pyspark import find_spark_home

# Driver: build (or reuse) a local SparkSession for this script, then show
# which Spark installation is actually being used.
builder = SparkSession.builder
builder = builder.master('local')
builder = builder.appName('Hello Spark')
spark = builder.getOrCreate()
print(find_spark_home._find_spark_home())

# Explicit schema for the Yelp-style review records. Supplying it to the
# reader pins every column's type and skips Spark's schema-inference pass
# over the whole JSON file.
review_schema = StructType([
    StructField("business_id", StringType(), True),
    StructField("cool", IntegerType(), True),
    StructField("date", StringType(), True),
    StructField("funny", IntegerType(), True),
    StructField("review_id", StringType(), True),
    StructField("stars", FloatType(), True),
    StructField("text", StringType(), True),
    StructField("useful", IntegerType(), True),
    StructField("user_id", StringType(), True),
])

# BUG FIX: review_schema was built but never handed to the reader, and
# option("header", True) is a CSV-only option that the JSON reader silently
# ignores. Apply the schema explicitly and drop the no-op option.
review_df = spark.read.schema(review_schema).json("../dataset/review.json")

# Derive a 'year' column from the review date string; order newest-first
# purely for the show() below (ordering is irrelevant to the groupBy).
df = review_df.withColumn('year', year('date')).orderBy(col('year').desc())
df.show()

# Review count per (user, year).
ef = df.groupby('user_id', 'year') \
    .agg(count('review_id').alias('count'))

# Rank users within each year by their review count; dense_rank keeps ties
# on the same rank with no gaps.
window = Window.partitionBy('year').orderBy(col('count').desc())

# BUG FIX: the filter was "rank <='3'", comparing the integer rank against
# the string '3' and relying on implicit SQL casts. Compare as integers.
ef.select('year', 'user_id', 'count', dense_rank().over(window).alias('rank')) \
    .where(col('rank') <= 3) \
    .show()
