
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *

# Create the Spark session
from pyspark.sql.window import Window

# Single Spark session shared by every query in this script.
spark = (
    SparkSession.builder
    .appName("Python spark SQL")
    .getOrCreate()
)

# Explicit schema for the Yelp user JSON records.
# NOTE(review): this schema is defined but never passed to spark.read.json
# below — the read currently relies on schema inference. Types here are kept
# consistent with how the columns are actually used later in this script.
user_schema = StructType([
    StructField("user_id", StringType(), True),
    StructField("name", StringType(), True),
    StructField("review_count", IntegerType(), True),
    # Raw value is a "yyyy-MM-dd HH:mm:ss" string; converted to a proper
    # timestamp (Join_Date) right after loading.
    StructField("yelping_since", StringType(), True),
    # NOTE(review): `friends` is never used downstream; array vs
    # comma-separated string is unconfirmed — verify against the dataset.
    StructField("friends", ArrayType(StringType()), True),
    StructField("useful", IntegerType(), True),
    StructField("funny", IntegerType(), True),
    StructField("cool", IntegerType(), True),
    StructField("fans", IntegerType(), True),
    # FIX: downstream code does split(col('elite'), ','), which requires a
    # string column — the previous ArrayType(IntegerType()) contradicted that.
    StructField("elite", StringType(), True),
    StructField("average_stars", FloatType(), True),
    StructField("compliment_hot", IntegerType(), True),
    StructField("compliment_more", IntegerType(), True),
    StructField("compliment_profile", IntegerType(), True),
    StructField("compliment_cute", IntegerType(), True),
    StructField("compliment_list", IntegerType(), True),
    StructField("compliment_note", IntegerType(), True),
    StructField("compliment_plain", IntegerType(), True),
    StructField("compliment_cool", IntegerType(), True),
    StructField("compliment_funny", IntegerType(), True),
    StructField("compliment_writer", IntegerType(), True),
    StructField("compliment_photos", IntegerType(), True)
])

# Load the raw user sample (schema is inferred here; `user_schema` above is
# currently unused — TODO confirm whether it should be applied).
user_df = spark.read.json('../dataset/yelp_academic_dataset_user.sample.json')

# Normalize the join time, e.g. "2010-07-04 17:18:40", into a timestamp column.
user_df = (
    user_df
    .withColumn("Join_Date",
                to_timestamp(col("yelping_since"), "yyyy-MM-dd HH:mm:ss"))
    .drop("yelping_since")
)

# 1. Number of users who joined each year.
print("每年新加入的用户数量--------")
# FIX: the original deduplicated on ('Join_Date', 'join_year'), i.e. by join
# *timestamp* — two users joining in the same second collapsed into one.
# Deduplicate by user_id instead, then count users per join year.
count_df = user_df.select('user_id', year('Join_Date').alias('join_year')) \
    .distinct() \
    .groupby('join_year') \
    .count() \
    .withColumnRenamed('count', 'user_count') \
    .orderBy(col('join_year'))
count_df.show()

# 2. Top users by review count.
print("评论最高的10个达人--------")
# FIX: the query was built but never displayed — `.show(10)` was commented
# out behind a trailing backslash, so this section printed only the header.
user_df.select('user_id', 'name', 'review_count').distinct() \
    .orderBy(col('review_count').desc()) \
    .show(10)

# 3. Most popular users by fan count.
print("人气最高的10个用户--------")
# FIX: same issue as the review-count section — the result was never shown.
user_df.select('user_id', 'name', 'fans').distinct() \
    .orderBy(col('fans').desc()) \
    .show(10)

# 4. Ratio of elite users to all users per year.
# First derive the cumulative user total per year: a self-join that, for each
# year `a`, sums the new-user counts of every year `b` up to and including it.
print("count_df数据格式---------")
count_df.printSchema()
# FIX: spark.sql reads from the temp view `user_count_tb`, which was never
# registered — the query failed at runtime. Register it from count_df first.
count_df.createOrReplaceTempView("user_count_tb")
temp_df = spark.sql("""
select a.join_year, sum(b.user_count) as each_year_user_count
from   user_count_tb as a, user_count_tb as b
where  a.join_year>=b.join_year
group by a.join_year
""")
print("每年用户总数----------")
temp_df.show()


print("每年的精英用户--------")
# `elite` is treated as a comma-separated string of years: explode it so each
# (user, elite-year) pair becomes one row, then count users per elite year.
# NOTE(review): an empty `elite` value would produce a '' bucket — confirm
# against the dataset.
elite_df = (
    user_df
    .withColumn('elite_each_year', explode(split(col('elite'), ',')))
    .select('user_id', 'name', 'elite', 'elite_each_year',
            year('Join_Date').alias('join_year'))
    .groupby('elite_each_year')
    .count()
    .withColumnRenamed('count', 'elite_count')
)
# elite_df.show(20)

print('join表------')
# FIX: the original joined count_df while the join condition referenced
# temp_df, which was not part of the join — Spark cannot resolve that column
# at runtime. Per the step-4 comment (elite vs. all users), the intended
# denominator is the cumulative user total, so join temp_df with elite_df.
ratio_df = temp_df.join(elite_df, temp_df['join_year'] == elite_df['elite_each_year'])
ratio_df \
    .select(col('join_year').alias('year'),
            (col('elite_count') / col('each_year_user_count')).alias('ratio')) \
    .orderBy(col('year').desc()) \
    .show()
