from pyspark.sql import SparkSession
from pyspark.sql.functions import col, expr, concat, countDistinct, to_timestamp, year, min, max, month, avg, \
    weekofyear, count
from pyspark.sql.types import StructType, ArrayType, StringType, StructField, IntegerType, FloatType, BooleanType
from pyspark import find_spark_home

# Driver
# Driver: create (or reuse) a local SparkSession for this script.
spark = (
    SparkSession.builder
    .master('local')
    .appName('Hello Spark')
    .getOrCreate()
)
# Print the detected Spark installation directory (debug aid).
print(find_spark_home._find_spark_home())

# Explicit schema for the review dataset; every field is nullable.
review_schema = StructType([
    StructField("business_id", StringType(), True),
    StructField("cool", IntegerType(), True),
    StructField("date", StringType(), True),
    StructField("funny", IntegerType(), True),
    StructField("review_id", StringType(), True),
    StructField("stars", FloatType(), True),
    StructField("text", StringType(), True),
    StructField("useful", IntegerType(), True),
    StructField("user_id", StringType(), True),
])
# NOTE(review): the original passed .option("header", True), which is a
# CSV-reader option and is silently ignored by the JSON reader — removed.
review_df = spark.read.schema(review_schema).json("../dataset/review.json")
# Register the DataFrame so it can also be queried via spark.sql("... review ...").
review_df.createTempView("review")

# For each vote column, count how many reviews received at least one vote.
# The original duplicated this pipeline three times and compared the
# IntegerType columns against the STRING literal '0' ("cool>'0'"), relying
# on Spark's implicit cast; a typed numeric comparison is used instead.
for vote_col in ('cool', 'funny', 'useful'):
    review_df.select(vote_col) \
        .where(col(vote_col) > 0) \
        .agg(count(col(vote_col)).alias(vote_col)) \
        .show(truncate=False)

