# 1) Obtain the Spark entry-point object: SparkSession
# Supported cluster managers: yarn / mesos / local
# local: run everything in-process on the local machine
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import when, col, avg, max, min, udf, to_timestamp, year, dayofweek, count, dense_rank
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType, StringType, BooleanType, FloatType

# Build (or reuse) the SparkSession -- the single entry point to Spark SQL.
# master("local") runs driver and executor in a single local JVM process.
builder = SparkSession.builder
builder = builder.appName("HelloSpark")
builder = builder.master("local")
spark = builder.getOrCreate()

# Sample catalogue rows: (product name, category, revenue).
column_names = ['product', 'category', 'revenue']
dataset = [
    ("Thin", "Cell phone", 6000),
    ("Normal", "Tablet", 1500),
    ("Mini", "Tablet", 5500),
    ("Ultra thin", "Cell phone", 5000),
    ("Very thin", "Cell phone", 6000),
    ("Big", "Tablet", 2500),
    ("Bendable", "Cell phone", 3000),
    ("Foldable", "Cell phone", 3000),
    ("Pro", "Tablet", 4500),
    ("Pro2", "Tablet", 6500),
]

# Let Spark infer the schema (string, string, long) from the tuples.
df = spark.createDataFrame(dataset, column_names)

# Top-2 products by revenue within each category, via a ranking window.
# dense_rank keeps ties: two products sharing the top revenue both rank 1.
rank_window = Window.partitionBy('category').orderBy(col('revenue').desc())

ranked = df.withColumn('rank', dense_rank().over(rank_window))
ranked.filter(col('rank') <= 2).show()

# Revenue gap to the category leader (difference from the best seller).
category_window = Window.partitionBy('category')

# Two-step version: materialize the per-category max, then subtract.
with_max = df.withColumn('max_revenue', max('revenue').over(category_window))
with_max.withColumn('diff', col('max_revenue') - col('revenue')).show()

# One-step version: same gap in a single expression. NOTE: the column is
# still named 'max_revenue' here, but it actually holds the difference.
df.withColumn(
    'max_revenue',
    max('revenue').over(category_window) - col('revenue'),
).show()


HiveContext