# 1) Obtain the Spark entry-point object: SparkSession
# Cluster managers: yarn / mesos / local
# local: run Spark in-process, in local-machine memory
from pyspark import HiveContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import count, col, explode, split

# Build (or reuse) the process-wide SparkSession entry point.
# master("local") runs the driver and executors inside this single JVM.
builder = SparkSession.builder
builder = builder.appName("HelloSpark")
builder = builder.master("local")
spark = builder.getOrCreate()
# 2) Submit the big-data analysis job

# Smoke test: build a tiny two-row DataFrame from an in-memory RDD
# and print its row count (expected: 2).
people = [('tom', 20), ('jack', 40)]
rdd = spark.sparkContext.parallelize(people)
df = rdd.toDF(['name', 'age'])
print(df.count())
# Query the Hive table `business`: show the top-20 names by row count.
# NOTE(review): HiveContext normally lives in pyspark.sql, not the pyspark
# top-level package — the import at the top of this file looks wrong; the
# local import below uses the standard location. HiveContext is also
# deprecated since Spark 2.0 — prefer SparkSession.builder.enableHiveSupport().
from pyspark.sql import HiveContext

hc = HiveContext(spark.sparkContext)
b_df = hc.table('business')

# BUG FIX: pyspark.sql.functions.count() requires a column argument; the
# original bare count() raised TypeError. count('*') counts rows per group.
result = b_df.groupBy('name')\
    .agg(count('*').alias('cnt'))\
    .orderBy(col('cnt').desc())\
    .limit(20)

result.show()

# Split the comma-separated `categories` column into one row per category,
# then show the top-20 categories by frequency.
result = b_df.select(explode(split(col('categories'), ', ')).alias('category'))\
    .groupBy('category')\
    .agg(count('category').alias('cnt'))\
    .orderBy(col('cnt').desc())\
    .limit(20)

# BUG FIX: a bare `result` expression is a no-op in a script (it only echoes
# a value in an interactive REPL); call .show() to actually display the rows,
# matching how the first aggregation above is printed.
result.show()


# Removed the dead store `x = 10` — it was immediately overwritten by the
# string assignment, so only the final binding is observable.
x = "string"