from pyspark.sql import SparkSession
from pyspark.sql.functions import col, expr, concat
from pyspark.sql.types import StructType, ArrayType, StringType, StructField, IntegerType

# Driver: build (or reuse) a local SparkSession shared by all examples below.
spark = (
    SparkSession.builder
    .master('local')
    .appName('HelloSpark')
    .getOrCreate()
)

# 1. map: apply a function to every element, doubling each value.
numbers = spark.sparkContext.parallelize([1, 2, 3, 4])
doubled = numbers.map(lambda n: n * 2)
print(doubled.collect())

# 2. filter/where: keep only the rows whose age is below 20.
people = spark.sparkContext.parallelize([('tom', 20), ('jack', 18)])
df = people.toDF(['name', 'age'])

df.where('age < 20').show()

# 3. grouping: count how many rows share each name.
name_age_pairs = spark.sparkContext.parallelize(
    [('tom', 20), ('jack', 18), ('tom', 21)]
)
df = name_age_pairs.toDF(['name', 'age'])

df.groupby('name').count().show(truncate=False)

# 4. randomSplit: split an RDD into weighted random subsets.
# A fixed seed makes the example deterministic across runs; without it
# the split (and the printed output) changes on every execution.
rdd = spark.sparkContext.parallelize([1, 2, 3, 4, 5, 6, 7])
splits = rdd.randomSplit([0.8, 0.2], seed=42)
for split in splits:
    print(split.collect())

# 5. orderBy/sort: sort rows descending by a column.
# (Example uses the 'age' column from the dataframes above; the original
# referenced a nonexistent 'Hits' column.)
# df.sort(col('age').desc()).show(truncate=False)

# 6. distinct: list each name only once, dropping duplicates.
records = spark.sparkContext.parallelize(
    [('tom', 20), ('jack', 18), ('tom', 21)]
)
df = records.toDF(['name', 'age'])

df.select('name').distinct().show(truncate=False)


# 7. alias: rename a column in the projection.
rows = spark.sparkContext.parallelize(
    [('tom', 20), ('jack', 18), ('tom', 21)]
)
df = rows.toDF(['name', 'age'])

df.select(col('name').alias('COOLNAME')).show()
