from pyspark.sql import SparkSession
from pyspark.sql.functions import col, expr, concat
from pyspark.sql.types import StructType, ArrayType, StringType, StructField, IntegerType

# Driver: create (or reuse) a local SparkSession for this demo app.
spark = (
    SparkSession.builder
    .master('local')
    .appName('HelloSpark')
    .getOrCreate()
)

# Explicit schema for the blog records; every field is required (nullable=False).
_blog_fields = [
    ("First", StringType()),
    ("Last", StringType()),
    ("Url", StringType()),
    ("Published", StringType()),
    ("Hits", IntegerType()),
    ("Campaigns", ArrayType(StringType())),
]
schema = StructType(
    [StructField(name, dtype, False) for name, dtype in _blog_fields]
)

# Load the JSON-lines dataset using the explicit schema (avoids a separate
# schema-inference pass over the file).
df = spark.read \
    .schema(schema) \
    .json("dataset/blogs.txt")

# 1. Print the column names.
print(df.columns)

# 2. Get a Column object by name (prints the Column expression, not data).
print(col('First'))

# 3. Compute a derived value with col(): Hits doubled.
df.select(col('First'), col('Hits'), col('Hits') * 2).show(truncate=False)

# 4. The same computation via a SQL expression string:
#    SELECT (Hits * 2) FROM ...
df.select(expr("Hits * 2")).show(truncate=False)

# 5. Add a derived boolean column.
df.withColumn('Bigger Hits', expr('Hits > 10000')).show(truncate=False)

# 6. Concatenate two string columns into one.
df.withColumn('Full Name', concat(col('First'), col('Last'))).show(truncate=False)

# 7. Sort by Hits, descending.
df.sort(col('Hits').desc()).show(truncate=False)

# Release driver/executor resources; the script previously never stopped
# the SparkSession, leaking it until process exit.
spark.stop()
