# 1) Obtain the Spark entry-point object, a SparkSession.
# master can be yarn/mesos/local
# local : run in local-machine memory
from pyspark.sql import SparkSession
from pyspark.sql.functions import when, col, avg, max, min, udf
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType

# Build (or reuse) the SparkSession that drives everything below.
spark = (
    SparkSession.builder
    .appName("HelloSpark")
    .master("local")
    .getOrCreate()
)

# Small demo DataFrame built from an RDD of (id, name, salary) tuples.
city_rows = [
    (0, "Beijing", 3000),
    (1, "Shanghai", 4000),
    (2, "Guangzhou", 2500),
]
cities = spark.sparkContext.parallelize(city_rows).toDF(["id", "name", "salary"])

# udf: user-defined function — wraps custom business logic so it can be
# applied column-wise inside Spark SQL expressions.
def _square(x):
    """Return x squared (demo business logic for the UDF)."""
    return x * x

# BUG FIX: udf() without an explicit returnType defaults to StringType,
# which silently casts the squared salary to a string column. Declare
# IntegerType (already imported above) so the result stays numeric.
udf_func = udf(_square, IntegerType())

cities.withColumn('SalaryAfter', udf_func(col('salary'))).show()
