#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date, datetime

from pyspark import Row
from pyspark.sql import SparkSession
from pyspark.sql.functions import col


def create_dataframe(spark=None):
    """Demonstrate two ways of creating a DataFrame.

    1. From a list of ``Row`` objects (schema inferred from the values).
    2. From a list of plain tuples plus an explicit column-name list.

    Args:
        spark: Optional ``SparkSession``. When omitted, falls back to the
            already-created session via ``getOrCreate()`` so the original
            no-argument call from ``__main__`` keeps working.
    """
    if spark is None:
        # getOrCreate() reuses the session built in __main__ instead of
        # relying on a module-level global.
        spark = SparkSession.builder.getOrCreate()

    # Schema (names and types) is inferred from the Row fields.
    df = spark.createDataFrame([
        Row(id=1, height=176., name='ccc', birth=date(2000, 1, 1), create_time=datetime(2000, 1, 1, 12, 0)),
        Row(id=2, height=181., name='aaa', birth=date(2000, 2, 1), create_time=datetime(2000, 1, 2, 12, 0)),
        Row(id=4, height=165., name='bbb', birth=date(2000, 3, 1), create_time=datetime(2000, 1, 3, 12, 0))
    ])
    df.show()

    # Tuples + column names; types are inferred per column.
    data = [("Tom", 28), ("John", 20), ("Mike", 30), ("Sara", 25)]
    columns = ["Name", "Age"]
    df2 = spark.createDataFrame(data, columns)
    # printSchema() prints to stdout and returns None, so wrapping it in
    # print() emitted a spurious "None" line — call it directly.
    df2.printSchema()


def col_action(spark=None):
    """Demonstrate common column operations: cast, arithmetic, add, rename.

    Args:
        spark: Optional ``SparkSession``. When omitted, falls back to the
            already-created session via ``getOrCreate()`` so the original
            no-argument call from ``__main__`` keeps working.
    """
    if spark is None:
        # Reuse the session built in __main__ rather than reading a global.
        spark = SparkSession.builder.getOrCreate()

    data = [("Tom", 28), ("John", 20), ("Mike", 30), ("Sara", 25)]
    columns = ["Name", "Age"]
    df2 = spark.createDataFrame(data, columns)

    # Cast a column in place (withColumn with an existing name replaces it).
    df2 = df2.withColumn("Age", col("Age").cast("int"))
    # printSchema() prints to stdout and returns None, so wrapping it in
    # print() emitted a spurious "None" line — call it directly.
    df2.printSchema()

    # Column arithmetic: increment every Age by 1.
    df2 = df2.withColumn("Age", col("Age") + 1)

    # Append a new column derived from an existing one.
    df2 = df2.withColumn("Age2", col("Age").cast("int"))
    df2.show(1)

    # Rename a single column.
    df_renamed = df2.withColumnRenamed("Age2", "Age3")
    df_renamed.show(1)

    # Bulk column rename via selectExpr "old as new" expressions.
    new_columns = ["Name1", "Age1"]
    new_df = df2.selectExpr(*[f"{origin_col} as {new_col}" for origin_col, new_col in zip(columns, new_columns)])
    new_df.show(1)


if __name__ == '__main__':
    # Standalone local session using all available cores.
    spark = (
        SparkSession.builder
        .appName("base_learn")
        .master("local[*]")
        .getOrCreate()
    )
    # create_dataframe()
    col_action()
    spark.stop()
