# -*- coding: utf-8 -*-
from pyspark import SparkContext, SparkConf
from datetime import datetime, date
from pyspark.sql import Row, SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType


# There are two main ways to create an RDD in PySpark:
# 1. read a file via spark.sparkContext.textFile / sparkContext.textFile;
# 2. build one from a local collection via sparkContext.parallelize.
def base(sc: SparkContext):
    """Demonstrate both RDD-creation styles using the given SparkContext.

    :param sc: active SparkContext used to create the RDDs.
    """
    rdd = sc.textFile("../../data/student.txt")
    rdd.foreach(lambda line: print(line))
    # Elements may be tuples, lists, or a mix; their lengths need not match.
    data = [('Alex', 'male', 3), ('Nancy', 'female', 6), ['Jack', 'male', 9]]
    # Fix: use the `sc` parameter, not the global `spark` session —
    # the original referenced `spark.sparkContext`, which only worked by
    # accident when run as a script and broke the function's interface.
    rdd2 = sc.parallelize(data)
    print(type(rdd2))


def df_to_rdd(ss: SparkSession):
    """Build a small DataFrame and show that its `.rdd` attribute is an RDD.

    :param ss: active SparkSession used to create the DataFrame.
    """
    sample_rows = [
        Row(a=1, b=2., c='string1', d=date(2000, 1, 1), e=datetime(2000, 1, 1, 12, 0)),
        Row(a=2, b=3., c='string2', d=date(2000, 2, 1), e=datetime(2000, 1, 2, 12, 0)),
        Row(a=4, b=5., c='string3', d=date(2000, 3, 1), e=datetime(2000, 1, 3, 12, 0)),
    ]
    frame = ss.createDataFrame(sample_rows)
    frame.printSchema()
    # A DataFrame and its underlying RDD are distinct types.
    print(type(frame))
    print(type(frame.rdd))


def rdd_to_df(ss: SparkSession):
    """Convert a text-file RDD into DataFrames two ways: explicit schema and toDF.

    :param ss: active SparkSession providing the SparkContext and builders.
    """
    lines_rdd = ss.sparkContext.textFile("../../data/student.txt")

    # Way 1: map each CSV line to a Row, then apply an explicit StructType.
    row_rdd = lines_rdd.map(lambda line: line.split(",")) \
        .map(lambda arr: Row(id=int(arr[0]), name=arr[1], age=int(arr[2])))
    student_schema = StructType([
        StructField("id", IntegerType(), True),
        StructField("name", StringType(), True),
        StructField("age", IntegerType(), True),
    ])
    with_schema = ss.createDataFrame(row_rdd, student_schema)
    with_schema.printSchema()
    with_schema.show()

    # Way 2: toDF with a DDL schema string. The tuple element types must
    # match the declared column types one-to-one — calling toDF directly on
    # the raw string fields, e.g.
    #   rdd.map(lambda line: line.split(",")).toDF("id: int, name: string, age: int")
    # raises an error.
    typed_rdd = lines_rdd.map(lambda line: line.split(",")) \
        .map(lambda arr: (int(arr[0]), arr[1], int(arr[2])))
    via_to_df = typed_rdd.toDF("id: int, name: string, age: int")
    via_to_df.printSchema()
    via_to_df.show()


if __name__ == '__main__':
    # Legacy alternative: build a bare SparkContext directly.
    # conf = SparkConf().setAppName("rdd_learn").setMaster("local[*]")
    # spark = SparkContext(conf=conf)
    # Preferred modern entry point: a SparkSession running locally on all cores.
    spark = SparkSession.builder.appName("rdd_learn").master("local[*]").getOrCreate()
    base(spark.sparkContext)
    df_to_rdd(spark)
    rdd_to_df(spark)
    # Release all Spark resources before the script exits.
    spark.stop()
