from pyspark.sql.session import SparkSession
from pyspark.sql.types import Row

# 创建spark sql执行环境
# Build the SparkSession — the entry point for the DataFrame (Spark SQL) API.
spark = (
    SparkSession.builder
    .master("local")
    .appName("rdd")
    .getOrCreate()
)

# Grab the underlying SparkContext for the low-level RDD API.
sc = spark.sparkContext

# Load the raw student records: one comma-separated line per RDD element.
lines_rdd = sc.textFile("../../data/students.txt")


def map_fun(line):
    """Parse one comma-separated student record into a plain tuple.

    Args:
        line: a CSV line of the form "id,name,age,sex,clazz".

    Returns:
        A 5-tuple ``(id, name, age, sex, clazz)`` with ``age`` cast to int.

    Raises:
        ValueError: if the age field is not an integer.
        IndexError: if the line has fewer than 5 comma-separated fields.
    """
    fields = line.split(",")
    # `student_id` instead of `id`: avoid shadowing the builtin `id()`.
    student_id = fields[0]
    name = fields[1]
    age = int(fields[2])  # age is the only numeric column
    sex = fields[3]
    clazz = fields[4]
    return student_id, name, age, sex, clazz


# Parse every CSV line into a (id, name, age, sex, clazz) tuple.
students_rdd = lines_rdd.map(map_fun)

# 1. Convert an RDD of plain tuples into a DataFrame by supplying
#    the column names explicitly (types are inferred per column).
lines_df = spark.createDataFrame(students_rdd, ["id", "name", "age", "sex", "clazz"])

lines_df.show()


def map_fun_row(line):
    """Parse one comma-separated student record into a pyspark ``Row``.

    Args:
        line: a CSV line of the form "id,name,age,sex,clazz".

    Returns:
        ``Row(id=..., name=..., age=..., sex=..., clazz=...)`` with ``age``
        cast to int; the Row's named fields let Spark infer the schema.

    Raises:
        ValueError: if the age field is not an integer.
        IndexError: if the line has fewer than 5 comma-separated fields.
    """
    fields = line.split(",")
    # `student_id` instead of `id`: avoid shadowing the builtin `id()`.
    student_id = fields[0]
    name = fields[1]
    age = int(fields[2])  # age is the only numeric column
    sex = fields[3]
    clazz = fields[4]
    return Row(id=student_id, name=name, age=age, sex=sex, clazz=clazz)


# 2. Convert an RDD of Row objects into a DataFrame; no explicit schema
#    is needed because Spark infers it from the Rows' named fields.
students_row_rdd = lines_rdd.map(map_fun_row)
spark.createDataFrame(students_row_rdd).show()

# 3. Convert the DataFrame back into an RDD.
# Each element comes back as a pyspark Row, e.g.
# Row(id='1500100999', name='钟绮晴', age=23, sex='女', clazz='文科五班')
# NOTE: this rebinds `lines_rdd` — the original text-file RDD is no
# longer reachable under that name from here on.
lines_rdd = lines_df.rdd
lines_rdd.foreach(print)
# Row supports both attribute access (row.clazz) and item access
# (row["clazz"]); the two are equivalent for named fields.
lines_rdd.map(lambda row: row["clazz"]).foreach(print)
