from pyspark.sql import SparkSession, Column
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, FloatType
import os
# Force executors to use python3; must be set before any Spark job runs.
os.environ['PYSPARK_PYTHON'] = "python3"

# Reuse an existing SparkSession if one is active, otherwise create one.
spark = SparkSession.builder.getOrCreate()

# Example of reading a table over JDBC from MySQL (kept for reference):
# df = spark.read.format("jdbc").option("url","jdbc:mysql://bigdata/spark").option("driver", "com.mysql.jdbc.Driver").option("user", "spark").option("password", "123456").option("dbtable", "user").load()
# df.show()

# Schema shared by the user CSV files: uid, username, gender, age,
# every column declared non-nullable.
schema = (
    StructType()
    .add("uid", IntegerType(), nullable=False)
    .add("username", StringType(), nullable=False)
    .add("gender", IntegerType(), nullable=False)
    .add("age", IntegerType(), nullable=False)
)

# Load the first batch of users, applying the shared schema.
user = spark.read.csv(
    "file:///Users/sonto/Workspace/P1905/spark_example/user.csv",
    schema=schema,
)

# Load the second batch of users. The original inline StructType duplicated
# the module-level `schema` field-for-field (uid/username/gender/age, all
# non-null), so reuse the shared definition instead of repeating it.
# NOTE: reads do not mutate the StructType, so sharing the object is safe.
user1 = spark.read.format("csv").load(
    "file:///Users/sonto/Workspace/P1905/spark_example/user-1.csv",
    schema=schema,
)

# Schema for the scores CSV: uid, subject, score (float), all non-null.
score_schema = StructType([
    StructField("uid", IntegerType(), False),
    StructField("subject", StringType(), False),
    StructField("score", FloatType(), False),
])

# Load the per-subject scores with the schema above.
score = spark.read.format("csv").load(
    "file:///Users/sonto/Workspace/P1905/spark_example/score.csv",
    schema=score_schema,
)


# user.show()
# user1.show()
# 联合
users = user.union(user1)
print(id(schema))
# score.show()
# score.where(score.score > 60).show()
# score.groupBy("subject").count().show()
# groupBy, select, where, count
# users.join(score, "uid", "inner").select("username", "subject", "score").show()

# users.drop("age").show()

# users.withColumn("age", users.age + 5).show()

# RDD to DataFrame

# rdd = spark.sparkContext.parallelize((('zhanshan', {"age":34, "gender": 1}), ('lisi', {"age":23, "gender": 0}), ('john', {"age":12, "gender": 1})))

# rdd.collect()

# Create a DataFrame from an RDD
# rdd_df = spark.createDataFrame(rdd)
# assert isinstance(rdd_df._2, Column)
# print(rdd_df._2.getItem(0))
# rdd_df.show(truncate=False)
# rdd_df.select(rdd_df._2.getItem(4)).show()
# print(rdd_df._2)
# rdd_df.show()

# rdd_df.select("_1", rdd_df._2.getItem("age")).show()

# Add columns
# rdd_df.withColumn("age", rdd_df._2.getItem("age")).withColumn("gender", rdd_df._2.getItem("gender")).show()
# Return the DataFrame's underlying RDD
# rdd_df.rdd

# users.show()


# users.show()

# "Row"
# print(users.rdd.map(lambda x: x.age).reduce(lambda age1, age2: age1 + age2))
# Debug print: identity of the schema object attached to `user` — pairs
# with print(id(schema)) above to check whether the same object is reused.
print(id(user.schema))
# Dump the schema as a JSON-compatible dict for inspection.
print(user.schema.jsonValue())
# user.printSchema()
# user1.printSchema()
# users.printSchema()

