from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StringType, IntegerType

if __name__ == '__main__':
    # Local demo session; 2 shuffle partitions keeps this tiny job fast.
    spark = (
        SparkSession.builder
        .appName("test")
        .master("local[*]")
        .config("spark.sql.shuffle.partitions", 2)
        .getOrCreate()
    )

    # u.data is tab-separated with no header: user_id \t movie_id \t rank \t ts.
    schema = (
        StructType()
        .add("user_id", StringType())
        .add("movie_id", IntegerType())
        .add("rank", IntegerType())
        .add("ts", StringType())
    )
    df = (
        spark.read.format("csv")
        .option("sep", "\t")
        .option("header", False)
        .option("encoding", "utf-8")
        .schema(schema)
        .load("/Users/cdhuangchao3/tmp/spark_demo/u.data")
    )

    # Shared JDBC options for both the write and the read-back below.
    # Passing the "driver" class explicitly fixes
    # "java.sql.SQLException: No suitable driver" without copying the
    # mysql-connector jar into jre/lib/ext; the jar still needs to be on the
    # Spark classpath (e.g. via the spark.jars config or --jars).
    jdbc_options = {
        "url": "jdbc:mysql://localhost:3306/test?useSSL=false&useUnicode=true",
        "driver": "com.mysql.jdbc.Driver",
        "dbtable": "movie_data",
        "user": "root",
        "password": "Password",
    }

    # Replace the movie_data table with the freshly loaded ratings.
    df.write.mode("overwrite").format("jdbc").options(**jdbc_options).save()

    # Read the table back to verify the round trip.
    df2 = spark.read.format("jdbc").options(**jdbc_options).load()

    df2.printSchema()
    df2.show()

    # Release the SparkContext/JVM instead of relying on process exit.
    spark.stop()