#coding:utf8



#导包
from pyspark.sql import SparkSession
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql.types import StructType,StructField,IntegerType,StringType,FloatType


if __name__ == '__main__':
    # Build the SparkSession with Hive support so saveAsTable() targets the
    # external Hive metastore / warehouse configured below.
    session_builder = (
        SparkSession.builder
        .appName("sparkSQL")
        .master("local[*]")
        # Keep shuffle partitions small for a local, small-data job.
        .config("spark.sql.shuffle.partitions", 2)
        .config("spark.sql.warehouse.dir", "hdfs://master:9000/user/hive/warehouse")
        .config("hive.metastore.uris", "thrift://master:9083")
        .enableHiveSupport()
    )
    spark = session_builder.getOrCreate()

    # Explicit schema for movieList.csv: (column name, Spark SQL type) pairs,
    # every column nullable. Built from a spec table instead of a chained
    # .add() pipeline — the resulting StructType is identical.
    _column_specs = [
        ("title", StringType()),
        ("movieImg", StringType()),
        ("type", StringType()),
        ("country", StringType()),
        ("duration", IntegerType()),
        ("releaseTime", StringType()),
        ("rate", IntegerType()),
        ("summary", StringType()),
        ("director", StringType()),
        ("actors", StringType()),
        ("firstBoxOffice", IntegerType()),
        ("allBoxOffice", IntegerType()),
        ("detailUrl", StringType()),
    ]
    schema = StructType(
        [StructField(col_name, col_type, True) for col_name, col_type in _column_specs]
    )

    # Load the movie list CSV with the explicit schema.
    # NOTE(review): the original code performed this exact read twice and
    # discarded the first DataFrame; the duplicate (dead) read is removed.
    df = spark.read.format("csv"). \
        option("sep", ","). \
        option("header", True). \
        option("encoding", "utf-8"). \
        schema(schema=schema). \
        load("./movieList.csv")

    # Add a monotonically increasing surrogate id column.
    # NOTE: ids are increasing but not consecutive across partitions.
    df = df.withColumn("id", monotonically_increasing_id())

    # Data de-duplication / null dropping — intentionally disabled for now.
    # (Note: these calls return new DataFrames; to take effect they must be
    # reassigned, e.g. df = df.drop_duplicates().)
    # df.drop_duplicates()
    #
    # df.na.drop()

    # Write the DataFrame to MySQL over JDBC.
    # FIX: the MySQL Connector/J property for the connection charset is
    # `characterEncoding`, not `charset`; also dropped the bogus
    # option("encoding", ...) — it is not a JDBC writer option and was ignored.
    # SECURITY(review): credentials are hardcoded — move user/password to
    # configuration or environment variables.
    df.write.mode("overwrite"). \
        format("jdbc"). \
        option("url", "jdbc:mysql://master:3306/bigdata?useSSL=false&useUnicode=true&characterEncoding=utf8"). \
        option("dbtable", "dian_ying_shu_ju"). \
        option("user", "root"). \
        option("password", "qinxiao123456"). \
        save()

    # Persist the same data as an ORC Hive table (second positional arg of
    # saveAsTable is the format).
    df.write.mode("overwrite").option("comment", "电影表").saveAsTable("dian_ying_shu_ju", "orc")

    # FIX: the original queried a nonexistent table `catMovieData`, which
    # raised an AnalysisException; query the table actually saved above.
    spark.sql("select * from dian_ying_shu_ju").show()

    # Release the SparkSession's resources explicitly.
    spark.stop()
