# from pyspark.sql import SparkSession
# from pyspark.sql.functions import monotonically_increasing_id
# from pyspark.sql.types import StructType,StructField,StringType,IntegerType,FloatType
#
# if __name__ == '__main__':
#     spark = SparkSession.builder.appName("sparkSQL").master("local[*]").\
#         config("spark.sql.shuffle.partitions", 2).\
#         config("spark.sql.warehouse.dir", "hdfs://192.168.88.161:8020/user/hive/warehouse").\
#         config("hive.metastore.uris", "thrift://192.168.88.161:9083").\
#         enableHiveSupport().\
#         getOrCreate()
#     sc = spark.sparkContext
#
#     schema = StructType([
#         StructField("city", StringType(), nullable=True),
#         StructField("title", StringType(), nullable=True),
#         StructField("type", StringType(), nullable=True),
#         StructField("address", StringType(), nullable=True),
#         StructField("cover", StringType(), nullable=True),
#         StructField("totalComment", IntegerType(), nullable=True),
#         StructField("start", IntegerType(), nullable=True),
#         StructField("avgPrice", IntegerType(), nullable=True),
#         StructField("totalType", StringType(), nullable=True),
#         StructField("detailLink", StringType(), nullable=True),
#         StructField("tasterate", FloatType(), nullable=True),
#         StructField("envsrate", FloatType(), nullable=True),
#         StructField("serverate", FloatType(), nullable=True)
#     ])
#
#     df = spark.read.format("csv").\
#         option("sep", ",").\
#         option("header", True).\
#         option("encoding","utf-8").\
#         schema(schema = schema).\
#         load("./footdata.csv")
#     df = df.withColumn("id", monotonically_increasing_id())
#     df = df.dropDuplicates()
#     df = df.na.drop()
#     df.show()
#
#     try:
#         df.write.mode("overwrite").\
#         format("jdbc").\
#         option(key="url", value="jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8").\
#         option(key="dbtable", value="fooddata").\
#         option(key="user", value="root").\
#         option(key="password", value="123456").\
#         option(key="encoding", value="utf-8").\
#         save()
#
#         df.write.mode("overwrite").saveAsTable("fooddata","parquet")
#         spark.sql("select * from fooddata").show()
#     except Exception as e:
#         print(e)
from pyspark.sql import SparkSession
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql.types import StructType,StructField,StringType,IntegerType,FloatType

if __name__ == '__main__':
    # Build a local SparkSession with Hive support so the cleaned data can be
    # written both to MySQL (over JDBC, driver jar supplied via spark.jars)
    # and to the Hive warehouse on HDFS.
    spark = SparkSession.builder \
        .appName("sparkSQL") \
        .master("local[*]") \
        .config("spark.sql.shuffle.partitions", 2) \
        .config("spark.sql.warehouse.dir", "hdfs://192.168.88.161:8020/user/hive/warehouse") \
        .config("hive.metastore.uris", "thrift://192.168.88.161:9083") \
        .config("spark.jars", "/tmp/pycharm_project_88/spark/mysql-connector-java-5.1.32.jar") \
        .enableHiveSupport() \
        .getOrCreate()
    # NOTE(review): removed the unused local `sc = spark.sparkContext` —
    # nothing below uses the SparkContext directly.

    # Expected layout of the input CSV, one (column name, Spark type) pair per
    # column. Every column is declared nullable; null rows are filtered later.
    _column_specs = [
        ("city", StringType()),
        ("title", StringType()),
        ("type", StringType()),
        ("address", StringType()),
        ("cover", StringType()),
        ("totalComment", IntegerType()),
        ("start", IntegerType()),
        ("avgPrice", IntegerType()),
        ("totalType", StringType()),
        ("detailLink", StringType()),
        ("tasterate", FloatType()),
        ("envsrate", FloatType()),
        ("serverate", FloatType()),
    ]
    schema = StructType(
        [StructField(name, dtype, nullable=True) for name, dtype in _column_specs]
    )

    # Load the CSV using the explicit schema (file has a header row, UTF-8).
    df = spark.read.format("csv").\
        option("sep", ",").\
        option("header", True).\
        option("encoding", "utf-8").\
        schema(schema).\
        load("./footdata.csv")
    # BUG FIX: the original assigned the monotonically increasing `id` column
    # BEFORE dropDuplicates(). Because that id is unique per row, every row
    # became distinct and duplicate data rows were never removed. Clean the
    # data first, then attach the surrogate id.
    df = df.dropDuplicates()
    df = df.na.drop()
    df = df.withColumn("id", monotonically_increasing_id())
    df.show()

    try:
        # Persist the cleaned DataFrame to MySQL over JDBC.
        # BUG FIX: `charset` is not a MySQL Connector/J connection-URL
        # property and was silently ignored; the correct property is
        # `characterEncoding` (paired with useUnicode=true).
        # NOTE(review): credentials are hard-coded here — move to config.
        df.write.mode("overwrite"). \
            format("jdbc"). \
            option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&characterEncoding=utf8"). \
            option("dbtable", "fooddata"). \
            option("user", "root"). \
            option("password", "123456"). \
            option("driver", "com.mysql.jdbc.Driver"). \
            save()

        # Also materialize the data as a Hive table (parquet format) and
        # read it back through Spark SQL as a sanity check.
        df.write.mode("overwrite").saveAsTable("fooddata", "parquet")
        spark.sql("select * from fooddata").show()
    except Exception as e:
        print("Error:", e)
        raise  # re-raise so the full stack trace is visible