#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, IntegerType, StructField, StringType


def read_json(ss: SparkSession):
    """Demonstrate reading JSON: nested/array documents vs. line-delimited records.

    Args:
        ss: active SparkSession providing the DataFrameReader.
    """
    # JSON arrays or nested documents span multiple lines, so the reader
    # needs multiLine mode (the default expects one JSON object per line).
    nested = ss.read.option("multiLine", "true").json("../../data/multi.json")
    nested.show()

    # Line-delimited JSON (one object per line) works with the default reader.
    flat = ss.read.json("../../data/simple.json")
    flat.show()


def read_csv(ss: SparkSession):
    """Demonstrate reading CSV: header row as column names, schema inference, multiple files.

    Args:
        ss: active SparkSession providing the DataFrameReader.
    """
    # header=true takes the first row as column names; inferSchema makes Spark
    # scan the data to derive column types instead of treating everything as string.
    df = ss.read.option("header", "true").option("delimiter", ",").csv("../../data/1.csv", inferSchema=True)
    df.show()
    # printSchema() prints directly and returns None — wrapping it in print()
    # (as the original did) emits a spurious "None" line.
    df.printSchema()

    # Reading multiple files at once: pass a list of paths.
    df2 = ss.read.option("header", "true").option("delimiter", ",").csv(["../../data/1.csv", "../../data/1.csv"])
    df2.show()


def read_txt(ss: SparkSession):
    """Demonstrate reading a plain text file (one row per line, single string column).

    Args:
        ss: active SparkSession providing the DataFrameReader.
    """
    # The text reader splits only on line separators; the original
    # option("delimiter", ",") was a no-op for text() and has been dropped.
    df = ss.read.text("../../data/simple.txt")
    df.show()
    # printSchema() prints directly and returns None — do not wrap it in
    # print(), which emitted a spurious "None" line.
    df.printSchema()


def read_jdbc(ss: SparkSession):
    """Demonstrate reading a table from MySQL over JDBC.

    Args:
        ss: active SparkSession providing the DataFrameReader.
    """
    # (The original comment said "read txt" — a copy-paste error.)
    # NOTE(review): credentials are hard-coded for demo purposes only;
    # real code should load them from configuration or the environment.
    properties = {
        "user": "root",
        "password": "123456"
    }

    df = ss.read.jdbc("jdbc:mysql://127.0.0.1:3306/test", table="test", properties=properties)
    df.show()
    # printSchema() prints directly and returns None — do not wrap it in
    # print(), which emitted a spurious "None" line.
    df.printSchema()


# https://www.kaggle.com/datasets/timmofeyy/all-the-airport-across-the-world-dataset
if __name__ == '__main__':
    # Build a local session; switch demos by (un)commenting the calls below.
    session = (
        SparkSession.builder
        .appName("read_learn")
        .master("local[*]")
        .getOrCreate()
    )
    # read_json(session)
    # read_csv(session)
    # read_txt(session)
    read_jdbc(session)
    session.stop()
