#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date, datetime
from pyspark import Row
from pyspark.sql import SparkSession, DataFrame


def write_json(df: DataFrame) -> None:
    """Write *df* as JSON files under ../../out/json, replacing any previous output."""
    writer = df.write.mode("overwrite")
    writer.json("../../out/json")


def write_csv(df: DataFrame) -> None:
    """Write *df* as CSV output twice, demonstrating two save modes.

    First: a normal multi-part write to ../../out/csv, overwriting previous runs.
    Second: a single-file write to ../../out/csv2 in append mode.

    :param df: the DataFrame to persist.
    """
    df.write.csv("../../out/csv", mode="overwrite")

    # coalesce(1) collapses to a single partition (one output file) without the
    # full shuffle that repartition(1) would trigger — cheaper for this purpose.
    df.coalesce(1).write.csv("../../out/csv2", mode="append")


def write_jdbc(df: DataFrame,
               url: str = "jdbc:mysql://127.0.0.1:3306/test",
               table: str = "test",
               user: str = "root",
               password: str = "123456") -> None:
    """Write *df* to a JDBC table, replacing its contents.

    :param df: the DataFrame to persist.
    :param url: JDBC connection URL (defaults to a local MySQL ``test`` database).
    :param table: destination table name.
    :param user: database user.
    :param password: database password.
    """
    # NOTE(security): the default credentials are plaintext in source — fine for a
    # local demo, but real deployments should load them from config/secret storage.
    properties = {
        "user": user,
        "password": password
    }
    df.write.jdbc(url, table=table, properties=properties, mode="overwrite")


# https://www.kaggle.com/datasets/timmofeyy/all-the-airport-across-the-world-dataset
if __name__ == '__main__':
    # Local Spark session using all available cores.
    spark = (SparkSession.builder
             .appName("write_learn")
             .master("local[*]")
             .getOrCreate())

    # Small in-memory sample covering int, float, string, date and timestamp columns.
    rows = [
        Row(id=1, height=176., name='ccc', birth=date(2000, 1, 1), create_time=datetime(2000, 1, 1, 12, 0)),
        Row(id=2, height=181., name='aaa', birth=date(2000, 2, 1), create_time=datetime(2000, 1, 2, 12, 0)),
        Row(id=4, height=165., name='bbb', birth=date(2000, 3, 1), create_time=datetime(2000, 1, 3, 12, 0)),
    ]
    df = spark.createDataFrame(rows)

    write_json(df)
    write_csv(df)
    spark.stop()
