package com.bianmaba.mysql;

import com.bianmaba.beans.Info;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * @program: hadoop-samples
 * @description: Demonstrates reading from and writing to a MySQL table through
 *               Spark SQL's JDBC data source, using option(), options(map),
 *               and Properties-based variants of the same operation.
 * @author: Chenjiabin
 * @create: 2019/6/11 17:35
 **/
public class WriteReadDemo {

    /** JDBC driver class for MySQL Connector/J 8.x. */
    private static final String DRIVER = "com.mysql.cj.jdbc.Driver";

    // hostname/ip and connection flags of the MySQL server
    private final String url = "jdbc:mysql://10.28.1.252:3306?createDatabaseIfNotExist=true&serverTimezone=UTC&characterEncoding=utf8&useUnicode=true&useSSL=false";
    // dbname: database name
    private final String db = "spark_test";
    // username: database user
    private final String user = "root";
    // password: database password; use "" if the account has no password
    private final String pwd = "root";

    // Shared session for all demo methods; stopped once at the end of main().
    // NOTE: credentials/host are hard-coded for demo purposes only — do not
    // ship real credentials in source.
    private final SparkSession spark = SparkSession.builder()
            .appName("Spark")
            .master("local[*]")
            .getOrCreate();

    public static void main(String[] args) {
        // Reuse one instance (and therefore one SparkSession) for every demo
        // call instead of building and tearing down a session per method.
        WriteReadDemo demo = new WriteReadDemo();
        demo.readTableBySparkOption("info");
        demo.readTableBySparkOptions("info");
        demo.readTableBySparkProperty("info");
        demo.saveDataBySparkOption("info");
        demo.spark.stop();
    }

    /**
     * Reads {@code db.tablename} via individual {@code option(...)} calls and
     * prints the result.
     *
     * @param tablename table to read (qualified with {@link #db})
     */
    public void readTableBySparkOption(String tablename) {
        Dataset<Row> jdbcDF = spark.read()
                .format("jdbc")
                .option("driver", DRIVER)
                .option("url", url)
                .option("dbtable", db + "." + tablename)
                .option("user", user)
                .option("password", pwd)
                .load();
        jdbcDF.show();
    }

    /**
     * Reads {@code db.tablename} by passing all JDBC settings at once through
     * {@code options(Map)} and prints the result.
     *
     * @param tablename table to read (qualified with {@link #db})
     */
    public void readTableBySparkOptions(String tablename) {
        // Plain map construction — avoids the double-brace-initialization
        // anti-pattern (anonymous subclass capturing the enclosing instance).
        Map<String, String> options = new HashMap<>();
        options.put("driver", DRIVER);
        options.put("url", url);
        options.put("dbtable", db + "." + tablename);
        options.put("user", user);
        options.put("password", pwd);
        Dataset<Row> jdbcDF = spark.read().format("jdbc").options(options).load();
        jdbcDF.show();
    }

    /**
     * Reads {@code db.tablename} through the {@code jdbc(url, table, props)}
     * convenience overload and prints the result.
     *
     * @param tablename table to read (qualified with {@link #db})
     */
    public void readTableBySparkProperty(String tablename) {
        Properties connectedProperties = new Properties();
        connectedProperties.put("driver", DRIVER);
        connectedProperties.put("user", user);
        connectedProperties.put("password", pwd);
        // customSchema pins the column types read from JDBC; removing it does
        // not change program flow, only the inferred schema.
        connectedProperties.put("customSchema", "id STRING, name STRING");
        Dataset<Row> jdbcDF2 = spark.read()
                .jdbc(url, db + "." + tablename, connectedProperties);
        jdbcDF2.show();
    }

    /**
     * Overwrites {@code db.tablename} with the DataFrame built by
     * {@link #createDF()}, configured via {@code option(...)} calls.
     *
     * @param tablename target table (qualified with {@link #db})
     */
    public void saveDataBySparkOption(String tablename) {
        Dataset<Row> df = createDF();
        df.write().mode(SaveMode.Overwrite).format("jdbc")
                .option("driver", DRIVER)
                .option("url", url)
                .option("dbtable", db + "." + tablename)
                .option("user", user)
                .option("password", pwd)
                .save();
    }

    /**
     * Writes the DataFrame built by {@link #createDF()} to
     * {@code db.tablename} through the {@code jdbc(url, table, props)}
     * overload, pinning the created column types.
     *
     * @param tablename target table (qualified with {@link #db})
     */
    public void saveDataBySparkProperty(String tablename) {
        Dataset<Row> df = createDF();
        Properties connectedProperties = new Properties();
        connectedProperties.put("driver", DRIVER);
        connectedProperties.put("user", user);
        connectedProperties.put("password", pwd);
        df.write()
                .option("createTableColumnTypes", "id char(20), name char(30)")
                .jdbc(url, db + "." + tablename, connectedProperties);
    }

    /**
     * Builds a two-column (id, name) DataFrame from a CSV-ish text file on
     * HDFS and prints its schema.
     *
     * <p>Assumes every line of info.txt has at least two comma-separated
     * fields; a malformed line would fail the job with an
     * ArrayIndexOutOfBoundsException — TODO confirm input guarantees.
     *
     * @return DataFrame with the {@code Info} bean schema
     */
    private Dataset<Row> createDF() {
        JavaRDD<Info> infoRDD = spark.read()
                .textFile("hdfs://hadoop-master:9000/input/info.txt")
                .javaRDD()
                .map(line -> {
                    String[] values = line.split(",");
                    return new Info(values[0], values[1]);
                });
        Dataset<Row> infoDF = spark.createDataFrame(infoRDD, Info.class);
        infoDF.printSchema();
        return infoDF;
    }

}
