package wlw;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.Properties;

/**
 * Demo Spark job: reads a single column from a Hive table with Spark SQL and
 * appends the rows into a MySQL table over JDBC.
 *
 * <p>Entry point is {@link #main(String[])}, which builds the session and
 * delegates the actual transfer to {@link #hive2db(SparkSession)}.
 */
public class MyDemo {

    public static void main(String[] args) {
        // Build a Hive-enabled session so spark.sql(...) can resolve Hive tables.
        SparkSession spark = SparkSession.builder().appName("load data to mysql")
                .config("spark.sql.hive.convertMetastoreParquet", "true")
                .config("spark.sql.sources.commitProtocolClass", "org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol")
                .config("spark.sql.parquet.output.committer.class", "com.amazon.emr.committer.EmrOptimizedSparkSqlParquetOutputCommitter")
                .config("spark.sql.autoBroadcastJoinThreshold", "20971520")   // 20 MiB
                // BUG FIX: the original set spark.sql.adaptive.shuffle.targetPostShuffleInputSize
                // twice — first to "true" (a non-numeric value that the next .config immediately
                // overwrote), so adaptive execution was never actually switched on. The "true"
                // was clearly intended for the AQE enable flag below.
                .config("spark.sql.adaptive.enabled", "true")
                .config("spark.sql.adaptive.shuffle.targetPostShuffleInputSize", "209715200") // 200 MiB
                .config("spark.locality.wait", 10)
                .config("spark.debug.maxToStringFields", 55)
                .enableHiveSupport().getOrCreate();
        hive2db(spark);
        spark.close();
    }

    /**
     * Reads {@code pid} from the Hive table {@code ads_dmp.dim_ads_adposition_another}
     * via Spark SQL and appends the rows into the MySQL table {@code wlw_test_1}
     * through JDBC (driver {@code com.mysql.cj.jdbc.Driver}).
     *
     * @param spark an active, Hive-enabled {@link SparkSession}; not closed here —
     *              the caller owns its lifecycle
     */
    public static void hive2db(SparkSession spark) {
        String url = "jdbc:mysql://test.ads-bill.ads.sg1.mysql:3306/ads_bill?characterEncoding=UTF-8";
        System.out.println(url);
        String table = "wlw_test_1";
        Properties props = new Properties();
        // NOTE(review): credentials are hard-coded in source. Before this leaves a
        // demo context, move user/password into configuration or a secrets manager.
        props.put("user", "jifei");
        props.put("password", "dbvqTuOkDFWBce");
        props.put("driver", "com.mysql.cj.jdbc.Driver");
        String query = "select pid from ads_dmp.dim_ads_adposition_another";
        // The query already projects only pid, so the redundant .select("pid") was removed.
        Dataset<Row> rows = spark.sql(query);
        rows.printSchema();
        // SaveMode.Append: rows are added to the existing table; it is never truncated.
        rows.write().mode(SaveMode.Append).jdbc(url, table, props);
    }
}
