package com.ts.blog.batch.drools;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.Properties;
import java.util.UUID;

/**
 * Spark driver that loads the {@code blog} table from a remote MySQL database
 * and processes each RDD partition with a Drools rule handler, saving the
 * results as text files.
 *
 * @author yishiyong
 * @since 2018-12-25
 */
public class SparkDrools {

    /**
     * Entry point: starts a local Spark session, runs the JDBC-read plus
     * Drools-processing pipeline, then shuts the session down.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        SparkSession session = SparkSession.builder()
                .master("local[5]")
                .appName("SparkMysql")
                .getOrCreate();
        try {
            // Read MySQL data and run it through the Drools handler.
            readMySQLRemote(session);
        } finally {
            // Always stop the SparkContext, even if the job throws.
            session.stop();
        }
    }

    /**
     * Reads the {@code blog} table from a remote MySQL database, pipes every
     * partition through {@link DroolsHandle}, and writes the transformed
     * records as text files under {@code D://spark//<random-uuid>}.
     * Elapsed times for the processing step and the whole method are printed
     * to stdout.
     *
     * @param session the active Spark session used for the JDBC read
     */
    private static void readMySQLRemote(SparkSession session) {
        long start = System.currentTimeMillis();

        // NOTE(review): connection details and credentials are hard-coded;
        // they should be externalized (e.g. spark-submit --conf or a
        // properties file) and the password should not live in source.
        String url = "jdbc:mysql://172.18.101.97:3306/lnaudit";
        // Table to read.
        String table = "blog";

        // JDBC user, password and driver class for the connection.
        Properties connectionProperties = new Properties();
        connectionProperties.put("user", "lnaudit");
        connectionProperties.put("password", "lnaudit");
        // NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5.x
        // class name; Connector/J 8+ uses com.mysql.cj.jdbc.Driver.
        connectionProperties.put("driver", "com.mysql.jdbc.Driver");

        System.out.println("读取lnaudit数据库中的blog表内容");
        // Read the whole table. The previous .option("header", "true") only
        // applies to file sources and is ignored by the JDBC reader, and
        // .select("*") was a no-op — both removed.
        Dataset<Row> jdbcDF = session.read().jdbc(url, table, connectionProperties);
        // Show a sample of the loaded data.
        jdbcDF.show();

        long processStart = System.currentTimeMillis();
        // Process each partition with the Drools rule engine and persist the
        // results to disk instead of collect()-ing to the driver, which
        // would risk OOM on large tables. A random UUID keeps each run's
        // output directory unique.
        jdbcDF.javaRDD()
                .mapPartitions(new DroolsHandle("com.ts.blog.kie", "Blog"))
                .saveAsTextFile("D://spark//" + UUID.randomUUID());
        long processEnd = System.currentTimeMillis();
        System.out.println("处理数据时间：" + (processEnd - processStart));

        long end = System.currentTimeMillis();
        System.out.println("加载数据和处理数据总时间：" + (end - start));
    }

}

