package org.example.com.atguigu.day05;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.Properties;

/**
 * Demo: read a MySQL table through Spark's basic JDBC API.
 *
 * <p>This overload of {@code DataFrameReader.jdbc(url, table, props)} reads the
 * whole result with a SINGLE partition, so it is only suitable for small data
 * volumes. For large tables, use the overload that takes
 * {@code partitionColumn}/{@code lowerBound}/{@code upperBound}/{@code numPartitions}.
 */
public class ReaderFromMysql1 {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().master("local[4]").appName("test").getOrCreate();
        try {
            String url = "jdbc:mysql://hadoop102:3306/gmall?useUnicode=true&characterEncoding=UTF-8";
            // Push the row filter down to MySQL: a parenthesized subquery with an
            // alias is accepted wherever a table name is expected.
            String tableName = "(select * from user_info where id>=1 and id <= 5) user";
            // String tableName = "user_info";
            Properties props = new Properties();
            props.setProperty("user", "root");
            props.setProperty("password", "000000");
            // First (simplest) way to read MySQL; the resulting Dataset has exactly
            // 1 partition, so a large table would be processed by a single task.
            Dataset<Row> ds = spark.read().jdbc(url, tableName, props);
            // Print the partition count to confirm the single-partition behavior (expected: 1).
            System.out.println(ds.javaRDD().getNumPartitions()); // 1个
            ds.show();
        } finally {
            // Original code leaked the session; always release local Spark resources.
            spark.stop();
        }
    }
}
