package com.bianmaba.hive;

import com.bianmaba.beans.Info;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;


/**
 * Demo of writing a Spark {@link Dataset} into a Hive table and reading it back.
 *
 * <p>Reads comma-separated {@code id,name} records from an HDFS text file,
 * creates the {@code info} Hive table if it does not exist, overwrites it with
 * the data, and prints the table contents to verify the write.
 *
 * @author 18521
 */
public class WriteDemo {

    /** Shared session with Hive support enabled; "local" master is for single-node testing. */
    public SparkSession sparkSession = SparkSession.builder()
            .appName("test")
            .master("local")
            .enableHiveSupport()
            .getOrCreate();

    public static void main(String[] args) {
        // Run as "root" so the HDFS/Hive writes are not rejected by permission checks.
        System.setProperty("HADOOP_USER_NAME", "root");
        WriteDemo demo = new WriteDemo();
        try {
            demo.run();
        } finally {
            // Release Spark resources even if the job fails (the original never stopped the session).
            demo.sparkSession.stop();
        }
    }

    /**
     * Loads the input data, writes it into the {@code info} Hive table,
     * then reads the table back and displays it.
     */
    public void run() {
        JavaRDD<Info> rdd = createRDD();
        Dataset<Row> ds = sparkSession.createDataFrame(rdd, Info.class);
        ds.printSchema();

        sparkSession.sql("create table if not exists info(id string,name string)").show();
        sparkSession.sql("show tables").show();

        // Either of the following two approaches inserts the data:
        ds.write().mode(SaveMode.Overwrite).saveAsTable("info");
        // ds.createOrReplaceTempView("info_tem");
        // sparkSession.sql("insert into info select id,name from info_tem");

        // Read the table back and show it to verify the write succeeded.
        sparkSession.sql("select * from info").show();
    }

    /**
     * Reads comma-separated {@code id,name} lines from HDFS into an RDD of {@link Info}.
     * Malformed lines (fewer than two fields) are skipped instead of crashing the job.
     *
     * @return RDD of parsed {@link Info} records
     */
    private JavaRDD<Info> createRDD() {
        return sparkSession.read()
                .textFile("hdfs://hadoop-master:9000/input/info.txt")
                .javaRDD()
                // limit -1 keeps trailing empty fields, so "a," parses as ["a", ""]
                .map(line -> line.split(",", -1))
                // skip blank/malformed lines rather than throwing ArrayIndexOutOfBoundsException
                .filter(values -> values.length >= 2)
                .map(values -> new Info(values[0], values[1]));
    }

}