package com.roy.sparkDemos.sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static org.apache.spark.sql.functions.col;

/**
 * ./spark-submit --master yarn --class "com.roy.sparkDemos.sql.JSparkSqlDemo"
 * ./runSparkDemos.sh com.roy.sparkDemos.sql.JSparkSqlDemo
 */

public class JSparkSqlDemo {
    // HDFS paths to the sample data files used by the Spark SQL "people" examples.
    private static String jsonfilePath = "hdfs://master:9000/roy/sql/people.json";
    private static String txtfilePath = "hdfs://master:9000/roy/sql/people.txt";

    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf();
        sparkConf.setAppName("JSparkSqlDemo")
                .setMaster("local[4]");

        // Fixed: appName(...) was called twice on the builder; it is also already
        // set on sparkConf above, so the builder only needs the config.
        SparkSession sparkSession = SparkSession.builder()
                .config(sparkConf).getOrCreate();

        basicDataFrameExample(sparkSession);
        datasetCreationExample(sparkSession);
        inferSchemaExample(sparkSession);
        programmaticSchemaExample(sparkSession);
        // Stop the application; otherwise the console throws an exception on exit.
        sparkSession.stop();
    }

    /**
     * Builds a DataFrame from an RDD of Rows with a programmatically specified schema.
     * A StructType can be understood as an array of field types: StructField[].
     *
     * @param sparkSession the active SparkSession
     */
    private static void programmaticSchemaExample(SparkSession sparkSession) {
        // Read each line of the text file into an RDD.
        JavaRDD<String> peopleRDD = sparkSession.sparkContext().textFile(txtfilePath, 1).toJavaRDD();
        // Space-separated field names, in serialization order.
        // Fixed: this was "name string", which produced columns named "name" and
        // "string", so the "SELECT NAME,AGE" query below failed to resolve AGE.
        String schemaString = "name age";
        List<StructField> fields = new ArrayList<>();
        for (String fieldName : schemaString.split(" ")) {
            StructField structField = DataTypes.createStructField(fieldName, DataTypes.StringType, true);
            fields.add(structField);
        }
        StructType structType = DataTypes.createStructType(fields);
        // Convert each "name,age" line of the RDD into a Row.
        JavaRDD<Row> rowJavaRDD = peopleRDD.map((Function<String, Row>) record -> {
            String[] attributes = record.split(",");
            // trim() the age field for consistency with inferSchemaExample:
            // the sample file has a space after the comma.
            return RowFactory.create(attributes[0], attributes[1].trim());
        });

        // The key API: attach the schema to the RDD of Rows.
        Dataset<Row> peopleDs = sparkSession.createDataFrame(rowJavaRDD, structType);
        peopleDs.createOrReplaceTempView("people");
        Dataset<Row> peopleDs2 = sparkSession.sql("SELECT NAME,AGE FROM people");
        peopleDs2.show();
        Dataset<String> peopleDs3 = peopleDs2.map((MapFunction<Row, String>) row -> {
            return "Name: " + row.getString(0);
        }, Encoders.STRING());
        peopleDs3.show();
    }

    // Convert an RDD of JavaBeans to a DataFrame, letting Spark infer the schema
    // from the Person bean via reflection.
    private static void inferSchemaExample(SparkSession sparkSession) {
        JavaRDD<Person> personRDD = sparkSession.read().textFile(txtfilePath).javaRDD()
                .map(line -> {
                    String[] parts = line.split(",");
                    Person person = new Person();
                    person.setName(parts[0]);
                    person.setAge(Integer.parseInt(parts[1].trim()));
                    return person;
                });
        Dataset<Row> personDs = sparkSession.createDataFrame(personRDD, Person.class);
        personDs.createOrReplaceTempView("people");
        Dataset<Row> peopleDs2 = sparkSession.sql("SELECT NAME FROM people where age > 19");
        Encoder<String> stringEncoder = Encoders.STRING();
        Dataset<String> peopleDs3 = peopleDs2.map((MapFunction<Row, String>) row -> {
            return row.getString(0); // by index
//            row.<String>getAs("name"); // by field name
        }, stringEncoder);
        peopleDs3.show();
    }

    // Parse JSON into typed objects using an Encoder.
    private static void datasetCreationExample(SparkSession sparkSession) {
        // Everything below is created as a typed Dataset. A Dataset also exposes
        // map/foreach directly (used elsewhere in this class); .javaRDD() converts
        // it to a Java RDD when RDD APIs are needed.
        Person person1 = new Person();
        person1.setName("test1");
        person1.setAge(11);

        Person person2 = new Person();
        person2.setName("test2");
        person2.setAge(12);
        // Bean (object) encoder.
        Encoder<Person> personEncoder = Encoders.bean(Person.class);
        Dataset<Person> personDs = sparkSession.createDataset(Arrays.asList(person1, person2), personEncoder);
        personDs.show();

        // Parse Row-typed JSON records into Person objects.
        Dataset<Person> personDs2 = sparkSession.read().json(jsonfilePath).as(personEncoder);
        personDs2.show();
        // Primitive-type encoder.
        Encoder<String> stringEncoder = Encoders.STRING();
        Dataset<String> javaDs = sparkSession.createDataset(Arrays.asList("a", "b", "c"), stringEncoder);
        javaDs.show();
    }

    // Basic DataFrame queries, reading JSON from the file system.
    private static void basicDataFrameExample(SparkSession sparkSession) {
        // read() returns a data-reading utility object.
        DataFrameReader read = sparkSession.read();
        // It can read text, textFile, parquet, jdbc, csv, json, table, ...
        Dataset<Row> dataset = read.json(jsonfilePath);

        dataset.show();
        dataset.select("name").show();
        // The col(...) function and new Column(...) are equivalent.
        // Fixed: plus("1") passed a String; use the numeric literal for arithmetic.
        dataset.select(col("name"), new Column("age").plus(1)).show();

        dataset.filter(col("age").lt(30)).show();
        dataset.filter("age>15").show();
        dataset.where(col("age").gt(15)).show();
        dataset.groupBy(col("age")).count().show();
        // Register a temp view for SQL queries:
        // TempView: visible only within this session (temporary table).
        // GlobalTempView: visible across sessions (global table), stored in the
        // reserved "global_temp" database.
        dataset.createOrReplaceGlobalTempView("people");
        // Fixed: a global temp view must be qualified with "global_temp.";
        // a bare "people" reference throws a table-not-found error.
        dataset.sqlContext().sql("select * from global_temp.people").show();
        sparkSession.sql("select * from global_temp.people").show();
    }

}
