package com.sqlTest;

import com.bean.Person;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

import java.util.Arrays;

/**
 * @program: day0311
 * @description:
 * @author: CoreDao
 * @create: 2021-03-15 16:07
 **/

/**
 * Demonstrates several ways of building a Spark SQL DataFrame/Dataset in Java:
 * reading JSON, registering temp views and joining them with SQL (commented-out
 * examples), and — the active path below — dynamically creating a schema for a
 * plain-text RDD of {@link Row} objects.
 */
public class SqlCreate {
    public static void main(String[] args) {
        // Local SparkSession: the entry point for the DataFrame / SQL APIs.
        SparkSession spark = SparkSession.builder()
                .appName("ds")
                .master("local[*]")
                .getOrCreate();

        /*
         * Notes on JSON files / RDDs:
         * - the JSON Spark reads should not be too complex (no JSON nested inside JSON)
         * - Dataset and RDD can be converted back and forth
         * - inferred schema fields are ordered alphabetically (ASCII order)
         * - running SQL through the session requires registering a temp view first
         * - the native API is verbose (even more so in Java); awareness is enough
         */
        /*Dataset<Row> json = spark.read().json("src/main/resources/data/json");
        JavaRDD<Row> javaRDD = json.javaRDD();
        javaRDD.foreach(x-> System.out.println(x));
        json.show();
        json.printSchema();*/


        /*
         * In Java, a JavaSparkContext must additionally be wrapped around the
         * Scala SparkContext before JavaRDDs can be created.
         */
        /*SparkContext sc = spark.sparkContext();
        JavaSparkContext jsc = new JavaSparkContext(sc);
        JavaRDD<String> personRDD = jsc.parallelize(Arrays.asList(
                "{'name':'zs',\"age\":'18'}",
                "{'name':'lisi',\"age\":'20'}",
                "{'name':'wangwu',\"age\":'24'}"
        ));
        JavaRDD<String> scoreRDD = jsc.parallelize(Arrays.asList(
                "{'name':'zs',\"score\":'180'}",
                "{'name':'lisi',\"score\":'200'}",
                "{'name':'wangwu',\"score\":'240'}"
        ));
        Dataset<Row> df = spark.read().json(personRDD);
        df.createOrReplaceTempView("personTable");
        spark.read().json(scoreRDD).createOrReplaceTempView("scoreTable");
        // Note: each "" segment of the concatenated SQL string ends with a space.
        spark.sql("select personTable.name,personTable.age,scoreTable.score " +
                "from personTable join scoreTable on personTable.name = scoreTable.name "+
                "where scoreTable.score > 210").show();
        jsc.stop();*/


        /*
         * Non-JSON data: the RDD does not hold Row objects natively.
         * Option A: reflect over a bean class (mind the data format).
         * Option B (active below): map each line to a Row and pair it with a
         * dynamically created, hand-specified schema.
         */
        SparkContext sc = spark.sparkContext();
        JavaSparkContext jsc = new JavaSparkContext(sc);

        // Approach 1:
        // read raw lines, convert each directly into a Row object, then build
        // the DataFrame from the Row RDD plus an explicit schema.
        JavaRDD<String> lineRDD = jsc.textFile("src/main/resources/data/person.txt");
        JavaRDD<Row> rowJavaRDD = lineRDD.map(new Function<String, Row>() {
            @Override
            public Row call(String line) throws Exception {
                // Expected line format: id,name,age
                String[] split = line.split(",");
                return RowFactory.create(
                        split[0],
                        split[1],
                        Integer.valueOf(split[2])
                );
            }
        });

        // The schema must be created first: a StructType describing each field
        // via a StructField. Field types must match the values placed in the
        // Rows above — "age" is an Integer in the Row, so it must be declared
        // IntegerType here; declaring it StringType fails at runtime with
        // "java.lang.Integer is not a valid external type for schema of string".
        StructType schema = DataTypes.createStructType(Arrays.asList(
                DataTypes.createStructField("id", DataTypes.StringType, true),
                DataTypes.createStructField("name", DataTypes.StringType, true),
                DataTypes.createStructField("age", DataTypes.IntegerType, true)
        ));
        Dataset<Row> df = spark.createDataFrame(rowJavaRDD, schema);
        df.printSchema();
        df.show();



        /*dataFrame.javaRDD().map(new Function<Row, String>() {

            @Override
            public String call(Row row) throws Exception {
                // Fetching values by index without knowing the data type is risky.
                row.getLong();
                return row.getAs("id");
            }
        }).foreach(x -> System.out.println(x));*/

       /* Dataset<Row> dataFrame = spark.createDataFrame(rowJavaRDD, Person.class);

        JavaRDD<Person> rowJavaRDD = lineRDD.map(new Function<String, Person>() {
            @Override
            public Person call(String v1) throws Exception {
                Person person = new Person();
                String[] split = v1.split(",");
                person.setId(split[0]);
                person.setName(split[1]);
                person.setAge(Integer.parseInt(split[2]));

                return person;
            }
        });

        dataFrame.printSchema();
        dataFrame.show();
        dataFrame.createOrReplaceTempView("person");
        spark.sql("select * from person where id = 2").show();

        // Approach 2:
        // go through a typed Dataset instead of a Row RDD.
        Dataset<String> stringDataset = spark.read().textFile("src/main/resources/data/person.txt");
        Dataset<Person> map = stringDataset.map(new MapFunction<String, Person>() {
            @Override
            public Person call(String value) throws Exception {
                Person person = new Person();
                String[] split = value.split(",");
                person.setId(split[0]);
                person.setName(split[1]);
                person.setAge(Integer.parseInt(split[2]));
                return person;
            }
        }, Encoders.bean(Person.class));
        map.toDF();*/


        // Stopping the session also stops the underlying SparkContext
        // (and therefore the JavaSparkContext wrapper).
        spark.stop();
    }
}
