package com.tod.spark.springbootspark.sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Demo: loads people from a JSON file, filters by age via Spark SQL, then
 * looks up the matching people's scores in a second, in-memory JSON dataset.
 */
public class JSONDataSource {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("dataframe").setMaster("local");
        JavaSparkContext jsc = new JavaSparkContext(conf);
        try {
            SQLContext sqlContext = new SQLContext(jsc);
            Dataset<Row> peopleDS = sqlContext.read().json("src/main/resources/static/people.json");
            // Register a temp table over the people dataset and select the names
            // of everyone aged 23 or older.
            peopleDS.registerTempTable("people");
            Dataset<Row> resultDR = sqlContext.sql("select name,age from people where age >=23");
            List<String> resPeoples = resultDR.javaRDD().map(row -> row.getString(0)).collect();

            // Second dataset: name -> score pairs, parallelized from JSON strings.
            List<String> peopleInfoJSONs = new ArrayList<>();
            peopleInfoJSONs.add("{\"name\":\"Justin\", \"score\":103}");
            peopleInfoJSONs.add("{\"name\":\"Andy\", \"score\":120}");
            peopleInfoJSONs.add("{\"name\":\"Tod\", \"score\":110}");
            peopleInfoJSONs.add("{\"name\":\"Jack\", \"score\":108}");

            JavaRDD<String> peopleInfosRDD = jsc.parallelize(peopleInfoJSONs);
            Dataset<Row> peopleInfosDS = sqlContext.read().json(peopleInfosRDD);
            peopleInfosDS.registerTempTable("people_score");

            // An empty IN () list is invalid SQL — skip the lookup when no one matched.
            if (!resPeoples.isEmpty()) {
                // Fix: each name must be a quoted SQL string literal (the original
                // produced `in (Justin,Andy)`, which fails to parse). Embedded
                // single quotes are doubled to keep the literal well-formed.
                String inClause = resPeoples.stream()
                        .map(name -> "'" + name.replace("'", "''") + "'")
                        .collect(Collectors.joining(","));
                // Fix: people_score rows only have `name` and `score` — the
                // original selected the nonexistent `age` column.
                Dataset<Row> goodStudentInfosDF = sqlContext.sql(
                        "select name,score from people_score where name in (" + inClause + ")");

                // Next step would join the two DataFrames, e.g.:
                // peopleInfosDS.javaRDD().mapToPair(...)
            }
        } finally {
            // Always release the Spark context, even if a query above throws.
            jsc.close();
        }
    }
}
