package com.zzl.spark.sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;

import java.util.Arrays;
import java.util.List;

/**
 * Demonstrates building DataFrames from JSON-string RDDs and joining them,
 * once via the typed Column API and once via a temp-view SQL query.
 *
 * <p>Runs locally ({@code local} master); prints both join results to stdout
 * via {@code show()}.
 */
public class DataFrameOpsFromJsonRdd {
    public static void main(String[] args) {
        // Only force the Windows winutils location when the caller hasn't set one;
        // unconditionally overwriting would break runs configured externally.
        if (System.getProperty("hadoop.home.dir") == null) {
            System.setProperty("hadoop.home.dir", "E:\\hadoop");
        }

        // Sample rows as JSON strings; Spark infers the schema on read.
        List<String> nameList = Arrays.asList(
                "{'name':'lisi','age':30}",
                "{'name':'wangwu','age':29}",
                "{'name':'zhangsan','age':29}",
                "{'name':'bbb','age':29}",
                "{'name':'aaa','age':29}",
                "{'name':'lisi2','age':30}");

        List<String> scoreList = Arrays.asList(
                "{'name':'zhangsan','score':100}",
                "{'name':'lisi','score':99}");

        // App name now matches the class (was a copy-paste leftover from another demo).
        SparkConf conf = new SparkConf().setMaster("local").setAppName("DataFrameOpsFromJsonRdd");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            JavaRDD<String> nameRDD = sc.parallelize(nameList);
            JavaRDD<String> scoreRDD = sc.parallelize(scoreList);

            SQLContext sqlContext = new SQLContext(sc);
            Dataset<Row> nameDF = sqlContext.read().json(nameRDD);
            Dataset<Row> scoreDF = sqlContext.read().json(scoreRDD);

            // Inner join on "name" using the Java-friendly equalTo(), not the
            // Scala-mangled $eq$eq$eq operator name.
            nameDF.join(scoreDF, nameDF.col("name").equalTo(scoreDF.col("name")))
                    .select(nameDF.col("name"), nameDF.col("age"), scoreDF.col("score"))
                    .show();

            // createOrReplaceTempView replaces the deprecated registerTempTable.
            nameDF.createOrReplaceTempView("name");
            scoreDF.createOrReplaceTempView("score");

            String sql = "select name.name,name.age,score.score from name name join score score on name.name =score.name";
            Dataset<Row> joined = sqlContext.sql(sql);
            joined.show();
        } finally {
            // Ensure the context is released even if a Spark action above throws.
            sc.stop();
        }
    }
}
