package com.zzl.spark.sql;

import java.util.ArrayList;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

/**
 * Demonstrates converting a {@code JavaRDD<String>} into a DataFrame by
 * programmatically building a {@link StructType} schema (as opposed to the
 * reflection-based approach).
 *
 * <p>Expects a local file {@code peoples.txt} whose lines are
 * {@code id,name,age} (comma-separated; whitespace around fields is tolerated).
 */
public class RDD2DataFrameByProgrammatically {

    public static void main(String[] args) {
        // Windows-only workaround so Hadoop can locate winutils.exe.
        System.setProperty("hadoop.home.dir", "E:\\hadoop");
        SparkConf conf = new SparkConf()
                .setMaster("local")
                // Fixed: appName previously said "RDD2DataFrameByReflection",
                // which did not match this class.
                .setAppName("RDD2DataFrameByProgrammatically");

        // JavaSparkContext implements Closeable; try-with-resources guarantees
        // the context is stopped even if a stage above throws.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            SQLContext sqlContext = new SQLContext(sc);

            JavaRDD<String> lines = sc.textFile("peoples.txt");
            JavaRDD<Row> rowRDD = lines.map(line -> {
                String[] fields = line.split(",");
                // trim() tolerates optional spaces after commas, e.g. "1, tom, 18".
                return RowFactory.create(
                        Integer.valueOf(fields[0].trim()),
                        fields[1].trim(),
                        Integer.valueOf(fields[2].trim()));
            });

            // Build the schema explicitly: (id: int, name: string, age: int),
            // all nullable.
            List<StructField> structFields = new ArrayList<>();
            structFields.add(DataTypes.createStructField("id", DataTypes.IntegerType, true));
            structFields.add(DataTypes.createStructField("name", DataTypes.StringType, true));
            structFields.add(DataTypes.createStructField("age", DataTypes.IntegerType, true));
            StructType schema = DataTypes.createStructType(structFields);

            Dataset<Row> dataFrame = sqlContext.createDataFrame(rowRDD, schema);
            dataFrame.show();

            // Register as a temp view so it can be queried with SQL.
            dataFrame.createOrReplaceTempView("persons");
            Dataset<Row> sql = sqlContext.sql("select * from persons where age >7");
            sql.show();

            // Round-trip back to an RDD and print each row; the previous
            // empty-lambda foreach launched a job with no observable effect.
            JavaRDD<Row> resultRDD = sql.javaRDD();
            resultRDD.foreach(row -> System.out.println(row));
        }
    }
}
