package com.zhou;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;

/**
 * @Description:
 * @Author: ZhOu
 * @Date: 2018/5/28
 */
public class SqlContextDemo {

    // Root of the compiled classpath (e.g. target/classes/).
    // NOTE: the original called SqlContextDemo.class.getClass().getResource("/"),
    // which resolves against java.lang.Class's bootstrap loader and can return
    // null (NPE on getPath()); resolve against this class's own loader instead.
    private static final String RES_PATH = SqlContextDemo.class.getResource("/").getPath();

    /**
     * Demonstrates basic Spark 1.x DataFrame operations via SQLContext:
     * reading JSON, projection, filtering and aggregation.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("P_App");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            SQLContext sqlContext = new SQLContext(sc);

            // Spark infers the schema from the JSON; columns are ordered
            // alphabetically by field name, and records missing a field get null.
            DataFrame df = sqlContext.read().json(RES_PATH + "person.json");
            df.show();

            // Print the inferred schema: field names and Spark-derived types.
            df.printSchema();

            // SELECT name FROM person
            df.select("name").show();

            // SELECT name, age + 10 AS plugAge FROM person
            df.select(df.col("name"), df.col("age").plus(10).as("plugAge")).show();

            // SELECT * FROM person WHERE age > 20
            df.filter(df.col("age").gt(20)).show();

            // SELECT age, COUNT(*) FROM person GROUP BY age
            df.groupBy(df.col("age")).count().show();
        } finally {
            // Release local Spark resources even if a job above fails.
            sc.stop();
        }
    }
}
