package JJJJJJava.ParquetLoadData;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;

import java.util.List;

/**
 * @Auther: Mengkunxuan
 * @Date: 2018/9/26 17:11
 * @Description:
 * Parquet data source: loading data programmatically (Spark SQL).
 *
 */
/**
 * Demonstrates loading a Parquet file programmatically with Spark SQL,
 * registering it as a temporary table, querying it with SQL, and printing
 * the resulting names to stdout.
 */
public class ParquetLoadDataJava {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("ParquetLoadData").setMaster("local");
        // try-with-resources: JavaSparkContext is Closeable; the original code
        // leaked it (never called stop()/close()).
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            SQLContext sqlcontext = new SQLContext(sc);

            // Read the Parquet file into a DataFrame.
            // NOTE(review): hard-coded absolute Windows path — consider taking it
            // from args[0] so the job is portable.
            DataFrame userDF = sqlcontext.read().parquet(
                    "C:\\Users\\Administrator\\Desktop\\第一阶段代码\\第76讲-Spark SQL：数据源之通用的load和save操作\\文档\\users.parquet");

            // Register the DataFrame as a temp table and select the column we need.
            userDF.registerTempTable("users");
            DataFrame userNamesDF = sqlcontext.sql("select name from users");

            // Transform each Row to a display string, then collect() (a Spark
            // action) to pull all results back to the driver.
            List<String> userNames = userNamesDF.javaRDD()
                    .map((Function<Row, String>) row -> "Name" + row.getString(0))
                    .collect();

            for (String userName : userNames) {
                System.out.println(userName);
            }
        }
    }
}