package misssad.simple_project;

import java.util.Arrays;
import java.util.Collections;

import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

import model.Person;

public class SimpleData {

	/**
	 * Demonstrates three ways of building typed Spark {@code Dataset}s:
	 * from a Java bean, from primitive values with a built-in encoder,
	 * and from a JSON file converted via a bean encoder. Each dataset is
	 * printed to stdout with {@code show()}.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {

		// Windows-only workaround: point Hadoop at a local winutils install.
		System.setProperty("hadoop.home.dir", "D:\\Program Files\\hadoop-2.7.6");
		SparkSession spark = SparkSession.builder().appName("喻涛").getOrCreate();
		try {
			Person person = new Person();
			person.setAge(32);
			person.setName("Andy");
			// Encoder maps the Person bean to Spark's internal row format.
			Encoder<Person> encoder = Encoders.bean(Person.class);
			// Create a Dataset from a single Java bean.
			Dataset<Person> dataset = spark.createDataset(Collections.singletonList(person), encoder);
			dataset.show();
			// Spark provides encoders for common types, e.g. Encoders.INT().
			Encoder<Integer> intEncoder = Encoders.INT();
			Dataset<Integer> primitiveDS = spark.createDataset(Arrays.asList(1, 2, 3), intEncoder);
			Dataset<Integer> transformedDS = primitiveDS.map((MapFunction<Integer, Integer>) value -> value + 1, intEncoder);
			// Trigger the lazy map above — without an action it would never execute.
			transformedDS.show();
			String path = "D:\\Program Files\\spark-2.3.1-bin-hadoop2.7\\examples\\src\\main\\resources\\people.json";
			// Supplying a bean encoder converts the untyped DataFrame into a typed Dataset.
			Dataset<Person> people = spark.read().json(path).as(encoder);
			people.show();
		} finally {
			// Always release the SparkSession, even if a step above throws.
			spark.stop();
		}
	}
}
