package org.shj.spark.dataframe;

import java.util.ArrayList;
import java.util.List;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.shj.spark.entity.Student;

/**
 * After converting an RDD into a Dataset we can operate on the data with plain SQL.
 * Notes:
 * 1. The JavaBean used for the conversion must implement Serializable and must be public.
 * 2. After converting back from the Dataset, the fields have been re-ordered —
 *    see the comments in the code below.
 *
 */
public class RDD2DataFrame {

	/** Default input file, used when no path is supplied as the first program argument. */
	private static final String DEFAULT_INPUT =
			"E:/workspace/scala/sparkjava/src/main/resources/student.txt";

	public static void main(String[] args) {
		// Generalization (backward-compatible): allow the input path to be overridden
		// via args[0]; with no arguments the original hard-coded path is used.
		String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;

		SparkSession ss = SparkSession.builder().appName("RDD2DataFrame").master("local").getOrCreate();
		ss.sparkContext().setLogLevel("WARN");

		JavaSparkContext jsc = new JavaSparkContext(ss.sparkContext());

		// Each line of the input is expected to be "name,age".
		JavaRDD<String> rdd = jsc.textFile(inputPath, 2);

		// 1. Convert via a JavaBean. Spark's Function interface extends Serializable,
		// so a lambda replaces the anonymous inner class (and its explicit serialVersionUID).
		JavaRDD<Student> stus = rdd.map((Function<String, Student>) line -> {
			String[] arr = line.split(",");
			Student s = new Student();
			s.setName(arr[0]);
			s.setAge(Integer.parseInt(arr[1]));
			return s;
		});

		Dataset<Row> ds = ss.createDataFrame(stus, Student.class);
		ds.orderBy("age").show();
		System.out.println("===============");

		// Register a temporary view for SQL queries. SQLContext.registerDataFrameAsTable
		// is deprecated since Spark 2.0; createOrReplaceTempView is the replacement and
		// is what the second half of this example already used.
		ds.createOrReplaceTempView("student");

		Dataset<Row> sqlDs = ss.sql("select * from student where age > 18");
		JavaRDD<Row> javaRDD = sqlDs.javaRDD();

		JavaRDD<Student> map = javaRDD.map((Function<Row, Student>) row -> {
			Student s = new Student();
			// Do NOT use positional getters here (e.g. row.getInt(1) / row.getString(0)):
			// createDataFrame(rdd, Student.class) re-orders the bean's fields, so the
			// column order no longer matches the input file and positional access throws
			// a ClassCastException. Resolve the column index by name instead.
			s.setAge(row.getInt(row.fieldIndex("age")));
			s.setName(row.getString(row.fieldIndex("name")));
			return s;
		});

		map.foreach((VoidFunction<Student>) System.out::println);

		System.out.println("===============");

		// 2. Convert without a JavaBean, by pairing a JavaRDD<Row> with an explicit schema.
		JavaRDD<Row> rowRdd = rdd.map((Function<String, Row>) line -> {
			String[] arr = line.split(",");
			return RowFactory.create(arr[0], Integer.parseInt(arr[1]));
		});

		// Schema must list the columns in the same order as RowFactory.create above.
		List<StructField> structFields = new ArrayList<StructField>();
		structFields.add(DataTypes.createStructField("name", DataTypes.StringType, true));
		structFields.add(DataTypes.createStructField("age", DataTypes.IntegerType, true));

		StructType structType = DataTypes.createStructType(structFields);

		Dataset<Row> dataFrame = ss.createDataFrame(rowRdd, structType);
		long cnt = dataFrame.filter("age > 18").count();
		System.out.println(cnt);

		// Expose the DataFrame as a temporary view and query it with SQL.
		dataFrame.createOrReplaceTempView("stu");
		Dataset<Row> result = ss.sql("select * from stu where age > 18");
		result.foreach((ForeachFunction<Row>) System.out::println);

		jsc.close();
		ss.stop();
	}

}
