package com.youbu.demo;

import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Date;

import com.youbu.demo.model.UserIntegralRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.storage.StorageLevel;


/**
 * Aggregates per-user integral (points) totals from an HDFS text file
 * using the Spark SQL API.
 * @author sunfangwei
 **/
public class IntegralCountDemoThree {

	/**
	 * Entry point. Reads "userId,amount" lines from {@code args[0]}, sums the
	 * amount per user, and writes the totals (ordered by amount, descending)
	 * to a timestamped subdirectory of {@code args[1]}.
	 *
	 * @param args {@code args[0]} = input file path, {@code args[1]} = output base path
	 * @throws IllegalArgumentException if fewer than two arguments are supplied
	 */
	public static void main(String[] args) {
		// Fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 2) {
			throw new IllegalArgumentException(
					"Usage: IntegralCountDemoThree <inputPath> <outputBasePath>");
		}
		String filePath = args[0];
		String saveFilePath = args[1];

		SparkConf config = new SparkConf();
		config.setAppName("IntegralCountSql");
		// Kryo serialization is faster and more compact than Java serialization
		// for records that are cached or shuffled.
		config.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
		Class[] cs = {UserIntegralRecord.class};
		config.registerKryoClasses(cs);

		// Create the Spark SQL session.
		SparkSession sparkSession = SparkSession.builder().config(config).getOrCreate();
		try {
			// Read the raw input file; each line is expected to be "userId,amount".
			JavaRDD<String> input = sparkSession.read().textFile(filePath).javaRDD();

			// Parse each line into a record, splitting the line only once
			// (the original split the same line twice per record).
			JavaRDD<UserIntegralRecord> countRdd = input
					.map(line -> {
						String[] fields = line.split(",");
						return new UserIntegralRecord(fields[0], Integer.valueOf(fields[1]));
					})
					// NOTE(review): persisted as in the original, though only the
					// single save action below consumes it — confirm the cache is needed.
					.persist(StorageLevel.MEMORY_AND_DISK_SER());

			// Convert the RDD to a DataFrame and sum the amount per user.
			Dataset<Row> totals = sparkSession
					.createDataFrame(countRdd, UserIntegralRecord.class)
					.select("userId", "amount")
					.groupBy("userId").sum("amount")
					.withColumnRenamed("sum(amount)", "amount");

			// Save the result ordered by total amount (descending) into a
			// timestamped directory. DateTimeFormatter replaces the legacy,
			// non-thread-safe SimpleDateFormat.
			String timestamp =
					LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMddHHmmss"));
			totals.orderBy(totals.col("amount").desc())
					.javaRDD()
					.saveAsTextFile(saveFilePath + "/" + timestamp);
		} finally {
			// Release the session even if the job fails.
			sparkSession.close();
		}
	}
}
