package org.shj.spark.streaming;

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

/**
 * Demonstrates converting a Streaming RDD into a SQL-queryable DataFrame:
 * each micro-batch of text lines is parsed into Rows, registered as a
 * temporary view, and queried with a window function to report the top-2
 * selling items per category.
 * 
 * @author Administrator
 *
 */
public class Stream2Sql {

	/** Schema of one sales record: category, item name, units sold. Loop-invariant, so built once. */
	private static final StructType SCHEMA = DataTypes.createStructType(Arrays.asList(
			DataTypes.createStructField("Category", DataTypes.StringType, true),
			DataTypes.createStructField("Item", DataTypes.StringType, true),
			DataTypes.createStructField("SaleNum", DataTypes.IntegerType, true)));

	public static void main(String[] args) throws Exception{
		SparkConf conf = new SparkConf().setAppName("Stream2Sql");
		conf.setMaster("local[*]");
		
		final JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(10));
		jsc.sparkContext().setLogLevel("WARN");
		
		Queue<JavaRDD<String>> queue = new LinkedList<JavaRDD<String>>();
		prepareTestData(queue, jsc.sparkContext());
		
		JavaDStream<String> input = jsc.queueStream(queue);
		
		// Parse each whitespace-separated line "category item saleNum" into a Row.
		// In this example the per-item total is also computed inside the SQL query.
		JavaDStream<Row> map = input.map(new Function<String, Row>() {
			private static final long serialVersionUID = 6044388961034156755L;

			@Override
			public Row call(String v1) throws Exception {
				String[] arr = v1.split("\\s+");
				return RowFactory.create(arr[0], arr[1], Integer.parseInt(arr[2]));
			}
		});
		
		map.foreachRDD(new VoidFunction<JavaRDD<Row>>() {
			private static final long serialVersionUID = 7011268667652142162L;

			@Override
			public void call(JavaRDD<Row> rdd) throws Exception {
				if(rdd.isEmpty()) {
					return; // skip empty micro-batches — nothing to register or query
				}
				// getOrCreate() returns a lazily-instantiated singleton session,
				// the pattern recommended by the Spark Streaming programming guide
				// for SQL access inside foreachRDD.
				SparkSession spark = SparkSession.builder()
						.config(rdd.context().getConf())
						.enableHiveSupport()
						.getOrCreate();
				Dataset<Row> df = spark.createDataFrame(rdd, SCHEMA);
				
				// createOrReplaceTempView replaces the deprecated
				// SQLContext.registerDataFrameAsTable API.
				df.createOrReplaceTempView("tmptbl");
				// Top-2 items by total sales per category. Every derived table
				// carries an alias ("agg", "tmp") — the Hive-enabled parser
				// rejects unaliased subqueries.
				String sql = "select Category, Item, total"
						+ "   from (select Category, Item, total, "
						+ "               row_number() over (partition by Category order by total desc) rank "
						+ "         from (select Category, Item, sum(SaleNum) total from tmptbl "
						+ "              group by Category, Item) agg "
						+ "        ) tmp "
						+ "   where rank <=2";
				spark.sql(sql).show();
			}
		});
		
		jsc.start();
		jsc.awaitTermination();
		jsc.close();
	}

	/**
	 * Enqueues one test RDD of "category item saleNum" lines so that
	 * {@code queueStream} has data to emit on the first batch.
	 *
	 * @param queue destination queue consumed by {@link JavaStreamingContext#queueStream}
	 * @param sc    context used to parallelize the sample data into 2 partitions
	 */
	private static void prepareTestData(Queue<JavaRDD<String>> queue, JavaSparkContext sc) {
		List<String> list = Arrays.asList("pc ASUS 50",
				"pc dell 90",
				"pc Lenove 88",
				"pc apple 95",
				"pc ASUS 51",
				"mobile HUAWEI 60",
				"mobile MI 70",
				"mobile apple 80",
				"mobile meizu 90",
				"mobile HUAWEI 50",
				"jiadian haier 89",
				"jiadian media 99",
				"jiadian ximenzi 70",
				"jiadian TCL 80",
				"xifashui bawang 90",
				"xifashui xiashiliang 100",
				"mianmo lanzi 60",
				"mianmo lankou 90",
				"mianmo yashilandai 80");
		queue.add(sc.parallelize(list, 2));
	}
}
