package cn.lsh.spark.sql;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import scala.reflect.ClassTag;
import scala.reflect.ManifestFactory;

import java.util.Arrays;
import java.util.List;
import java.util.Random;

/**
 * Copyright: Copyright (c) 2020 Asiainfo
 *
 * @ClassName: cn.lsh.spark.sql.UDFTest
 * @Description: Demonstrates Spark SQL function extension points: registering a
 * Java UDF/UDAF, invoking a Hive UDTF via SQL, and running the Hive
 * row_number() window function, both against Hive and against local data.
 * @version: v1.0.0
 * @author: 52879
 * @date: 2020/11/27 16:58
 * <p>
 * Modification History:
 * Date         Author          Version            Description
 * ------------------------------------------------------------
 * 2020/11/27      liush5          v1.0.0               initial version
 */
public class UDFTest {

	public static void main(String[] args) throws AnalysisException {
		// test();
		// testUDTF();
		// testWindowFunc();
		testWindowFuncToHive();
	}

	/**
	 * Builds a one-column ("name") DataFrame from an in-memory list, registers it
	 * as the temp view "users", and runs one of the UDF/UDAF demos against it.
	 *
	 * @throws AnalysisException if a SQL statement fails analysis
	 */
	public static void test() throws AnalysisException {
		SparkSession sparkSession = SparkSession.builder().master("local").appName("UDF test").getOrCreate();
		JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());
		JavaRDD<String> parallelize = jsc.parallelize(Arrays.asList("zhangsan", "lisi", "liudehua", "wangchaowei"));
		JavaRDD<Row> rowRdd = parallelize.map(RowFactory::create);
		// Build the schema inline (as testGroupUdaf does) instead of going through
		// a raw, ungenerified List variable.
		StructType structType = DataTypes.createStructType(
				Arrays.asList(DataTypes.createStructField("name", DataTypes.StringType, false)));
		Dataset<Row> dataFrame = sparkSession.createDataFrame(rowRdd, structType);
		// createOrReplaceTempView replaces the deprecated registerTempTable.
		dataFrame.createOrReplaceTempView("users");
		// testUdf(sparkSession);
		// testUdaf(sparkSession);
		testGroupUdaf(sparkSession);
		sparkSession.close();
	}

	/**
	 * Registers the single-argument UDF {@link StrLenUdf} as "strLength" and
	 * applies it to the "users" temp view (expected to be created by {@link #test()}).
	 *
	 * @param sparkSession active session that already has the "users" view
	 */
	public static void testUdf(SparkSession sparkSession) {
		// StrLenUdf::new cannot be used here: a constructor reference would be
		// treated as a no-arg UDF0 factory, not as the UDF1 instance itself.
		// sparkSession.udf().register("strLength", StrLenUdf::new, DataTypes.IntegerType);
		// The third argument is the UDF's declared return type.
		sparkSession.udf().register("strLength", new StrLenUdf(), DataTypes.IntegerType);
		// sparkSession.sql("select strLength('123')").show();
		// BUG FIX: the query previously invoked strLen(name) while the function was
		// registered under the name strLength, causing an "undefined function" error.
		sparkSession.sql("select name, strLength(name) as length from users").show();
	}

	/**
	 * Registers and exercises two UDAFs ({@link CountStrLen}, {@link AvgLen})
	 * against the "users" temp view.
	 *
	 * @param sparkSession active session that already has the "users" view
	 */
	public static void testUdaf(SparkSession sparkSession) {
		sparkSession.udf().register("countLength", new CountStrLen());
		sparkSession.sql("select countLength(name) as length from users").show();
		sparkSession.udf().register("avgLen", new AvgLen());
		sparkSession.sql("select avgLen(name) as avg_length from users").show();
	}

	/**
	 * Loads a comma-separated user file (id,name,sex[,age]), registers it as
	 * temp view "user_2", and aggregates with the {@link CountStrLen} UDAF
	 * grouped by sex. The age column is optional in the input and defaults to 0.
	 *
	 * @param sparkSession active session used to read the file and run SQL
	 */
	public static void testGroupUdaf(SparkSession sparkSession) {
		JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());
		JavaRDD<String> lines = jsc.textFile("file:/bigdata/hadoop-test/input/spark/user.txt");
		JavaRDD<Row> rowRDD = lines.map(l -> {
			String[] arr = l.split(",");
			int age = 0;
			if (arr.length > 3) {
				age = Integer.parseInt(arr[3]);
			}
			return RowFactory.create(arr[0], arr[1], Integer.parseInt(arr[2]), age);
		});
		StructType structType = DataTypes.createStructType(Arrays.asList(
				DataTypes.createStructField("id", DataTypes.StringType, false),
				DataTypes.createStructField("name", DataTypes.StringType, false),
				DataTypes.createStructField("sex", DataTypes.IntegerType, false),
				DataTypes.createStructField("age", DataTypes.IntegerType, true)
		));
		Dataset<Row> dataFrame = sparkSession.createDataFrame(rowRDD, structType);
		// createOrReplaceTempView replaces the deprecated registerTempTable.
		dataFrame.createOrReplaceTempView("user_2");
		sparkSession.udf().register("countLength", new CountStrLen());
		sparkSession.sql("select sex, countLength(name) as length from user_2 group by sex").show();
	}

	/**
	 * Registers a Hive UDTF (table-generating function) through SQL and applies
	 * it to an in-memory "users" view. Requires a reachable Hive metastore.
	 */
	public static void testUDTF() {
		// NOTE(review): there is a known Spark SQL bug where a second invocation of
		// a Hive custom UDF can raise an exception — keep an eye on repeated calls.
		SparkSession sparkSession = SparkSession.builder().enableHiveSupport()
				.master("local")
				.appName("test udtf")
				.config("hive.metastore.uris", "thrift://node00:9083")
				.getOrCreate();
		JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());
		JavaRDD<String> parallelize = jsc.parallelize(Arrays.asList("zhang san", "li si", "liu de hua", "wang chao wei"));
		JavaRDD<Row> rowRdd = parallelize.map(RowFactory::create);
		// Inline schema construction avoids the raw List variable used previously.
		StructType structType = DataTypes.createStructType(
				Arrays.asList(DataTypes.createStructField("name", DataTypes.StringType, false)));
		Dataset<Row> dataFrame = sparkSession.createDataFrame(rowRdd, structType);
		// createOrReplaceTempView replaces the deprecated registerTempTable.
		dataFrame.createOrReplaceTempView("users");
		// Register the function in Hive by executing DDL through Spark SQL.
		sparkSession.sql("CREATE TEMPORARY FUNCTION UserDefinedUDTF as 'cn.lsh.spark.sql.UserDefinedUDTF'");
		sparkSession.sql("select UserDefinedUDTF(name) from users").show();
		sparkSession.stop();
	}

	/**
	 * Hive window function demo: row_number().
	 * row_number() over (partition by xx order by xxx desc) as rank
	 * Loads sales data into a Hive table, ranks rows per type by price, persists
	 * the ranked result, and shows the top 3 per type. Requires a Hive metastore.
	 */
	public static void testWindowFuncToHive() {
		SparkSession sparkSession = SparkSession.builder().enableHiveSupport()
				.master("local")
				.appName("test udtf")
				.config("hive.metastore.uris", "thrift://node00:9083")
				.getOrCreate();
		sparkSession.sql("use test");
		sparkSession.sql("create table if not exists sales (s_date int, type string, price int)" +
				" row format delimited fields terminated by ' '");
		sparkSession.sql("load data inpath '/test/input/spark/salves.txt' into table sales");
		Dataset<Row> result = sparkSession.sql("select s_date, type, price, row_number() over (partition by type order by price desc ) rank from sales");
		result.write().mode(SaveMode.Overwrite).saveAsTable("sale_result");
		sparkSession.sql("select s_date, type, price from sale_result where rank <= 3").show(100);
		sparkSession.close();
	}

	/**
	 * Window function demo on a plain (non-Hive) session. Parses space-separated
	 * lines of "s_date type price" into a three-column Dataset and ranks rows per
	 * type by price.
	 */
	public static void testWindowFunc() {
		SparkSession sparkSession = SparkSession.builder().master("local").appName("test udtf").getOrCreate();
		Dataset<String> lines = sparkSession.read().textFile("file:/bigdata/hadoop-test/input/spark/salves.txt");
		// BUG FIX: the original parsed column [0] for all three fields; type and
		// price come from columns [1] and [2]. Also split each line only once.
		Dataset<Row> dataFrame = lines.map(l -> {
			String[] arr = l.split(" ");
			return RowFactory.create(Integer.parseInt(arr[0]), arr[1], Integer.parseInt(arr[2]));
		}, new Encoder<Row>() {

			// NOTE(review): a hand-rolled Encoder is not generally supported by
			// Spark's runtime; RowEncoder(schema) is the supported alternative —
			// verify this anonymous class actually works on your Spark version.
			private static final long serialVersionUID = -2924852966459752422L;

			@Override
			public StructType schema() {
				return DataTypes.createStructType(Arrays.asList(
						DataTypes.createStructField("s_date", DataTypes.IntegerType, false),
						DataTypes.createStructField("type", DataTypes.StringType, false),
						DataTypes.createStructField("price", DataTypes.IntegerType, false)));
			}

			@Override
			public ClassTag<Row> clsTag() {
				// BUG FIX: returning null here leads to an NPE inside Spark.
				// Manifest implements ClassTag, so ManifestFactory supplies a real tag.
				return ManifestFactory.classType(Row.class);
			}
		});
		// createOrReplaceTempView replaces the deprecated registerTempTable.
		dataFrame.createOrReplaceTempView("salves");
		// Window functions here are evaluated by Spark's Hive-compatible planner.
		Dataset<Row> result = sparkSession.sql("select s_date, type, price, row_number() over (partition by type order by price desc ) rank from salves");
		result.show();

		sparkSession.close();
	}

	/**
	 * Emits 100 random sample lines of "s_date type price" (date 1-10, type A-F,
	 * price 100-800 in steps of 100) to stdout, for building the sales test file.
	 */
	public static void testData() {
		char[] cs = new char[] { 'A', 'B', 'C', 'D', 'E', 'F' };
		Random random = new Random();
		for (int i = 0; i < 100; i++) {
			int r = random.nextInt(10) + 1;
			int s = random.nextInt(6);
			int p = random.nextInt(8) + 1;
			System.out.println(r + " " + cs[s] + " " + p * 100);
		}
	}

}

