package com.sparksql;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.junit.Test;

import java.util.*;

import static org.apache.hadoop.hdfs.server.namenode.ListPathsServlet.df;

public class TestSparkSQL {

    /**
     * Demonstrates DSL-style (Column-based) queries on a JSON-backed DataFrame:
     * select / groupBy / count / orderBy without writing SQL text.
     */
    @Test
    public void testSparkSession() {
        // SparkSession implements Closeable; try-with-resources guarantees the
        // underlying SparkContext is stopped even when the body throws.
        try (SparkSession spark = SparkSession.builder()
                .appName("testSparkSession").master("local").getOrCreate()) {

            // Build a DataFrame from a local JSON file.
            // NOTE(review): hard-coded Windows path — this only runs on machines
            // that actually have D:/word/person.json.
            Dataset<Row> df = spark.read().json("D:/word/person.json");
            df.show();

            // 1. DSL-style operations via Column objects.
            Column age = new Column("age");
            Column name = new Column("name");
            // Lazy transformation kept for reference: select name and age + 1.
            Dataset<Row> df2 = df.select(name, age.plus(1));

            // Group by (name, age), count rows, order by count desc then age desc.
            Column count = new Column("count");
            Dataset<Row> ranking =
                    df.groupBy("name", "age").count().orderBy(count.desc(), age.desc());

            ranking.sort("count").show();      // re-sorted ascending by count
            ranking.sort(count.desc()).show(); // re-sorted descending by count
        }
    }

    /**
     * Demonstrates SQL-style queries: a DataFrame must first be registered as a
     * (session-scoped or global) temporary view before it can be queried by name.
     */
    @Test
    public void testSparkSQL() throws AnalysisException {
        try (SparkSession spark = SparkSession.builder()
                .appName("testSparkSession").master("local").getOrCreate()) {

            Dataset<Row> df = spark.read().json("D:/word/person.json");
            df.show();

            // 1. SQL-style operations need the DataFrame registered as a view.
            // registerTempTable is deprecated since Spark 2.0 — use
            // createOrReplaceTempView, which registers the same session-scoped view.
            df.createOrReplaceTempView("t_person");

            df.createOrReplaceTempView("t_person2");

            // A global temp view lives in the reserved "global_temp" database and
            // is visible from other sessions of the same Spark application.
            df.createGlobalTempView("person");
            spark.newSession().sql("select * from global_temp.person").show();
        }
    }


    /** Builds a typed Dataset&lt;Person&gt; from a local List and aggregates with DSL calls. */
    @Test
    public void testDataSet() {
        try (SparkSession spark =
                SparkSession.builder().appName("test").master("local").getOrCreate()) {
            List<Person> list = new ArrayList<>();
            list.add(new Person(1, 23, "zs"));
            list.add(new Person(2, 13, "az"));
            list.add(new Person(3, 33, "lisi"));
            list.add(new Person(1, 33, "zs"));
            list.add(new Person(2, 24, "az"));
            list.add(new Person(3, 25, "lisi"));

            // A bean Encoder derives the schema from the Person JavaBean.
            Dataset<Person> dataset = spark.createDataset(list, Encoders.bean(Person.class));
            dataset.show();
            // Maximum age per id.
            dataset.groupBy("id").max("age").show();
        }
    }

    /** Registers the custom aggregate function MyAverage and invokes it from SQL. */
    @Test
    public void testagg() {
        try (SparkSession spark = SparkSession.builder()
                .appName("testSparkSession").master("local").getOrCreate()) {

            spark.udf().register("myavg", new MyAverage());
            Dataset<Row> df = spark.read().json("data/1.json");
            df.show();
            df.createOrReplaceTempView("emp");
            spark.sql("select myavg(salary) as avg_sal from emp").show();
        }
    }

    /** Joins two DataFrames on id and projects the "count" column from each side. */
    @Test
    public void testJoin() {
        // crossJoin must be enabled because join(df2) with no condition builds a
        // cartesian product that is only narrowed afterwards by the where clause.
        try (SparkSession spark = SparkSession.builder()
                .config("spark.sql.crossJoin.enabled", "true")
                .appName("join").master("local").getOrCreate()) {

            List<Person> list1 = new ArrayList<>();
            list1.add(new Person(1, 13, "zs", 23));
            list1.add(new Person(2, 23, "az", 45));
            list1.add(new Person(3, 63, "lisi", 78));

            List<Person> list2 = new ArrayList<>();
            list2.add(new Person(1, 13, "zs", 43));
            list2.add(new Person(2, 23, "az", 15));
            list2.add(new Person(3, 63, "lisi", 42));

            Dataset<Row> df1 = spark.createDataFrame(list1, Person.class);
            Dataset<Row> df2 = spark.createDataFrame(list2, Person.class);

            df1.createOrReplaceTempView("df1");
            df2.createOrReplaceTempView("df2");

            // equalTo is the readable Java alias for Scala's === column operator.
            Dataset<Row> join = df1.join(df2).where(df1.col("id").equalTo(df2.col("id")));
            join.select(df1.col("id"), df1.col("name"), df1.col("age"),
                    df1.col("count").as("view"), df2.col("count").as("cart")).show();
        }
    }

    /** Round-trips the same data between RDD, DataFrame and Dataset representations. */
    @Test
    public void test() {
        try (SparkSession spark =
                SparkSession.builder().appName("test").master("local").getOrCreate()) {
            List<Person> list = new ArrayList<>();
            list.add(new Person(1, 23, "zs"));
            list.add(new Person(2, 13, "az"));
            list.add(new Person(3, 33, "lisi"));

            // Build an RDD from a local collection.
            JavaSparkContext context = new JavaSparkContext(spark.sparkContext());
            JavaRDD<Person> rdd = context.parallelize(list);
            rdd.foreach(System.out::println);

            // RDD -> DataFrame (schema inferred from the Person bean).
            Dataset<Row> df1 = spark.createDataFrame(rdd, Person.class);
            df1.show();
            // RDD -> Dataset (typed, via a bean encoder; rdd() unwraps the Scala RDD).
            Dataset<Person> ds1 = spark.createDataset(rdd.rdd(), Encoders.bean(Person.class));
            ds1.show();

            // DataFrame -> RDD
            JavaRDD<Row> rdd1 = df1.toJavaRDD();
            rdd1.foreach(System.out::println);
            // DataFrame -> Dataset
            Dataset<Person> ds2 = df1.as(Encoders.bean(Person.class));
            ds2.show();

            // Dataset -> RDD
            JavaRDD<Person> rdd2 = ds1.toJavaRDD();
            rdd2.foreach(System.out::println);
            // Dataset -> DataFrame
            Dataset<Row> df2 = ds1.toDF();
            df2.show();
        }
    }

    /** Reads the weather table from MySQL via both JDBC read APIs. */
    @Test
    public void testWeather() {
        try (SparkSession spark =
                SparkSession.builder().appName("weather").master("local").getOrCreate()) {

            // NOTE(review): credentials are hard-coded for local experimentation
            // only — move them to configuration before sharing this code.
            // Variant 1: generic reader with explicit jdbc options.
            Dataset<Row> load = spark.read().format("jdbc")
                    .option("url", "jdbc:mysql://localhost:3306/weather")
                    .option("driver", "com.mysql.jdbc.Driver")
                    .option("user", "root").option("password", "qaz3357375")
                    .option("dbtable", "data_table").load();

            // Variant 2: convenience jdbc() overload taking a Properties bag.
            Properties properties = new Properties();
            properties.setProperty("driver", "com.mysql.jdbc.Driver");
            properties.setProperty("user", "root");
            properties.setProperty("password", "qaz3357375");
            Dataset<Row> data_table = spark.read().jdbc("jdbc:mysql://localhost:3306/weather", "data_table", properties);
            data_table.show();
        }
    }

    /** Appends one Depart row to the MySQL "depart" table via DataFrameWriter.jdbc. */
    @Test
    public void addData() {
        try (SparkSession spark =
                SparkSession.builder().appName("add").master("local").getOrCreate()) {
            Depart depart = new Depart(13, "消防部", "dedif", "2001-10-11", 0);

            Dataset<Row> df = spark.createDataFrame(Collections.singletonList(depart), Depart.class);

            // NOTE(review): hard-coded credentials — see testWeather.
            Properties properties = new Properties();
            properties.setProperty("driver", "com.mysql.jdbc.Driver");
            properties.setProperty("user", "root");
            properties.setProperty("password", "qaz3357375");
            // SaveMode.Append inserts new rows without touching existing data.
            df.write().mode(SaveMode.Append).jdbc("jdbc:mysql://localhost:3306/oadb", "depart", properties);
        }
    }
}
