package top.doe.spark_sql;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.serializer.KryoSerializer;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;

import javax.xml.ws.BindingType;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.CountDownLatch;

/**
 * Spark SQL practice: reads a JSON order file and a CSV user file into temp views,
 * demonstrates window-function queries with {@code explain()} (kept as commented
 * examples), and builds a typed {@code Dataset<Bean>} from a JavaRDD.
 *
 * <p>Runs locally ({@code master("local")}) against files under
 * {@code sql_data/datasource/}; results are printed to stdout via {@code show()}.
 */
public class _03_Explain {

    public static void main(String[] args) throws IOException, InterruptedException, KeeperException {

        SparkConf conf = new SparkConf();

        SparkSession spark = SparkSession.builder()
                .master("local")
                .appName("datasource")
                .config(conf)
                // Keep shuffle partitions small for a local demo (default is 200).
                .config("spark.sql.shuffle.partitions", 2)
                .config("spark.serializer", KryoSerializer.class.getName())
                //.config("spark.sql.adaptive.enabled", "true")
                //.enableHiveSupport()  // enable Hive support
                .getOrCreate();

        //-------------Hive table read/write-----------------------------------------------------
        Dataset<Row> ds7 = spark.read().json("sql_data/datasource/order.data");
        //ds7.show();

        /*
        +---+-------+----+---+
        | id|    mth|shop|amt|
        +---+-------+----+---+
        |  1|2024-01|   a|100|
        |  2|2024-01|   a|200|
        |  3|2024-02|   a|100|
        |  4|2024-02|   a|200|
        |  5|2024-02|   a|300|
        |  6|2024-03|   a|100|
        |  7|2024-03|   a|400|
        |  8|2024-01|   b|100|
        |  9|2024-01|   b|300|
        | 10|2024-01|   b|200|
        | 11|2024-02|   b|400|
        | 12|2024-02|   b|200|
        | 13|2024-02|   b|200|
        | 14|2024-03|   b|200|
        | 15|2024-03|   b|500|
        +---+-------+----+---+
         */
        // Ad-hoc SQL practice:
        // per-shop, per-month total sales, plus the running total accumulated up to that month.
        // TODO
        ds7.createOrReplaceTempView("ds7");
//        Dataset<Row> accu =
//                spark.sql("select\n" +
//                        "    shop,\n" +
//                        "    mth,\n" +
//                        "    sum(amt) as amt, \n" +
//                        "    sum(sum(amt)) over(partition by shop order by mth) as accu_amt\n" +
//                        "from ds7\n" +
//                        "group by shop,mth");
//        accu.explain();

        System.out.println("-----------------------------------------------------------");
//        spark.sql("with tmp as (\n" +
//                "    select\n" +
//                "        shop,\n" +
//                "        mth,\n" +
//                "        sum(amt) as amt \n" +
//                "    from ds7\n" +
//                "    group by shop,mth\n" +
//                ")\n" +
//                "select\n" +
//                "    shop,\n" +
//                "    mth,\n" +
//                "    amt,\n" +
//                "    sum(amt) over(partition by shop order by mth) as accu_amt\n" +
//                "from tmp\n").explain("formatted");

        System.out.println("-----------------------------------------------------------");
//        spark.sql(
//                "select\n" +
//                        "    id,\n" +
//                        "    mth,\n" +
//                        "    shop,\n" +
//                        "    amt,\n" +
//                        "    row_number() over(partition by shop order by amt desc) as rn ,\n" +
//                        "    sum(amt) over(partition by shop order by id desc)  as accu\n" +
//                        "from ds7"
//        )
//                .explain("extended");


        System.out.println("-----------------------------------------------------------");
        // Infer column types from the CSV and treat the first row as a header.
        Dataset<Row> csv = spark.read().option("inferSchema","true").option("header","true").csv("sql_data/datasource/a.csv");
        csv.printSchema();
        csv.createOrReplaceTempView("user");


        SparkContext sc = spark.sparkContext();
        JavaSparkContext jsc = new JavaSparkContext(sc);
        JavaRDD<Bean> rdd = jsc.parallelize(Arrays.asList(new Bean("aaa", 18), new Bean("bbb", 19)));
        // Encoders.bean needs a public no-arg constructor and getters/setters —
        // supplied here by Lombok's @Data/@NoArgsConstructor on Bean.
        Dataset<Bean> dataset = spark.createDataset(rdd.rdd(), Encoders.bean(Bean.class));
        dataset.printSchema();
        dataset.show();


        // BUG FIX: ds7 has no `uid` column (its schema is id, mth, shop, amt — see the
        // table above), so the original condition `a.uid=b.id` fails analysis with
        // "cannot resolve 'a.uid'". Join on `a.id` instead.
        // NOTE(review): assumes a.csv contains an `id` column (the original query
        // already joined on b.id) — verify against the actual file.
        spark.sql("select a.*,b.* from ds7 a join user b on a.id=b.id").show();

        // Release the local Spark context and its resources.
        spark.stop();
    }

    /** Simple JavaBean used to build a typed Dataset via {@code Encoders.bean}. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Bean {
        private String name;  // sample user name
        private int age;      // sample user age
    }

}
