package com.sparksql;

import com.alibaba.fastjson.JSONObject;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.sources.In;
import org.junit.Test;

import java.io.Serializable;

/**
 * Spark SQL exercises: basic DataFrame operations over {@code data/employee.json},
 * plus PV (page-view) and per-province statistics computed from a user-session log.
 *
 * <p>NOTE(review): the {@code PV} and {@code ProPV} bean classes are declared elsewhere
 * in this project; they are assumed to be JavaBeans usable with
 * {@code SparkSession.createDataFrame(rdd, beanClass)} — confirm against their source.
 */
public class EmployeeTest implements Serializable {

    /**
     * Runs a series of DataFrame queries against the employee JSON file.
     * Alternative query forms are kept commented out as reference material.
     */
    public static void main(String[] args) {

        SparkSession spark = SparkSession.builder().appName("EmployeeTest").master("local").getOrCreate();

        Dataset<Row> df = spark.read().json("data/employee.json");

        // (1) Query all rows.
        df.show();
        // (2) Query all rows, removing duplicates.
        //df.distinct().show();
        // (3) Query all rows, omitting the id column.
        //df.select("age", "name").show();
        // (4) Filter records with age > 30.
        //df.where("age > 30").show();
        // (5) Group the data by age.
        //df.groupBy("age").count().show();
        // (6) Sort the data by name, ascending.
        //df.sort("name").show();
        //df.orderBy("name").show();
        // (7) Take the first 3 rows.
        //df.show(3);
        //df.limit(3).show();
        // (8) Select the name column, aliased as username.
        //df.select(df.col("name").as("username")).show();
        // (9) Average of the age column — SQL form and DSL form.
        df.createOrReplaceTempView("emp");
        spark.sql("select avg(age) from emp").show();
        df.selectExpr("avg(age)").show();
        // (10) Minimum of the age column — SQL form and DSL form.
        spark.sql("select min(age) from emp").show();
        df.selectExpr("min(age)").show();

        // Release the local Spark context; without this the JVM keeps it alive.
        spark.stop();
    }

    @Test
    public void a() {
        // Prints the numeric code point of 'E' (69), then the character 'B'.
        System.out.println((int) 'E');
        System.out.println('B');
    }

    /**
     * Builds the local SparkSession shared by the statistics tests.
     *
     * @param appName Spark application name shown in the UI/logs
     * @return a (possibly reused) local-master session
     */
    private static SparkSession localSession(String appName) {
        return SparkSession.builder()
                .config("spark.driver.allowMultipleContexts", "true")
                .appName(appName)
                .master("local")
                .getOrCreate();
    }

    /**
     * Page-view statistics: parses each log line as JSON, extracts the user id and
     * the order date, and counts visits per user.
     */
    @Test
    public void pv() {
        SparkSession spark = localSession("pv");
        JavaSparkContext context = new JavaSparkContext(spark.sparkContext());
        JavaRDD<String> rdd = context.textFile("D://word/user_session.log");
        JavaRDD<PV> uid = rdd.map(t -> {
            // parseObject returns JSONObject directly — no cast needed as with parse().
            JSONObject json = JSONObject.parseObject(t);
            Integer userId = json.getInteger("uid");
            // The date is stored as a nested object with year/monthValue/dayOfMonth fields.
            JSONObject odate = json.getJSONObject("odate");
            String date = odate.getString("year") + "-"
                    + odate.getString("monthValue") + "-"
                    + odate.getString("dayOfMonth");
            return new PV(date, userId);
        });
        Dataset<Row> df = spark.createDataFrame(uid, PV.class);

        // Total page views.
        //df.selectExpr("count(uid) as count").show();
        //df.selectExpr("count(distinct(uid)) as count").show();
        //df.show();

        // UV: group by day (year-month-day) and count distinct visitors.
        //Dataset<Row> count = df.distinct().groupBy("date").count();
        //count.show();
        //Column count1 = new Column("count");
        //count.sort(count1.desc()).show();

        //long count1 = df.select("uid").distinct().count();//1000
        //long count2 = df.distinct().count();//1815903

        // Number of visits per user.
        df.groupBy("uid").count().show();

        // Release the local Spark context.
        spark.stop();
    }

    /**
     * Counts the number of log records per province.
     */
    @Test
    public void provincePV() {
        SparkSession spark = localSession("provincePV");
        JavaSparkContext context = new JavaSparkContext(spark.sparkContext());
        JavaRDD<String> rdd = context.textFile("D://word/user_session.log");
        JavaRDD<ProPV> uid = rdd.map(t -> {
            JSONObject json = JSONObject.parseObject(t);
            String province = json.getString("province");
            return new ProPV(province);
        });
        Dataset<Row> df = spark.createDataFrame(uid, ProPV.class);
        //df.show();

        // Number of operations per province.
        df.groupBy("province").count().show();

        // Release the local Spark context.
        spark.stop();
    }

}
