package com.bigdata.spark.flume

import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.types.{ArrayType, IntegerType, StringType, StructType}
import org.apache.spark.sql.functions.{col, column, expr}
/**
 * @author Gerry chan
 * @version 1.0
 * spark-sql nginx log analysis: https://www.imooc.com/video/19727
 *
 * Reads raw nginx JSON logs from HDFS with an explicit schema (the JSON
 * carries a nested `user` struct), flattens the `user` fields into
 * top-level columns, and writes the result out as Parquet, producing a
 * fresh timestamped output path on every run.
 */
object SparkNginx {

  /** Default HDFS glob for the raw JSON input (2019-07-02 partition). */
  private val DefaultInputPath = "hdfs://ns/nginx-ori/2019/07/02/*"

  /** Default output path prefix; a millisecond timestamp + ".parquet" is appended. */
  private val DefaultOutputPrefix = "hdfs://ns/nginx-tmp"

  /**
   * Job entry point.
   *
   * @param args optional overrides: args(0) = input path glob,
   *             args(1) = output path prefix. Defaults preserve the
   *             original hard-coded behavior when no arguments are given.
   */
  def main(args: Array[String]): Unit = {
    val inputPath    = if (args.length > 0) args(0) else DefaultInputPath
    val outputPrefix = if (args.length > 1) args(1) else DefaultOutputPrefix

    val sparkSession = SparkSession.builder()
      .appName("SparkNginx")
      .getOrCreate()

    // Ensure the session is stopped even if the read/write fails.
    try {
      // Explicit schema instead of schema inference: from the JSON layout,
      // `user` is itself a struct, so it is modeled as a nested StructType.
      val schema = new StructType().add(
        "user",
        new StructType()
          .add("uid", StringType)
          .add("name", StringType)
          .add("province", StringType)
          .add("city", StringType)
          .add("city_level", IntegerType)
          .add("op_phone", ArrayType(StringType))
          .add("add_time", StringType)
          .add("login_source", StringType)
          .add("total_mark", IntegerType)
          .add("used_mark", IntegerType)
      )

      val df = sparkSession
        .read
        .schema(schema)
        .json(inputPath)

      // `select("user.*")` expands every field of the `user` struct into a
      // top-level column named after the field, in declaration order —
      // equivalent to the ten col("user").getField(f).as(f) projections,
      // without the repetition.
      //
      // NOTE(review): the default prefix has no trailing "/", so the output
      // lands directly under hdfs://ns/ as nginx-tmp<millis>.parquet —
      // confirm this is intended rather than a missing path separator.
      df.select("user.*")
        .write
        .mode(SaveMode.Overwrite)
        .parquet(s"$outputPrefix${System.currentTimeMillis()}.parquet")
    } finally {
      sparkSession.stop()
    }
  }
}
