package com.atguigu.util

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * description : shared Hive utility helpers (compression / dynamic-partition settings)
 * author      : 剧情再美终是戏
 * mail        : 13286520398@163.com
 * date        : Created in 2020/3/6 15:00
 * modified By :
 * version     : 1.0
 */
object HiveUtil {

  /**
   * Enables compressed output for Hive query results.
   *
   * Uses the legacy MRv1 property name `mapred.output.compress` on purpose,
   * for compatibility with older Hadoop/Hive deployments.
   *
   * @param spark the active `SparkSession` whose Hive session receives the settings
   * @return the `DataFrame` produced by the last `SET` command; callers normally ignore it
   */
  def openCompression(spark: SparkSession): DataFrame = {
    spark.sql("set mapred.output.compress=true")
    spark.sql("set hive.exec.compress.output=true")
  }

  /**
   * Enables dynamic partitioning in non-strict mode, so `INSERT` statements
   * may create partitions without naming at least one static partition column.
   *
   * @param spark the active `SparkSession` whose Hive session receives the settings
   * @return the `DataFrame` produced by the last `SET` command; callers normally ignore it
   */
  def openDynamicPartition(spark: SparkSession): DataFrame = {
    spark.sql("set hive.exec.dynamic.partition=true")
    spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")
  }

  /**
   * Configures Snappy as the compression codec for both intermediate
   * (map-side) output and final job output.
   *
   * @param spark the active `SparkSession` whose Hive session receives the settings
   * @return the `DataFrame` produced by the last `SET` command; callers normally ignore it
   */
  def useSnappyCompression(spark: SparkSession): DataFrame = {
    // The codec below has no effect unless map-output compression is switched
    // on; Hadoop defaults mapreduce.map.output.compress to false.
    spark.sql("set mapreduce.map.output.compress=true")
    spark.sql("set mapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.SnappyCodec")
    spark.sql("set mapreduce.output.fileoutputformat.compress=true")
    spark.sql("set mapreduce.output.fileoutputformat.compress.codec=org.apache.hadoop.io.compress.SnappyCodec")
  }
}
