package com.hdaccp.log

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}

/**
 * Utility for converting raw access-log lines (input) into structured output rows.
 */
object AccessConvertUtil {

  // Local import keeps this edit self-contained; NonFatal is the idiomatic
  // catch pattern (lets OutOfMemoryError / InterruptedException propagate).
  import scala.util.control.NonFatal

  /** Output schema: field order here must match the Row built in [[parseLog]]. */
  val struct = StructType(
    Array(
      StructField("url", StringType),
      StructField("ip", StringType),
      StructField("time", StringType),
      StructField("cmsType", StringType),
      StructField("cmsId", StringType),
      StructField("day", StringType)
    )
  )

  /**
   * Parses one raw access-log line into a Row conforming to [[struct]].
   *
   * Expected tab-separated input, e.g.:
   * {{{
   * 143.124.29.30	2018-08-15 15:43:01	"GET learn/821 HTTP/1.0"	-	404
   * 100.167.30.132	2018-08-15 15:43:01	"GET class/145.html HTTP/1.0"	https://www.baidu.com/s?wd=Hadoop基础	302
   * 29.156.167.10	2018-08-15 15:43:01	"GET course/list HTTP/1.0"	-	302
   * }}}
   *
   * @param log one raw log line
   * @return a 6-field Row (url, ip, time, cmsType, cmsId, day); on any parse
   *         failure, a Row of six empty strings. NOTE: the previous version
   *         returned Row(0) here — a 1-field row that does not match [[struct]]
   *         and breaks createDataFrame; the placeholder is now schema-compatible.
   */
  def parseLog(log: String): Row = {
    try {
      val splits = log.split("\t")

      val ip   = splits(0)
      val time = splits(1) // e.g. "2018-08-15 15:43:01"
      val url  = splits(2) // e.g. "GET class/145.html HTTP/1.0" (quotes included)

      // "yyyy-MM-dd HH:mm:ss" -> "yyyyMMdd" partition key
      val day = time.substring(0, 10).replaceAll("-", "")

      // The second token of the request line is the resource path,
      // e.g. "class/145.html" -> cmsType = "class", raw id = "145.html".
      val path  = url.split(" ")(1)
      val parts = path.split("/")
      val cmsType = parts(0)
      val rawId   = parts(1)

      // Strip any file extension from the id: "145.html" -> "145".
      // (val + if-expression replaces the old `var cmsId` mutation.)
      val dotIdx = rawId.indexOf(".")
      val cmsId  = if (dotIdx == -1) rawId else rawId.substring(0, dotIdx)

      // Field order here must stay aligned with `struct`.
      Row(url, ip, time, cmsType, cmsId, day)
    } catch {
      // Malformed lines (too few tabs, missing path segment, short time field)
      // yield a schema-compatible placeholder instead of the old mismatched Row(0).
      case NonFatal(_) => Row("", "", "", "", "", "")
    }
  }
}
