package com.spark.sql

import java.text.SimpleDateFormat
import java.util
import java.util.Date

import org.apache.spark.graphx.Graph
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}


/**
  * Created by zhaochao on 2017/5/19.
  */
object sqlWordCount {

  /**
    * Converts a Unix epoch timestamp (in seconds, given as a string) to the
    * hour-of-day ("00".."23") in the JVM's default time zone.
    *
    * @param time epoch seconds, e.g. "1495162800"
    * @return the two-digit hour string
    */
  def getHour(time: String): String = {
    // Widen to Long BEFORE multiplying: `Integer.valueOf(time) * 1000` is an
    // Int multiplication and overflows for any timestamp beyond ~Int.MaxValue
    // milliseconds (late January 1970), yielding a wrong Date.
    val date = new Date(time.toLong * 1000L)
    val sf = new SimpleDateFormat("HH")
    sf.format(date)
  }

  /**
    * Entry point: loads a CSV of news records into Spark SQL, registers it as
    * the temporary view "news", and prints key/content/title triples with the
    * content column stripped of punctuation, symbols and newlines.
    */
  def main(args: Array[String]): Unit = {

    // Create the SparkSession (local mode, explicit warehouse dir for Windows).
    val sparkSession = SparkSession.builder
      .config("spark.sql.warehouse.dir", "D:\\WorkSpace\\spark\\spark-learning\\spark-warehouse")
      .master("local")
      .appName("spark session example")
      .getOrCreate()

    try {
      // Load the structured data: CSV file with a header row.
      val path = "E:\\file\\website_formal.csv"
      val df = sparkSession.read.option("header", "true").csv(path)
      // Register the loaded data as a temporary view named "news".
      df.createOrReplaceTempView("news")

      df.show(5)

      // Select the record key, content and title columns from the view.
      val ipCountSQL = "select IR_HKEY,CONTENT ,DOCTITLE    from news "
      val rddData = sparkSession.sql(ipCountSQL)

      // Print each row as "key====cleanedContent====title". The regex
      // \pP|\pS removes Unicode punctuation and symbol characters from the
      // content column; newlines are dropped so each record stays on one line.
      rddData.foreach(x => {
        val line = x.get(0) + "====" +
          x.get(1).toString.replaceAll("\\pP|\\pS", "").replaceAll("\n", "") + "====" +
          x.get(2)
        println(line)
      })
    } finally {
      // Always release Spark resources, even if the job fails.
      sparkSession.stop()
    }
  }

}
