package newstage

import java.text.SimpleDateFormat

import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql._
/**
  * Created by Alex on 2016/12/17.
  */
/**
  * Exploratory driver: loads per-class text datasets, registers them as a SQL
  * table, and (in the currently commented-out pipeline) runs a TF-IDF
  * transformation over one column. Only `path`, `fields`, and `formatPath`
  * are live; the Spark pipeline below is retained commented out as a record
  * of the intended processing steps.
  */
object Driver {
  /** Glob template for a class's input files; `%d` is the class index. */
  val path = "D://userData/c%d/*"

  /** Space-separated column names: "col1 col2 col3 col4 col5 col6 col7 col8 col9".
    * `mkString` is total (safe on empty ranges), unlike the `reduce` idiom.
    */
  val fields: String = (1 to 9).map("col" + _).mkString(" ")

  // NOTE(review): debug leftover — this prints whenever the object is first
  // touched (object initialization is lazy). Consider removing.
  println(fields)

  /** Builds the input glob for a class index, paired with that index.
    *
    * @param clzz 1-based class index substituted into [[path]]
    * @return (formatted glob, clzz) so the class label travels with the path
    */
  def formatPath(clzz: Int): (String, Int) =
    (path.format(clzz), clzz)

  def main(args: Array[String]): Unit = {
    // Commented-out exploratory Spark pipeline, kept for reference:
    // read 15 class directories, tag each line with its class index, parse
    // tab-separated rows (dropping malformed ones), register as a SQL table,
    // then tokenize col6 and compute TF-IDF.
//    val conf = new SparkConf().setAppName("WordCount").setMaster("local[*]")
//    val sc = new SparkContext(conf)
//    val sdf = new SimpleDateFormat("yyyyMMdd")
//    val rows = (1 to 15).map(formatPath).map(x=>{
//      sc.textFile(x._1).map(y=>(y,x._2))
//    }).reduce((x,y)=>x.union(y))
//      .map(line => {
//        val tag = line._2
//        val p = line._1.split("\t")
//        try {
//          Some(Row(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8), sdf.parse(p(9)), sdf.parse(p(10))))
//        } catch {
//          case e: Exception => None
//        }
//      }).filter(x=> !x.isEmpty).map(x=>x.get)
//    val sqlContext = new SQLContext(sc)
//
//    val schema = sqlContext.applySchema(
//      rows,StructType(fields.split(" ").map(x=>StructField(x,StringType,true))))
//    schema.registerTempTable("dataset")
//    sqlContext.sql("select col6 as WeAcc,count(*) as Cnt from dataset group by col6 order by Cnt desc ")
//        .take(5).foreach(println)
//
//
//    val tokenizer = new Tokenizer().setInputCol("col6").setOutputCol("words")
//    val wordsData = tokenizer.transform(schema)
//    wordsData.take(10).foreach(println)
//    val tf = new HashingTF().transform(wordsData)
//    val idf = new IDF().fit(tf)
//    val tfidf = idf.transform(tf)
//
//    tfidf.take(20).foreach(println)
  }
}
