package com.qing.spark

import java.io.{File, PrintWriter}

import com.qing.spark.dao._

import scala.collection.mutable._
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer
import scala.util.Random

/**
  * Created by wuliao on 2018/3/12.
  */
/**
  * Scratch entry point used for manual experimentation with Spark/HBase
  * writes and the DAO layer. Most past experiments are kept below as
  * commented-out reference snippets.
  */
object Test {

  def main(args: Array[String]): Unit = {

    // --- Experiment: split a single-line sFlow log dump into one JSON record per line ---
    //    val files = new File("/mnt/disk/sflow").listFiles()
    //
    //
    //    for (file <- files) {val path = file.getAbsolutePath; var str = Source.fromFile(path).getLines().next(); val writer = new PrintWriter(new File("/mnt/disk/sflow1/"+file.getName)); str = str.replaceAll("\"}", "\"}\n"); str = str.substring(0, str.length - 1); writer.write(str); writer.close();}
    //      val file = Source.fromFile("/Users/wuliao/Documents/sflow_2018-03-08.log")
    //      print(file.getLines().next())

    // --- Experiment: SparkConf variants for local vs. standalone cluster runs ---
    //    val conf = new SparkConf()
    //      .setAppName("LoggerStreaming")
    //      .setJars(Array("file:///mnt/disk/jar/streaming-logger-0.0.1-jar-with-dependencies.jar"))
    //      .setMaster("local[6,10000]")
    //      .setMaster("spark://0.0.0.0:7077")
    //      .setMaster("spark://175.102.18.112:7077")

    // --- Experiment: bulk HBase write via saveAsHadoopDataset on table "logger" ---
    //    val hbaseConf = HBaseConfiguration.create()
    ////    hbaseConf.set("hbase.master", "impala01:60000")
    //    hbaseConf.set("hbase.zookeeper.quorum", "impala02,impala03,impala04")
    //    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    //
    //    val jobConf = new JobConf(hbaseConf)
    //    jobConf.setOutputFormat(classOf[TableOutputFormat])
    //    jobConf.set(TableOutputFormat.OUTPUT_TABLE, "logger")
    //
    //
    //    val sc = new SparkContext(conf)
    //
    //    val list = List(("mobin", 22), ("kpop", 20), ("lufei", 23))
    //    sc.parallelize(list)
    //      .map(s => {
    //        val put = new Put(("asd" + new Random().nextInt(10)).getBytes)
    //        put.addColumn("data".getBytes, "key".getBytes, "value".getBytes)
    //        (new ImmutableBytesWritable, put)
    //      }).saveAsHadoopDataset(jobConf)

    // --- Experiment: per-partition HBase write via a pooled table handle ---
    //    val sc = new SparkContext(conf)
    //
    //    val list = List(("mobin", 22), ("kpop", 20), ("lufei", 23))
    //    sc.parallelize(list)
    //      .foreachPartition(s => {
    //        val table = HBaseUtils.getInstance.getTable("logger")
    //        val put = new Put(("asd" + new Random().nextInt(10)).getBytes)
    //        put.addColumn("data".getBytes, "key".getBytes, "value".getBytes)
    //        table.put(put)
    //      })

    // Sample day-count records for exercising the DAO layer manually.
    // Kept as a ListBuffer because DayCountDao.save (below) expects one.
    val records = ListBuffer(
      new DayCount(1, 2, 3),
      new DayCount(1, 2, 4),
      new DayCount(2, 2, 4)
    )

    // Uncomment to persist the sample records through the DAO.
    //    DayCountDao.save(records)

  }

}
