package cn.xiaoniu.dmp.report

import java.util

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import utils.JedisPoolUtils

object Test01 {

  /**
    * Ad-hoc local exploration job ("媒体分析" / media analysis):
    * counts the rows of the app dictionary file that contain at
    * least two tab-separated fields, i.e. the rows usable as
    * (appId, appName, ...) dictionary entries.
    *
    * Reads:  D:\app_dict.txt  (tab-separated app dictionary)
    * Writes: nothing — prints the valid-row count to stdout.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("媒体分析")
    val sc = new SparkContext(conf)
    try {
      val file: RDD[String] = sc.textFile("D:\\app_dict.txt")

      // split(..., -1) keeps trailing empty fields, so lines that end in
      // tabs (empty last columns) are still counted with their full width.
      // "\t" is the tab character itself, which the regex engine matches
      // literally — equivalent to the old "\\\t" but without the confusing
      // backslash escape.
      val validRows: RDD[String] = file.filter(_.split("\t", -1).length >= 2)

      // count() instead of collect().length: no need to pull every row
      // back to the driver just to count them.
      println(validRows.count())
    } finally {
      // Always release the local Spark resources, even if the job throws.
      sc.stop()
    }
  }
}
