package core

import com.kennycason.kumo.WordFrequency
import org.apache.spark.SparkContext
import utils.WordCloudUtil

import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._

/**
 * Reads food records from `data/A.csv`, counts occurrences of each food
 * type (column index 3), and renders the counts as a word cloud via
 * [[utils.WordCloudUtil]].
 *
 * Run as a plain Spark driver: `main` creates a local SparkContext,
 * performs the count, builds the word-cloud input, and stops the context.
 */
object FoodsCount {

  def main(args: Array[String]): Unit = {

    // Local-mode context; stopped in `finally` so the driver always
    // releases its resources even if the job fails.
    val spark = new SparkContext("local",
      "food")

    try {
      // Read the raw CSV; each line is one comma-separated record.
      val data = spark.textFile("data/A.csv")

      // Peek at the first few records for a sanity check.
      data.take(10).foreach(println)

      // Key each record by its food-type field (column 3).
      // NOTE(review): assumes every line has at least 4 comma-separated
      // fields — a short/malformed line would throw here; confirm the
      // input format if A.csv is not fully trusted.
      val foodTypes = data.map { line =>
        val arr = line.split(",")
        (arr(3), 1)
      }

      // countByKey collects a Map[String, Long] of occurrences per type
      // back to the driver.
      val result = foodTypes.countByKey()
      result.foreach(println)

      // Build the word-cloud input directly with a map — no mutable
      // buffer needed. `asJava` (from scala.collection.JavaConverters)
      // converts the Scala Seq to the java.util.List the util expects.
      val wordFrequencies = result.toSeq.map {
        case (word, count) => new WordFrequency(word, count.toInt)
      }
      WordCloudUtil.createWordCloud(wordFrequencies.asJava)
    } finally {
      // Always shut the context down; the original leaked it.
      spark.stop()
    }
  }

}
