package com.xf.day04

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount2 {
  /** Demo entry point: computes the intersection of two small in-memory RDDs
    * and prints the (deduplicated) common elements.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    // Build the Spark configuration: local mode using all available cores,
    // with a fixed UI port and driver host for predictable local debugging.
    val conf = new SparkConf()
      .setAppName("WordCount")
      .setMaster("local[*]")
      .set("spark.ui.port", "8080")
      .set("spark.driver.host", "127.0.0.1")

    // Create the SparkContext.
    val sc = new SparkContext(conf)
    try {
      val rdda = sc.parallelize(List(1, 3, 3, 4, 4, 5))
      val rddb = sc.parallelize(List(2, 3, 4, 3, 6))
      // intersection() returns each common element exactly once (duplicates
      // in either input are removed), so the expected output here is {3, 4}.
      val result = rdda.intersection(rddb)
      val ints = result.collect().toBuffer
      println(ints)

//    // Read a local file
//    val path = "D:\\tmp\\wd.txt"
//    // Or read an HDFS file to create the RDD instead
//    // val path = "hdfs://master:9000/tmp/wd.txt"
//    // Create an RDD named linesRDD
//    val linesRDD : RDD[String] = sc.textFile(path)
//
//    val value = linesRDD.filter(line => line.contains("hadoop"))
//
//    val cnt = value.count()
//
//    println(cnt)

      // Pause for 10 seconds so the Spark UI (http://127.0.0.1:8080) can be
      // inspected. BUG FIX: the original slept 10,000,000 ms (~2.8 hours),
      // contradicting the "10 seconds" intent stated in its own comment.
      Thread.sleep(10000)
    } finally {
      // Always release SparkContext resources (executors, UI, scheduler),
      // even if the job above throws. The original never called stop().
      sc.stop()
    }
  }
}
