package com.inspur.spark;
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.hadoop.io.Text

object wordcount {

  /**
   * Demo entry point: builds a local SparkContext and creates a few example
   * RDDs (from an Array, a List, and a text file). A classic word-count
   * pipeline is kept below as commented-out reference code.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local") // NOTE(review): hard-coded local master — fine for a demo, not for cluster submission
      .setAppName("wordcount")
    val sc = new SparkContext(conf)

    // Always stop the context, even if an action throws — otherwise the
    // local Spark runtime (threads, UI server, temp dirs) is leaked.
    try {
      val localFile = "file:///d:/words.txt"
      val hdfsPath  = "hdfs://namenode:8020/user/words1.txt"

      // --- Reference word-count pipeline (commented out on purpose) ---
      // val tesFile   = sc.textFile(localFile)            // or sc.textFile(hdfsPath)
      // val wordcount = tesFile.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
      // wordcount.foreach(println)
      // wordcount.saveAsTextFile("hdfs://namenode:8020/user/wordcount01") // target directory must not already exist
      // ----------------------------------------------------------------

      // RDD creation examples: from an in-memory Array, an in-memory List,
      // and a local text file. No actions are invoked, so nothing executes
      // beyond lazy RDD definitions.
      val rddFromArray = sc.parallelize(Array(1, 2, 3, 4, 5))
      val rddFromList  = sc.parallelize(List(1, 2, 3, 4, 5))
      val lines        = sc.textFile(localFile)
    } finally {
      sc.stop()
    }
  }

}