package com.alan.test

import breeze.linalg.split
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.SparkContext._;
import scala.io.Source ;
import java.io.File ;


/** Simple Spark word-count driver: counts words in an HDFS text file and
  * writes the `(word, count)` pairs next to the input as `<input>.result`.
  */
object WordCount {
  // HDFS namenode URI; used for the default input path and the FileSystem handle.
  val hdfs_master = "hdfs://10.19.2.28:9000"
  val inputFile = hdfs_master + "/spark/alan/data/input001.txt"
  val outputFile = inputFile + ".result"

  /** Runs a word count over `inputFile`, saves the result as `<inputFile>.result`
    * (overwriting any previous run), then reads the result back and prints it.
    *
    * @param inputFile URI of the text file to count (HDFS or local)
    * @param sc        active SparkContext
    */
  def wordCount(inputFile: String, sc: SparkContext): Unit = {
    val lines = sc.textFile(inputFile)
    val counts = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .reduceByKey(_ + _)
    counts.foreach(println)

    // Derive the output path from the actual parameter. The original used the
    // object-level `outputFile` constant, which pointed at the wrong location
    // whenever this method was called with a different input path.
    val resultPath = inputFile + ".result"

    // saveAsTextFile fails if the target directory already exists, so remove
    // any output left over from a previous run.
    val output = new Path(resultPath)
    val hdfs = org.apache.hadoop.fs.FileSystem.get(
      new java.net.URI(hdfs_master), new org.apache.hadoop.conf.Configuration())
    if (hdfs.exists(output)) hdfs.delete(output, true)

    counts.saveAsTextFile(resultPath)

    // Read the freshly written result back and print it as a sanity check.
    sc.textFile(resultPath).foreach(println)
  }

  /** Entry point: runs the word count in local mode against the configured input. */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("wordcount")
      .setMaster("local") // single-JVM local mode; no cluster required
      .setExecutorEnv("user", "hadoopuser")
    val sc = new SparkContext(conf)
    try {
      println(s"inputFile:$inputFile")
      wordCount(inputFile, sc)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
