package WordCount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.Map;


public class HDFSWCApp01 {

    /**
     * HDFS word-count driver: lists the file(s) at {@code /wordCount_job/words.txt},
     * feeds every line to a {@link WordCountMapper} which accumulates counts into a
     * {@link BaseContext}, then writes the aggregated "word\tcount" pairs to
     * {@code /wordCount_job/result/wc.txt}.
     *
     * @param args unused
     * @throws Exception on any HDFS connection or I/O failure
     */
    public static void main(String[] args) throws Exception {

        // try-with-resources: the FileSystem handle is released even if
        // reading or writing fails part-way through (the original closed
        // resources only on the happy path).
        try (FileSystem fs = FileSystem.get(
                new URI("hdfs://bigdata-pro-master:8020"), new Configuration())) {

            Path path = new Path("/wordCount_job/words.txt");
            // non-recursive listing of the input path
            RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(path, false);

            BaseContext baseContext = new BaseContext();
            BaseMapper baseMapper = new WordCountMapper();

            while (iterator.hasNext()) {
                LocatedFileStatus file = iterator.next();
                // 装饰者模式 (decorator pattern): wrap the raw HDFS byte stream in a
                // buffered, character-decoding reader. UTF-8 is pinned explicitly so
                // decoding does not depend on the JVM's platform default charset.
                try (FSDataInputStream in = fs.open(file.getPath());
                     BufferedReader reader = new BufferedReader(
                             new InputStreamReader(in, StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        baseMapper.map(line, baseContext);
                    }
                }
            }

            // Emit the accumulated counts, one tab-separated pair per line.
            Path target = new Path("/wordCount_job/result/");
            try (FSDataOutputStream out = fs.create(new Path(target, new Path("wc.txt")))) {
                Map<String, Integer> contextMap = baseContext.getCacheMap();
                for (Map.Entry<String, Integer> entry : contextMap.entrySet()) {
                    out.write((entry.getKey() + "\t" + entry.getValue() + "\n")
                            .getBytes(StandardCharsets.UTF_8));
                }
            }
        }

        System.out.println("执行完成。。。。");
    }
}
