package modular.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.log4j.BasicConfigurator;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class HdfsTest {

    /**
     * Standalone HDFS word-count driver.
     *
     * <p>Reads every file directly under {@code /yue/} on the cluster at
     * {@code hdfs://hdmaster:9000} (as user {@code root}), feeds each line to a
     * {@code WordCountMapper} which accumulates counts into a {@code ContextMe},
     * then writes the aggregated {@code key<TAB>value} pairs to
     * {@code /yue/out1.dat}.
     *
     * <p>NOTE(review): the output file is written into the same directory that is
     * read as input — a re-run will pick up {@code out1.dat} as input. Confirm
     * this is intended.
     *
     * @param args unused
     * @throws Exception on any HDFS or I/O failure (propagated to the JVM)
     */
    public static void main(String[] args) throws Exception {
        BasicConfigurator.configure();

        FileSystem fs = FileSystem.get(new URI("hdfs://hdmaster:9000"), new Configuration(), "root");
        try {
            ContextMe contextMe = new ContextMe();
            Mapper mapper = new WordCountMapper();

            // Map phase: process every line of every file directly under /yue/
            // (non-recursive listing).
            RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(new Path("/yue/"), false);
            while (iterator.hasNext()) {
                LocatedFileStatus next = iterator.next();
                // try-with-resources ensures the streams are closed even if map()
                // throws (the original leaked them on error), and an explicit
                // UTF-8 charset avoids depending on the platform default.
                try (FSDataInputStream in = fs.open(next.getPath());
                     BufferedReader br =
                             new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        mapper.map(line, contextMe);
                    }
                }
            }

            // Reduce/output phase: dump the accumulated map as "key<TAB>value" lines.
            HashMap<Object, Object> contextMap = contextMe.getContextMap();
            Path outPath = new Path("/yue");
            if (!fs.exists(outPath)) {
                fs.mkdirs(outPath);
            }
            try (FSDataOutputStream out = fs.create(new Path("/yue/out1.dat"))) {
                Set<Map.Entry<Object, Object>> entries = contextMap.entrySet();
                for (Map.Entry<Object, Object> entry : entries) {
                    out.write((entry.getKey().toString() + "\t" + entry.getValue() + "\n")
                            .getBytes(StandardCharsets.UTF_8));
                }
            }
        } finally {
            // Always release the FileSystem handle, even on failure.
            fs.close();
        }

        System.out.println("数据统计完成");
    }
}
