package com.shujia.bigdata.word;

import java.io.*;
import java.util.HashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class Map {
    public static void main(String[] args) {


        /**
         * 线程池
         *  可以启动多个线程
         *
         */

        //创建线程池
        ExecutorService threadPool = Executors.newFixedThreadPool(4);


        /**
         * 通过多行程处理多个文件
         *
         */


        File pfile = new File("java/data/word/");
        File[] files = pfile.listFiles();

        if (files == null) {
            return;
        }

        for (int i = 0; i < files.length; i++) {
            Task task = new Task(files[i], i, files.length);


            //通过线程池启动线程
            threadPool.submit(task);
        }


        //关闭线程池
        threadPool.shutdown();

    }
}

// Runnable rather than a Thread subclass: instances are only ever executed by
// the pool in Map.main via submit(), never started as threads themselves.
class Task implements Runnable {

    /** Input file this map task reads. */
    private final File file;
    /** Number of reduce tasks, i.e. number of output partitions. */
    private final int numPartition;
    /** Id of this map task; embedded in the output file names. */
    private final int taskId;

    public Task(File file, int taskId, int numPartition) {
        this.file = file;
        this.numPartition = numPartition;
        this.taskId = taskId;
    }

    /**
     * Counts comma-separated words in {@link #file}, then writes one output
     * file per reduce partition named
     * {@code java/data/word_tmp2/<taskId>-<partition>}, each line "word,count".
     * A word's partition is {@code floorMod(word.hashCode(), numPartition)}.
     */
    @Override
    public void run() {
        System.out.println(Thread.currentThread().getName());

        try {
            // 1. Count occurrences of every word in this task's input file.
            // NOTE(review): FileReader uses the platform default charset —
            // confirm the input files are encoded accordingly.
            HashMap<String, Integer> counts = new HashMap<>();
            try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    for (String word : line.split(",")) {
                        counts.merge(word, 1, Integer::sum);
                    }
                }
            }

            // 2. Open one writer per reduce partition. Every map task emits
            //    one file per reduce task: "<taskId>-<partition>".
            HashMap<Integer, BufferedWriter> writers = new HashMap<>();
            try {
                for (int i = 0; i < numPartition; i++) {
                    String path = "java/data/word_tmp2/" + taskId + "-" + i;
                    writers.put(i, new BufferedWriter(new FileWriter(path)));
                }

                // 3. Route each word to its partition's writer.
                //    (Qualified Map.Entry: this file declares its own `Map`.)
                for (java.util.Map.Entry<String, Integer> entry : counts.entrySet()) {
                    // floorMod, not Math.abs(...) % n: for a hashCode of
                    // Integer.MIN_VALUE, Math.abs is still negative, which
                    // would yield a negative partition and an NPE below.
                    int partition = Math.floorMod(entry.getKey().hashCode(), numPartition);
                    BufferedWriter writer = writers.get(partition);
                    writer.write(entry.getKey() + "," + entry.getValue());
                    writer.newLine();
                }
            } finally {
                // Close every partition writer even if writing failed midway.
                for (BufferedWriter writer : writers.values()) {
                    try {
                        writer.close();
                    } catch (IOException ignored) {
                        // best effort: keep closing the remaining writers
                    }
                }
            }

            System.out.println("=====");

        } catch (Exception e) {
            // Thread-pool boundary: report and swallow so one bad file does
            // not kill the worker thread.
            e.printStackTrace();
        }
    }
}
