package com.test;

import java.io.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

/**
 * Counts URL occurrences in a very large input file and reports the most frequent ones.
 * Buffered character streams (BufferedReader/BufferedWriter) are used throughout; they
 * are noticeably faster here than unbuffered I/O.
 */
public class CalculateURL {
    public static void main(String[] args) {
//        calculateByThreads(); // 2 GB of data: "calculate finish" took 302 seconds
//        verifyResult("http://l41.baidu.com");
    }

    /**
     * Multi-threaded pipeline: hash-partition "urls.txt" into ~16 MB part files, process the
     * parts in batches of 32 (one thread per part, ~32 * 16 MB = 512 MB in flight), keep each
     * batch's top 100, then print the global top-100 URLs with their counts.
     */
    public static void calculateByThreads() {
        long beginTime = System.currentTimeMillis();
        try {
            // Accumulates the per-batch top-100 partial results across all batches.
            ConcurrentHashMap<String, Long> topHash = new ConcurrentHashMap<>();

            System.out.println("切分文件开始...");
            // Hash partitioning guarantees all occurrences of one URL land in the same part file,
            // so each part can be counted independently.
            List<String> files = splitFile("urls.txt");
            System.out.println("切分文件结束");

            // Bound memory and thread count: at most 32 parts (~512 MB) are processed at once.
            List<List<String>> result = splitList(files, 32);

            for (List<String> calPerTimeList : result) {
                // Holds the merged per-file top-100 results for the current batch only.
                ConcurrentHashMap<String, Long> onePartHashMap = new ConcurrentHashMap<>();

                CountDownLatch cdl = new CountDownLatch(calPerTimeList.size());
                for (String calOneFile : calPerTimeList) {
                    new Thread(() -> {
                        try {
                            calculateAndPutHash(onePartHashMap, calOneFile);
                        } finally {
                            cdl.countDown(); // always release the latch, even if counting failed
                        }
                    }).start();
                }
                cdl.await();

                // Reduce the batch to its top 100 and fold that into the global accumulator.
                List<URLStatistics> urlStatistics = getTop(onePartHashMap, 100);
                listToHash(urlStatistics, topHash);
            }
            // Final ranking over all partial results.
            List<URLStatistics> urlStatistics = getTop(topHash, 100);
            urlStatistics.forEach(e -> System.out.println(e.getUrl() + ":" + e.getNumbers()));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag before rethrowing
            throw new RuntimeException("calculateByThreads err", e);
        } catch (Exception e) {
            throw new RuntimeException("calculateByThreads err", e);
        }

        long endTime = System.currentTimeMillis();
        System.out.println("calculate finish,耗时：" + ((endTime - beginTime) / 1000) + "秒");
    }

    /**
     * Counts every line of one part file, keeps only that file's top 100 URLs, and folds them
     * into the shared accumulator. Safe to run concurrently: all writes to {@code tempHash} go
     * through {@link #listToHash}, which merges atomically.
     *
     * @param tempHash shared accumulator for partial top-100 results
     * @param fileName path of the part file to count
     */
    public static void calculateAndPutHash(ConcurrentHashMap<String, Long> tempHash, String fileName) {
        HashMap<String, Long> storage = new HashMap<>();
        System.out.println(fileName + "文件计算开始...");
        calculateFilePart(storage, fileName); // count every URL in this part file
        System.out.println(fileName + "文件计算结束");
        System.out.println("获取前100个：" + fileName);
        List<URLStatistics> topList = getTop(storage, 100); // this part's top 100
        listToHash(topList, tempHash); // fold the partial result into the shared map
    }

    /**
     * Folds a partial top list into the shared accumulator, summing counts per URL.
     * Uses {@link ConcurrentHashMap#merge} so concurrent worker threads cannot lose updates
     * (the previous get-then-put sequence was not atomic).
     *
     * @param topList partial results to fold in
     * @param map     shared accumulator, keyed by URL
     */
    public static void listToHash(List<URLStatistics> topList, ConcurrentHashMap<String, Long> map) {
        for (URLStatistics s : topList) {
            map.merge(s.getUrl(), s.getNumbers(), Long::sum);
        }
    }

    /**
     * Single-threaded variant of {@link #calculateByThreads()}: partitions the input, counts
     * each part file sequentially, and prints the global top-100 URLs.
     */
    public static void calculate() {
        System.out.println("切分文件开始...");
        List<String> files = splitFile("urls.txt");
        System.out.println("切分文件结束");

        ConcurrentHashMap<String, Long> topTempHash = new ConcurrentHashMap<>();
        for (String file : files) {
            calculateAndPutHash(topTempHash, file);
        }

        System.out.println("计算最后结果");
        // Final ranking over all partial results.
        List<URLStatistics> urlStatistics = getTop(topTempHash, 100);
        urlStatistics.forEach(u -> System.out.println(u.getUrl() + " " + u.getNumbers()));
    }

    /**
     * Reads one part file line by line and counts each line's occurrences into {@code storage}.
     *
     * @param storage per-file count map (URL -> occurrences), mutated in place
     * @param file    path of the part file to read
     * @throws RuntimeException wrapping any I/O failure
     */
    public static void calculateFilePart(HashMap<String, Long> storage, String file) {
        // NOTE(review): FileReader uses the platform default charset, matching the FileWriter
        // used in splitFile — keep the two consistent if either is ever changed.
        try (BufferedReader br = new BufferedReader(new FileReader(new File(file)))) {
            String line;
            while ((line = br.readLine()) != null) {
                calculateNumbers(storage, line);
            }
        } catch (Exception e) {
            throw new RuntimeException("calculateFilePart err", e);
        }
    }

    /**
     * Increments the occurrence count of {@code str} in {@code storage}.
     *
     * @return the same map, for call chaining
     */
    public static HashMap<String, Long> calculateNumbers(HashMap<String, Long> storage, String str) {
        storage.merge(str, 1L, Long::sum);
        return storage;
    }

    /**
     * Returns the top {@code numbers} URLs by count, ordered from most to least frequent.
     * Classic top-K: stream the map through a min-heap of size {@code numbers}, evicting the
     * smallest element whenever a larger count appears, then reverse the drain order.
     *
     * @param storage URL -> count map to rank
     * @param numbers how many entries to keep
     */
    public static List<URLStatistics> getTop(Map<String, Long> storage, int numbers) {
        // Long.compare avoids the overflow of casting a long difference to int.
        PriorityQueue<URLStatistics> priorityQueue =
                new PriorityQueue<>(numbers, (s1, s2) -> Long.compare(s1.getNumbers(), s2.getNumbers()));
        for (Map.Entry<String, Long> entry : storage.entrySet()) {
            URLStatistics urlStatistic = new URLStatistics(entry.getKey(), entry.getValue());
            if (priorityQueue.size() < numbers) { // heap not full yet: always keep
                priorityQueue.add(urlStatistic);
            } else if (entry.getValue() > priorityQueue.peek().getNumbers()) { // beats current minimum
                priorityQueue.remove();
                priorityQueue.add(urlStatistic);
            }
        }
        // Drain ascending, then reverse for a descending result
        // (append + reverse instead of the O(k^2) add-at-index-0 pattern).
        List<URLStatistics> result = new ArrayList<>();
        while (!priorityQueue.isEmpty()) {
            result.add(priorityQueue.remove());
        }
        Collections.reverse(result);
        return result;
    }

    /**
     * Hash-partitions {@code fileStr} into part files of roughly 16 MB under "urlpart/".
     * Lines are routed by hash code, so all occurrences of the same URL land in the same part;
     * individual part sizes are therefore only approximately 16 MB.
     *
     * @param fileStr path of the big input file
     * @return paths of the generated part files
     * @throws RuntimeException wrapping any I/O failure
     */
    public static List<String> splitFile(String fileStr) {
        List<String> result = new ArrayList<>();
        try {
            File dir = new File("urlpart");
            if (!dir.exists()) {
                dir.mkdir();
            }
            // Remove leftovers from a previous run.
            String[] children = dir.list();
            if (children != null) {
                for (String child : children) {
                    new File(dir, child).delete();
                }
            }

            long capacity = 16 * 1024 * 1024; // 16 MB per part; 20 GB => at most 1280 parts

            File file = new File(fileStr);
            int num = (int) ((file.length() - 1) / capacity + 1); // ceil(length / capacity), min 1
            HashMap<Integer, PrintWriter> pwHash = new HashMap<>();
            try {
                for (int i = 0; i < num; i++) {
                    String fileName = "urlpart/urlpart." + i;
                    pwHash.put(i, new PrintWriter(new BufferedWriter(new FileWriter(fileName))));
                }
                try (BufferedReader br = new BufferedReader(new FileReader(file))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        // Mask the sign bit so the modulus is never negative.
                        int mod = (line.hashCode() & Integer.MAX_VALUE) % num;
                        pwHash.get(mod).println(line);
                    }
                }
            } finally {
                // Close every writer even when reading/writing fails
                // (the original only closed them on the success path, leaking on error).
                for (PrintWriter pw : pwHash.values()) {
                    if (pw != null) {
                        pw.close();
                    }
                }
            }

            String[] childrenNew = dir.list();
            if (childrenNew != null) {
                Arrays.stream(childrenNew).forEach(e -> result.add("urlpart/" + e));
            }
        } catch (Exception e) {
            throw new RuntimeException("splitFile err", e);
        }
        return result;
    }

    /**
     * Splits {@code list} into consecutive chunks of at most {@code len} elements.
     * The chunks are {@link List#subList} views backed by the original list.
     *
     * @return the chunks, or {@code null} when the list is null/empty or {@code len < 1}
     *         (preserved from the original contract)
     */
    public static <T> List<List<T>> splitList(List<T> list, int len) {
        if (list == null || list.isEmpty() || len < 1) {
            return null;
        }

        List<List<T>> result = new ArrayList<>();

        int size = list.size();
        int count = (size + len - 1) / len; // ceil(size / len)

        for (int i = 0; i < count; i++) {
            result.add(list.subList(i * len, Math.min((i + 1) * len, size)));
        }
        return result;
    }

    /**
     * Sanity check: brute-force counts how often {@code str} occurs as a full line in
     * "urls.txt" and prints the count, for comparison against the computed top-100 output.
     *
     * @param str exact line (URL) to count
     */
    public static void verifyResult(String str) {
        try (BufferedReader br = new BufferedReader(new FileReader(new File("urls.txt")))) {
            String line;
            long numbers = 0;
            while ((line = br.readLine()) != null) {
                if (line.equals(str)) {
                    numbers++;
                }
            }
            System.out.println(str + ":" + numbers);
        } catch (Exception e) {
            throw new RuntimeException("verifyResult err", e);
        }
    }
}
