package wheel.arithmetic;

import cn.hutool.core.util.RandomUtil;

import java.io.*;
import java.util.*;

/**
 * 归并排序-多路归并排序
 *
 * 在超大数据量时，内存不足以存放全部排序数据，需要使用外存时，这种算法称之为外排序算法。
 * 把需要排序的数据，分为N份，排好序在外存中存为文件。
 * 然后在内存中把这些文件各载入一部分（用完就需要载入，直到这个文件的末尾），
 * 最后在内存进行比较排序，排序好一定数量的数据就输出一份文件。（1）
 * 往复循环，把海量的数据排好了序。
 *
 * （1）每个数据的排序都需要比较各文件中的第一个数据（最大的或最小的，按要排顺序），
 * 这里的比较就有不同的算法，
 * 比如
 * 直接一个个比较，
 * 使用二叉堆？
 * 还有网上讲的胜者树败者树（就是二叉堆的定制版，在弹出操作后的堆调整有所不同）
 *
 * 和二路归并不同，多路归并不需要递归。
 *
 *
 * 难点在于，我对文件的操作不熟悉，(可以看下Hutool有无适用的，或者自己实现一个)
 * 要更优的话，还要配合JVM的内存配置，（外存）机械硬盘的扇区，减少外存的IO操作。
 */
public class MoreMergeSort {

    static String startDataFileName = "d:\\startData";
    static String endDataFileName = "d:/endData";
    static String chunkFileName = "d:/chunkData-";

    // Max number of longs held in memory per chunk (bounded by available heap
    // and the in-memory sort algorithm used).
    static int chunkMaxSize = 10000 * 1000;

    // Total number of longs in the unsorted input file (1e9; the arithmetic
    // stays within int range but is promoted explicitly for clarity).
    static long arrayLength = 10000L * 10000 * 10;

    // Stream buffer size in elements; in theory, matching chunkMaxSize is
    // optimal for I/O since each chunk is then read/written in one batch.
    static int batchMaxSize = chunkMaxSize;


    public static void main(String[] args) throws IOException {

        long startTime = System.currentTimeMillis();
        try {
            // Pipeline stages; earlier stages are commented out once their
            // output files already exist on disk.
//        createUnOrderArrayFile();
//        readTop100();
//        chunkSort();
            compareSort();

        } catch (Exception e) {
            e.printStackTrace();
        }

        System.out.println("总耗时 : " + (System.currentTimeMillis() - startTime) + "ms");
    }

    /**
     * Scans the merged output file sequentially, printing every 1000th value
     * as a spot check. Throws EOFException if the file holds fewer than 1e9 longs.
     */
    private static void readTop100() throws IOException {
        File file = new File(endDataFileName);
        // try-with-resources: the original leaked this stream
        try (DataInputStream dataInputStream = new DataInputStream(
                new BufferedInputStream(new FileInputStream(file), batchMaxSize * 8))) {
            for (int i = 1; i <= 1000000000; i++) {
                long m = dataInputStream.readLong();
                if (i % 1000 == 0) {
                    System.out.println("arr[" + (i - 1) + "]= " + m);
                }
            }
        }
    }

    /**
     * Stage 1: writes {@code arrayLength} random longs to the input file.
     */
    public static void createUnOrderArrayFile() throws IOException {
        File file = new File(startDataFileName);
        try (DataOutputStream dataOutputStream = new DataOutputStream(
                new BufferedOutputStream(new FileOutputStream(file), batchMaxSize * 8))) {
            for (long i = 0; i < arrayLength; i++) {
                if (i % 1000 == 0) {
                    System.out.println("i=> " + i);
                }
                dataOutputStream.writeLong(RandomUtil.randomLong(Long.MIN_VALUE, Long.MAX_VALUE));
            }
        }
    }

    /**
     * Stage 2: reads the input file in batches of at most {@code batchMaxSize}
     * longs, sorts each batch in memory, and writes each sorted batch out as a
     * numbered chunk file (chunkData-1, chunkData-2, ...).
     */
    public static void chunkSort() throws IOException {
        File startFile = new File(startDataFileName);
        try (DataInputStream dataInputStream = new DataInputStream(
                new BufferedInputStream(new FileInputStream(startFile), batchMaxSize * 8))) {

            List<File> chunkFiles = new ArrayList<>();
            Long[] batchArray = new Long[batchMaxSize];
            while (true) {
                int size = readLongArr(dataInputStream, batchArray);
                if (size > 0) {
                    long startTime = System.currentTimeMillis();
                    // BUG FIX: only the filled prefix may be sorted. The original
                    // sorted the whole array, so a partial final batch mixed in
                    // stale values from the previous batch (or nulls on the
                    // first batch) and wrote a corrupted chunk.
                    Long[] toSort = (size == batchArray.length)
                            ? batchArray
                            : Arrays.copyOf(batchArray, size);
                    QuickSort.execute(toSort);
                    long endTime = System.currentTimeMillis();

                    File chunkFile = new File(chunkFileName + (chunkFiles.size() + 1));
                    chunkFiles.add(chunkFile);
                    try (DataOutputStream chunkOut = new DataOutputStream(
                            new BufferedOutputStream(new FileOutputStream(chunkFile)))) {
                        for (int i = 0; i < size; i++) {
                            chunkOut.writeLong(toSort[i]);
                        }
                    }
                    System.out.println(chunkFile.getName() + ", size:" + size
                            + ", 耗时: " + (endTime - startTime) + "ms");
                }

                // A short read means the input file is exhausted.
                if (size < batchMaxSize) {
                    break;
                }
            }
        }
    }

    /**
     * Stage 3: opens every chunk file as a buffered {@link FileDataQueue},
     * merges them via the loser tree ({@code FailTree}), and streams the
     * globally sorted result into the output file. Also validates on the fly
     * that the output is non-decreasing.
     */
    public static void compareSort() throws IOException {
        // Discover chunk files produced by chunkSort(), numbered from 1.
        List<File> files = new ArrayList<>();
        while (true) {
            File file = new File(chunkFileName + (files.size() + 1));
            if (!file.exists()) {
                break;
            }
            files.add(file);
        }
        if (files.isEmpty()) {
            // Guard: also avoids division by zero when sizing the read buffers.
            System.out.println("no chunk files found, nothing to merge");
            return;
        }

        // One buffered queue per chunk file; the total read-buffer budget
        // (batchMaxSize longs) is split evenly across all queues.
        List<DataQueue> fileDataQueues = new ArrayList<>();
        List<DataInputStream> inputs = new ArrayList<>();
        for (int i = 0; i < files.size(); i++) {
            DataInputStream in = new DataInputStream(new BufferedInputStream(
                    new FileInputStream(files.get(i)), (batchMaxSize / files.size()) * 8));
            inputs.add(in);
            fileDataQueues.add(new FileDataQueue(i, in));
        }

        try {
            FailTree failTree = new FailTree(fileDataQueues);

            File endFile = new File(endDataFileName);
            try (DataOutputStream dataOutputStream = new DataOutputStream(
                    new BufferedOutputStream(new FileOutputStream(endFile), batchMaxSize * 8))) {

                Long currVal;
                Long beforeValue = null;
                int count = 0;
                int tempCount = 0;
                long startTime = System.currentTimeMillis();
                while ((currVal = failTree.poll()) != null) {
                    tempCount++;
                    if (tempCount == 1000000) {
                        count += tempCount;
                        System.out.println("array[" + count + "] => " + currVal
                                + ", 当前批次耗时: " + (System.currentTimeMillis() - startTime) + "ms");
                        startTime = System.currentTimeMillis();
                        tempCount = 0;
                    }

                    dataOutputStream.writeLong(currVal);

                    // 验证逻辑（升序）: the merged stream must be non-decreasing.
                    if (beforeValue != null && beforeValue > currVal) {
                        throw new RuntimeException("算法输出结果不正确！！");
                    }
                    beforeValue = currVal;
                }
            }
        } finally {
            // Close every chunk stream; the original leaked all of them.
            for (DataInputStream in : inputs) {
                try {
                    in.close();
                } catch (IOException ignored) {
                    // best-effort cleanup on already-exhausted streams
                }
            }
        }
    }

    /**
     * Fills {@code batchArray} from the stream and returns the number of
     * elements actually read (less than the array length only at end of file).
     *
     * BUG FIX: the original returned {@code i-1} on EOF, silently dropping the
     * last element of the final partial batch — after reading indices
     * 0..i-1, exactly {@code i} values are valid.
     */
    private static int readLongArr(DataInputStream dataInputStream, Long[] batchArray) throws IOException {
        int i = 0;
        for (; i < batchArray.length; i++) {
            try {
                batchArray[i] = dataInputStream.readLong();
            } catch (EOFException e) {
                return i;
            }
        }
        return i;
    }
}





/**
 * A pull-based lane of Long values feeding the multi-way merge.
 * Implementations signal exhaustion by returning null from the
 * read-style methods (see FileDataQueue).
 */
interface DataQueue {

    /** Consumes and returns the next value, or null when the lane is exhausted. */
    Long poll();

    /** Returns the next value without consuming it, or null when exhausted. */
    Long peek();

    /** Removes and returns the next value; equivalent to poll() in the file-backed implementation. */
    Long remove();

    /** Index identifying this lane among the merge inputs. */
    int getSerial();

    /** Number of values read from the backing source so far. */
    Long getReadCount();
}

/**
 * A {@link DataQueue} backed by one sorted chunk file, with a one-element
 * lookahead buffer so the merge can peek at the head without consuming it.
 */
class FileDataQueue implements DataQueue {

    // Count of longs successfully read from the stream.
    long readCount = 0;

    DataInputStream inputStream;
    // One-element lookahead cache, filled by peek() and drained by poll()/remove().
    Long peekCache;
    // Index identifying this lane among the merge inputs.
    int serial;

    public FileDataQueue(int serial, DataInputStream inputStream) {
        this.inputStream = inputStream;
        this.serial = serial;
    }

    @Override
    public Long poll() {
        if (peekCache != null) {
            Long cached = peekCache;
            peekCache = null;
            return cached;
        }
        return read();
    }

    /**
     * Reads the next long from the stream. Returns null at normal end of
     * stream.
     *
     * BUG FIX: the original caught {@code Exception} and returned null for
     * ANY failure — a real disk error mid-merge was indistinguishable from
     * EOF and would silently truncate this lane, corrupting the sorted
     * output. Only {@link EOFException} now means "exhausted"; other I/O
     * failures are surfaced.
     */
    Long read() {
        try {
            Long value = inputStream.readLong();
            readCount++;
            return value;
        } catch (EOFException eof) {
            return null;  // normal end of this chunk file
        } catch (IOException e) {
            throw new UncheckedIOException("read failed on queue " + serial, e);
        }
    }

    @Override
    public Long peek() {
        if (peekCache == null) {
            peekCache = read();
        }
        return peekCache;
    }

    @Override
    public Long remove() {
        // poll() already drains the peek cache first — no need to duplicate
        // that logic here as the original did.
        return poll();
    }

    @Override
    public Long getReadCount() {
        return readCount;
    }

    @Override
    public int getSerial() {
        return serial;
    }

}


/**
 * chunkSort, 分块数据排序使用快速排序算法。为什么快速排序算法呢？因为它是常用的排序算法中最快的，比堆排序和二路归并排序还快还省空间，原地排序、仅需 O(log n) 的递归栈空间。
 * chunkData-1, size:10000000, 耗时: 4937ms
 * chunkData-2, size:10000000, 耗时: 7268ms
 * chunkData-3, size:10000000, 耗时: 5438ms
 * chunkData-4, size:10000000, 耗时: 6028ms
 * chunkData-5, size:10000000, 耗时: 5746ms
 * chunkData-6, size:10000000, 耗时: 5945ms
 * chunkData-7, size:10000000, 耗时: 5911ms
 * chunkData-8, size:10000000, 耗时: 5919ms
 * chunkData-9, size:10000000, 耗时: 5308ms
 * chunkData-10, size:10000000, 耗时: 5420ms
 * chunkData-11, size:10000000, 耗时: 5920ms
 * chunkData-12, size:10000000, 耗时: 5412ms
 * chunkData-13, size:10000000, 耗时: 5065ms
 * chunkData-14, size:10000000, 耗时: 4789ms
 * chunkData-15, size:10000000, 耗时: 5543ms
 * chunkData-16, size:10000000, 耗时: 5206ms
 * chunkData-17, size:10000000, 耗时: 5283ms
 * chunkData-18, size:10000000, 耗时: 5195ms
 * chunkData-19, size:10000000, 耗时: 5288ms
 * chunkData-20, size:10000000, 耗时: 5155ms
 * chunkData-21, size:10000000, 耗时: 5214ms
 * chunkData-22, size:10000000, 耗时: 5937ms
 * chunkData-23, size:10000000, 耗时: 5808ms
 * chunkData-24, size:10000000, 耗时: 5278ms
 * chunkData-25, size:10000000, 耗时: 5315ms
 * chunkData-26, size:10000000, 耗时: 5298ms
 * chunkData-27, size:10000000, 耗时: 5662ms
 * chunkData-28, size:10000000, 耗时: 5982ms
 * chunkData-29, size:10000000, 耗时: 6098ms
 * chunkData-30, size:10000000, 耗时: 6112ms
 * chunkData-31, size:10000000, 耗时: 4869ms
 * chunkData-32, size:10000000, 耗时: 5016ms
 * chunkData-33, size:10000000, 耗时: 4775ms
 * chunkData-34, size:10000000, 耗时: 4940ms
 * chunkData-35, size:10000000, 耗时: 5532ms
 * chunkData-36, size:10000000, 耗时: 4925ms
 * chunkData-37, size:10000000, 耗时: 5149ms
 * chunkData-38, size:10000000, 耗时: 5429ms
 * chunkData-39, size:10000000, 耗时: 5603ms
 * chunkData-40, size:10000000, 耗时: 5525ms
 * chunkData-41, size:10000000, 耗时: 5935ms
 * chunkData-42, size:10000000, 耗时: 4932ms
 * chunkData-43, size:10000000, 耗时: 5444ms
 * chunkData-44, size:10000000, 耗时: 5265ms
 * chunkData-45, size:10000000, 耗时: 5761ms
 * chunkData-46, size:10000000, 耗时: 5288ms
 * chunkData-47, size:10000000, 耗时: 5156ms
 * chunkData-48, size:10000000, 耗时: 5228ms
 * chunkData-49, size:10000000, 耗时: 5304ms
 * chunkData-50, size:10000000, 耗时: 5340ms
 * chunkData-51, size:10000000, 耗时: 5478ms
 * chunkData-52, size:10000000, 耗时: 4638ms
 * chunkData-53, size:10000000, 耗时: 4916ms
 * chunkData-54, size:10000000, 耗时: 4819ms
 * chunkData-55, size:10000000, 耗时: 4997ms
 * chunkData-56, size:10000000, 耗时: 5278ms
 * chunkData-57, size:10000000, 耗时: 5257ms
 * chunkData-58, size:10000000, 耗时: 5252ms
 * chunkData-59, size:10000000, 耗时: 5421ms
 * chunkData-60, size:10000000, 耗时: 5611ms
 * chunkData-61, size:10000000, 耗时: 5640ms
 * chunkData-62, size:10000000, 耗时: 4815ms
 * chunkData-63, size:10000000, 耗时: 4754ms
 * chunkData-64, size:10000000, 耗时: 4838ms
 * chunkData-65, size:10000000, 耗时: 4978ms
 * chunkData-66, size:10000000, 耗时: 5348ms
 * chunkData-67, size:10000000, 耗时: 5264ms
 * chunkData-68, size:10000000, 耗时: 5044ms
 * chunkData-69, size:10000000, 耗时: 5129ms
 * chunkData-70, size:10000000, 耗时: 5320ms
 * chunkData-71, size:10000000, 耗时: 5481ms
 * chunkData-72, size:10000000, 耗时: 4512ms
 * chunkData-73, size:10000000, 耗时: 4701ms
 * chunkData-74, size:10000000, 耗时: 4788ms
 * chunkData-75, size:10000000, 耗时: 4941ms
 * chunkData-76, size:10000000, 耗时: 5361ms
 * chunkData-77, size:10000000, 耗时: 5025ms
 * chunkData-78, size:10000000, 耗时: 5269ms
 * chunkData-79, size:10000000, 耗时: 5222ms
 * chunkData-80, size:10000000, 耗时: 5398ms
 * chunkData-81, size:10000000, 耗时: 5439ms
 * chunkData-82, size:10000000, 耗时: 5683ms
 * chunkData-83, size:10000000, 耗时: 4682ms
 * chunkData-84, size:10000000, 耗时: 4948ms
 * chunkData-85, size:10000000, 耗时: 4866ms
 * chunkData-86, size:10000000, 耗时: 5565ms
 * chunkData-87, size:10000000, 耗时: 5280ms
 * chunkData-88, size:10000000, 耗时: 5183ms
 * chunkData-89, size:10000000, 耗时: 5544ms
 * chunkData-90, size:10000000, 耗时: 5604ms
 * chunkData-91, size:10000000, 耗时: 5436ms
 * chunkData-92, size:10000000, 耗时: 4614ms
 * chunkData-93, size:10000000, 耗时: 4786ms
 * chunkData-94, size:10000000, 耗时: 4855ms
 * chunkData-95, size:10000000, 耗时: 4903ms
 * chunkData-96, size:10000000, 耗时: 5497ms
 * chunkData-97, size:10000000, 耗时: 5377ms
 * chunkData-98, size:10000000, 耗时: 5506ms
 * chunkData-99, size:10000000, 耗时: 5301ms
 * chunkData-100, size:10000000, 耗时: 5415ms
 * 总耗时 : 682289ms
 */


/**
 * mergeSort, 一百个数据块文件，一百个数据队列，每十万个数的排序（采用直接比较的方式获取最小值）耗时500ms左右，每到一千万数时引起磁盘的IO操作会多500ms左右。
 * array[987000000] => 8983421342955492054, 当前批次耗时: 527ms
 * array[988000000] => 9001887705242378776, 当前批次耗时: 527ms
 * array[989000000] => 9020326867465662265, 当前批次耗时: 531ms
 * array[990000000] => 9038803633665970968, 当前批次耗时: 545ms
 * array[991000000] => 9057282013027524420, 当前批次耗时: 1062ms
 * array[992000000] => 9075719333905270596, 当前批次耗时: 530ms
 * array[993000000] => 9094175792548958679, 当前批次耗时: 525ms
 * array[994000000] => 9112623018184882780, 当前批次耗时: 521ms
 * array[995000000] => 9131049738499974662, 当前批次耗时: 526ms
 * array[996000000] => 9149521507121258526, 当前批次耗时: 531ms
 * 总耗时 : 584809ms
 * 总耗时 : 621581ms
 *
 *
 *
 *
 *
 */

//TODO 多路归并排序（快速排序和直接比较版）， 分块排序耗时700秒左右， 合并比较耗时600秒左右， 合计1300秒左右。




//TODO 采用败者树后，合并比较耗时203秒、206秒, 快三倍。

/*

败者树实现
array[7000000] => -9094147410347193611, 当前批次耗时: 107ms
array[8000000] => -9075707035232993338, 当前批次耗时: 110ms
array[9000000] => -9057244412579398004, 当前批次耗时: 121ms
array[10000000] => -9038801773905902790, 当前批次耗时: 153ms
array[11000000] => -9020345627695443689, 当前批次耗时: 2224ms
array[12000000] => -9001872462328622014, 当前批次耗时: 110ms
array[13000000] => -8983421896713783920, 当前批次耗时: 108ms
array[14000000] => -8964971465308686361, 当前批次耗时: 108ms
array[15000000] => -8946518397477897905, 当前批次耗时: 110ms
array[16000000] => -8928051297002201290, 当前批次耗时: 108ms
array[17000000] => -8909607626905694341, 当前批次耗时: 107ms
array[18000000] => -8891181779070039491, 当前批次耗时: 108ms
array[19000000] => -8872721819200433900, 当前批次耗时: 110ms
array[20000000] => -8854261442170350362, 当前批次耗时: 135ms
array[21000000] => -8835825788963741885, 当前批次耗时: 1633ms
array[22000000] => -8817384815811694380, 当前批次耗时: 122ms
array[23000000] => -8798918403746568251, 当前批次耗时: 121ms
array[24000000] => -8780468486365338888, 当前批次耗时: 116ms


在failtree.replenish()中加入try...catch， debug状态下运行程序，耗时暴增八九倍
array[84000000] => -7673797130116645530, 当前批次耗时: 863ms
array[85000000] => -7655376505376012444, 当前批次耗时: 890ms
array[86000000] => -7636942276886655701, 当前批次耗时: 919ms
array[87000000] => -7618507952716796346, 当前批次耗时: 926ms
array[88000000] => -7600047380650430357, 当前批次耗时: 883ms
array[89000000] => -7581583381376801003, 当前批次耗时: 907ms
array[90000000] => -7563133698677395273, 当前批次耗时: 898ms




 */


//TODO 现在读写都是使用BufferStream的实现，可否实现多线程，以减少耗时。
//在内存条件的限制下，写其实是不能优化的，除非一次写的大小变小。
//读的话倒是可以设置缓冲区剩一半时，异步读文件补充。   还是要测量时间上的量!!!!   不能纯靠感觉优化。。。。。
//不过加入并发执行后，可能有些地方要注意线程安全了。