package com.hsh;

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import javax.print.attribute.standard.PrinterResolution;

// NOTE(review): the javax.print and jdk.management imports below appear to be
// IDE auto-import accidents — nothing in this file references them, and the
// jdk.management.resource.internal one is a non-portable JDK-internal class.
// Candidates for removal.
import jdk.management.resource.internal.inst.SocketOutputStreamRMHooks;

import org.rocksdb.*;
import org.rocksdb.util.SizeUnit;

/**
 * Toy RocksDB-backed FIFO queue demo.
 *
 * <p>A scheduled producer appends one {@code "<epochSeconds>_<index>"} key per
 * second, while a second scheduled task dumps the built-in
 * {@code "rocksdb.stats"} property. Because keys start with the epoch second,
 * the default byte-wise comparator yields roughly insertion order, so
 * {@link #scan()} reads from the head of the queue.
 *
 * <p>Not thread-safe beyond what RocksDB itself guarantees; intended as a
 * single-process experiment, not production code.
 */
public class RdbQueue {
    static {
        // Load the RocksDB JNI native library once per JVM before any API use.
        RocksDB.loadLibrary();
    }

    // Shared database handle; opened by init() and used by the scheduled tasks.
    static RocksDB db;
    private static String path = "/Users/hsh/rocksdb_queue";

    // Handle of the default column family, needed for iterator creation.
    private ColumnFamilyHandle defaultHandler;

    // Total-order seek is required for full-range iteration because the column
    // family configures a fixed-length prefix extractor (see
    // createColumnFamilyOptions()); without it a seek is confined to one prefix.
    private ReadOptions totalOrderReadOption;
    private WriteOptions writeOption;

    private static Statistics stat;


    public static void main(String[] args) throws RocksDBException {
        // Fail fast with a real exception: the original `assert` is a no-op
        // unless the JVM runs with -ea, which would let the demo continue
        // against a missing directory.
        if (!FileIOUtils.createOrExistsDir(new File(path))) {
            throw new IllegalStateException("could not create RocksDB directory: " + path);
        }

        final RdbQueue rdbqueue = new RdbQueue();
        rdbqueue.init();

        // Producer: append one "epochSeconds_index" key every second.
        final ScheduledExecutorService producer = Executors.newSingleThreadScheduledExecutor();
        producer.scheduleAtFixedRate(() -> {
            final long current = System.currentTimeMillis() / 1000;
            for (int i = 0; i < 1; i++) {
                final String key = current + "_" + i;
                System.out.println("add " + key);
                Pair pair = new Pair();
                pair.setKey(key);
                pair.setValue("value");
                rdbqueue.add(pair);
                System.out.println("add succ");
            }
        }, 1, 1, TimeUnit.SECONDS);

        // Reporter: print RocksDB's internal statistics every second.
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
            System.out.println("stat stat ------------------------------------------------");
            try {
                System.out.println(db.getProperty("rocksdb.stats"));
            } catch (Exception e) {
                // An exception escaping a scheduled task silently cancels all
                // future executions, so swallow-and-log to keep reporting.
                e.printStackTrace();
            }
            System.out.println("stat end ----------------------------------------------------");
        }, 3, 1, TimeUnit.SECONDS);

        // A consumer would drain the queue here by repeating:
        //   scan() -> process each Pair -> delete(pair.getKey())
        // sleeping briefly whenever scan() comes back empty.
    }

    /** Writes one key/value pair; write errors are logged, not propagated. */
    private void add(Pair pair) {
        try {
            // Pin UTF-8 explicitly: the no-arg getBytes() uses the platform
            // default charset (pre-JDK 18), which is not portable.
            db.put(writeOption,
                    pair.getKey().getBytes(StandardCharsets.UTF_8),
                    pair.getValue().getBytes(StandardCharsets.UTF_8));
        } catch (RocksDBException e) {
            e.printStackTrace();
        }
    }

    /**
     * Reads up to 1000 pairs from the head of the queue in byte-wise key order.
     *
     * @return the pairs found, possibly empty; never {@code null}
     */
    private List<Pair> scan() {
        List<Pair> result = new ArrayList<>();
        int fetched = 0;
        // RocksIterator wraps a native resource; try-with-resources frees it.
        try (RocksIterator it = db.newIterator(defaultHandler, totalOrderReadOption)) {
            for (it.seekToFirst(); it.isValid() && fetched < 1000; it.next()) {
                fetched++;
                Pair pair = new Pair();
                pair.setKey(new String(it.key(), StandardCharsets.UTF_8));
                pair.setValue(new String(it.value(), StandardCharsets.UTF_8));
                result.add(pair);
            }
            return result;
        }
    }

    /** Deletes {@code key}; delete errors are logged, not propagated. */
    private void delete(String key) {
        try {
            db.delete(writeOption, key.getBytes(StandardCharsets.UTF_8));
        } catch (RocksDBException e) {
            e.printStackTrace();
        }
    }

    /** Simple mutable key/value holder for queue entries. */
    static class Pair {
        private String key;
        private String value;

        public String getKey() {
            return key;
        }

        public void setKey(String key) {
            this.key = key;
        }

        public String getValue() {
            return value;
        }

        public void setValue(String value) {
            this.value = value;
        }
    }


    /**
     * Opens (creating if missing) the database at {@link #path} and caches the
     * options and column-family handle used by the queue operations.
     *
     * @return the {@link DBOptions} the database was opened with
     * @throws RocksDBException if the database cannot be opened
     */
    private DBOptions init() throws RocksDBException {
        stat = new Statistics();

        totalOrderReadOption = new ReadOptions();
        // Required because the column family sets a prefix extractor.
        totalOrderReadOption.setTotalOrderSeek(true);

        writeOption = new WriteOptions();
        // fsync the WAL on every write: maximally durable, slow; OK for a demo.
        writeOption.setSync(true);

        List<ColumnFamilyDescriptor> descs = new ArrayList<>();
        descs.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, createColumnFamilyOptions()));
        List<ColumnFamilyHandle> handlers = new ArrayList<>();

        final DBOptions dbOptions = getDefaultRocksDBOptions();
        dbOptions.setStatistics(stat);
        // open() fills `handlers` one-to-one with `descs`.
        db = RocksDB.open(dbOptions, path, descs, handlers);
        defaultHandler = handlers.get(0);
        return dbOptions;
    }

    /**
     * Column-family options with a fixed-length (8-byte) prefix extractor, a
     * hash-search block table for prefix scans, and a string-append merge
     * operator.
     */
    public static ColumnFamilyOptions createColumnFamilyOptions() {
        final BlockBasedTableConfig tableConfig = createTableConfig();
        return getDefaultRocksDBColumnFamilyOptions()
                .useFixedLengthPrefixExtractor(8)
                .setTableFormatConfig(tableConfig)
                .setMergeOperator(new StringAppendOperator());
    }


    /**
     * Baseline column-family tuning, following the RocksDB tuning guide.
     * Comments summarize the effect of each knob.
     */
    public static ColumnFamilyOptions getDefaultRocksDBColumnFamilyOptions() {
        final ColumnFamilyOptions opts = new ColumnFamilyOptions();

        // Flushing: size of a single memtable; once exceeded it is marked
        // immutable and a new one is created.
        opts.setWriteBufferSize(64 * SizeUnit.MB);

        // Flushing: max memtables (active + immutable). If the active memtable
        // fills while this many already exist, further writes stall — this
        // happens when flushing is slower than the write rate.
        opts.setMaxWriteBufferNumber(3);

        // Flushing: minimum immutable memtables merged before a flush. Values
        // above 1 coalesce duplicate keys before hitting storage, but every
        // Get() must scan all immutable memtables linearly, hurting reads.
        opts.setMinWriteBufferNumberToMerge(1);

        // Level compaction: L0->L1 compaction triggers at this many L0 files.
        // Steady-state L0 size is therefore roughly
        // write_buffer_size * min_write_buffer_number_to_merge * this value.
        opts.setLevel0FileNumCompactionTrigger(10);

        // Soft L0 file limit: writes start slowing here (0 disables).
        opts.setLevel0SlowdownWritesTrigger(20);

        // Hard L0 file limit: writes stop here.
        opts.setLevel0StopWritesTrigger(40);

        // Level compaction: total size of L1; each subsequent level is
        // max_bytes_for_level_multiplier (default 10, recommended) larger.
        // Recommended to be about the size of L0.
        opts.setMaxBytesForLevelBase(512 * SizeUnit.MB);

        // Level compaction: per-file size in L1 (the default multiplier of 1
        // keeps files equal-sized across levels). Set to
        // max_bytes_for_level_base / 10 so L1 holds about 10 files.
        opts.setTargetFileSizeBase(64 * SizeUnit.MB);

        // With a prefix extractor set, build a memtable prefix bloom filter of
        // write_buffer_size * ratio bytes (ratios above 0.25 are clamped).
        opts.setMemtablePrefixBloomSizeRatio(0.125);

        opts.setCompressionType(CompressionType.LZ4_COMPRESSION)
                .setCompactionStyle(CompactionStyle.LEVEL)
                .optimizeLevelStyleCompaction();
        return opts;
    }

    /** Block table config: hash-search index for prefix scans, 16-bit bloom, 512 MB sharded block cache. */
    private static BlockBasedTableConfig createTableConfig() {
        return new BlockBasedTableConfig()
                .setIndexType(IndexType.kHashSearch) // hash index for prefix scans
                .setBlockSize(4 * SizeUnit.KB)
                .setFilter(new BloomFilter(16, false))
                .setCacheIndexAndFilterBlocks(true)
                .setBlockCacheSize(512 * SizeUnit.MB)
                .setCacheNumShardBits(8);
    }

    /**
     * Baseline DB-level tuning, per
     * https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide.
     */
    public DBOptions getDefaultRocksDBOptions() {
        final DBOptions opts = new DBOptions();

        // Create the database on open() if it does not exist yet.
        opts.setCreateIfMissing(true);

        // Create any missing column families automatically.
        opts.setCreateMissingColumnFamilies(true);

        // -1 keeps every opened file open; raise/lower for large working sets.
        opts.setMaxOpenFiles(-1);

        // Concurrent background compactions (default 1); scale with cores,
        // capped at 4 here.
        opts.setMaxBackgroundCompactions(Math.min(Runtime.getRuntime().availableProcessors(), 4));

        // Concurrent flushes; 1 is usually enough.
        opts.setMaxBackgroundFlushes(1);

        return opts;
    }

}
