package com.example.performance.jmh.official;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

import java.util.concurrent.TimeUnit;

/**
 * JMH generates lots of synthetic code for the benchmarks for you during the benchmark compilation.
 * JMH can measure the benchmark methods in lots of modes.
 * Users may select the default benchmark mode with a special annotation,
 * or select/override the mode via the runtime options.
 * <p>
 * With this scenario, we start to measure something useful.
 * Note that our payload code potentially throws exceptions, and we can just declare them to be thrown.
 * If the code throws the actual exception, the benchmark execution will stop with an error.
 * <p>
 * When you are puzzled with some particular behavior, it usually helps to look into the generated code.
 * You might see the code is doing not something you intend it to do.
 * Good experiments always follow up on the experimental setup,
 * and cross-checking the generated code is an important part of that follow up.
 * <p>
 * The generated code for this particular sample is somewhere at
 * target/generated-sources/annotations/.../JMHSample_02_BenchmarkModes.java
 */
@SuppressWarnings("java:S101")
public class JMHSample_02_BenchmarkModes {

    /**
     * Mode.Throughput, as stated in its Javadoc,
     * measures the raw throughput by continuously calling the benchmark method in a time-bound iteration,
     * and counting how many times we executed the method.
     * <p>
     * We are using the special annotation to select the units to measure in,
     * although you can use the default.
     */
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.SECONDS)
    public void measureThroughput() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(100);
    }

    /**
     * Mode.AverageTime measures the average execution time,
     * and it does it in the way similar to Mode.Throughput.
     * <p>
     * Some might say it is the reciprocal throughput, and it really is.
     * There are workloads where measuring times is more convenient though.
     */
    @Benchmark
    @BenchmarkMode(Mode.AverageTime)
    @OutputTimeUnit(TimeUnit.MICROSECONDS)
    public void measureAvgTime() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(100);
    }

    /**
     * Mode.SampleTime samples the execution time.
     * With this mode, we are still running the method in a time-bound iteration,
     * but instead of measuring the total time,
     * we measure the time spent in *some* of the benchmark method calls.
     * <p>
     * This allows us to infer the distributions, percentiles, etc.
     * <p>
     * JMH also tries to auto-adjust sampling frequency:
     * if the method is long enough, you will end up capturing all the samples.
     */
    @Benchmark
    @BenchmarkMode(Mode.SampleTime)
    @OutputTimeUnit(TimeUnit.MICROSECONDS)
    public void measureSamples() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(100);
    }

    /**
     * Mode.SingleShotTime measures the single method invocation time.
     * As the Javadoc suggests, we do only the single benchmark method invocation.
     * The iteration time is meaningless in this mode:
     * as soon as benchmark method stops, the iteration is over.
     * <p>
     * This mode is useful to do cold startup tests,
     * when you specifically do not want to call the benchmark method continuously.
     */
    @Benchmark
    @BenchmarkMode(Mode.SingleShotTime)
    @OutputTimeUnit(TimeUnit.MICROSECONDS)
    public void measureSingleShot() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(100);
    }

    /**
     * We can also ask for multiple benchmark modes at once.
     * All the tests above can be replaced with just a single test like this:
     */
    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime, Mode.SingleShotTime})
    @OutputTimeUnit(TimeUnit.MICROSECONDS)
    public void measureMultiple() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(100);
    }

    /**
     * Or even measure in every available mode at once with Mode.All:
     */
    @Benchmark
    @BenchmarkMode(Mode.All)
    @OutputTimeUnit(TimeUnit.MICROSECONDS)
    public void measureAll() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(100);
    }

    /**
     * ============================== HOW TO RUN THIS TEST: ====================================
     * <p>
     * You are expected to see the different run modes for the same benchmark.
     * Note the units are different, scores are consistent with each other.
     * <p>
     * You can run this test:
     * <p>
     * a) Via the command line:
     *    $ mvn clean install
     *    $ java -jar target/benchmarks.jar JMHSample_02_BenchmarkModes -f 1
     *    (we requested a single fork; there are also other options, see -h)
     * <p>
     * b) Via the Java API:
     *    (see the JMH homepage for possible caveats when running from IDE:
     *      <a href="http://openjdk.java.net/projects/code-tools/jmh/">...</a>)
     *
     * @param args ignored; the benchmark selection is hard-coded via {@link OptionsBuilder#include}
     * @throws RunnerException if the benchmark run fails
     */
    public static void main(String[] args) throws RunnerException {
        // Select every benchmark in this class by its simple name, run in a single fork.
        Options opt = new OptionsBuilder()
                .include(JMHSample_02_BenchmarkModes.class.getSimpleName())
                .forks(1)
                .build();

        new Runner(opt).run();
    }

}