package com.usoft.thrift.benchmark;

import com.usoft.thrift.benchmark.util.IdGenerator;
import com.usoft.thrift.benchmark.util.Utils;
import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TJSONProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TProtocolFactory;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TNonblockingSocket;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

/**
 * Unit reference: 1 second (s) = 1,000 milliseconds (ms) = 1,000,000 microseconds (μs) = 1,000,000,000 nanoseconds (ns).
 * Example command-line arguments:
 * --address=127.0.0.1:9998
 * --server=T_NONBLOCKING_SERVER
 * --transport=T_SOCKET
 * --protocol=T_COMPACT_PROTOCOL
 * Created by xinxingegeya on 16/2/25.
 */
public class ThriftQpsClient {

    /** Parsed command-line configuration; never reassigned after construction. */
    private final ClientConfiguration config;

    public ThriftQpsClient(ClientConfiguration config) {
        this.config = config;
    }

    /**
     * Entry point. Parses the command-line arguments into a
     * {@link ClientConfiguration}; on a configuration error prints the message
     * and usage and exits, otherwise runs the benchmark.
     */
    public static void main(String... args) throws Exception {

        ClientConfiguration.Builder configBuilder = ClientConfiguration
            .newBuilder();
        ClientConfiguration config;
        try {
            config = configBuilder.build(args);
        } catch (Exception e) {
            System.out.println(e.getMessage());
            configBuilder.printUsage();
            return;
        }
        ThriftQpsClient client = new ThriftQpsClient(config);
        client.run();
    }

    /**
     * Runs the benchmark: opens {@code config.clientNums} transports, drives
     * RPCs over them until {@code config.duration} seconds elapse, merges the
     * per-client latency histograms, prints statistics, optionally saves the
     * histogram to a file, and always closes the transports.
     */
    private void run() throws IOException, InterruptedException, TException,
            ExecutionException {

        if (config == null) {
            return;
        }

        System.out.println("server=" + config.server + ",transport="
            + config.transport + ",protocol=" + config.protocolType);

        SimpleRequest req = newRequest();

        List<TTransport> transports = new ArrayList<>(config.clientNums);
        for (int i = 0; i < config.clientNums; i++) {
            transports.add(newTransport(config));
        }

        try {
            // Do a warm up first. It's the same as the actual benchmark, except
            // that we ignore the statistics. (Currently disabled.)
//            warmup(req, transports);

            long startTime = System.nanoTime();
            long endTime = startTime + TimeUnit.SECONDS.toNanos(config.duration);
            List<Histogram> histograms = doBenchmark(req, transports, endTime);
            long elapsedTime = System.nanoTime() - startTime;

            Histogram merged = merge(histograms);

            printStats(merged, elapsedTime);
            if (config.histogramFile != null) {
                saveHistogram(merged, config.histogramFile);
            }
        } finally {
            // Close sockets even when the benchmark fails; the original leaked
            // them on any exception thrown above.
            shutdown(transports);
        }
    }

    /** Closes every transport opened for the benchmark. */
    private void shutdown(List<TTransport> transports) {
        for (TTransport transport : transports) {
            transport.close();
        }
    }

    /**
     * Runs the benchmark loop for {@code config.warmupDuration} seconds and
     * discards the results, so that JIT compilation and socket setup do not
     * pollute the measured run. Currently not invoked (see {@link #run()}).
     */
    private void warmup(SimpleRequest req, List<TTransport> transports)
            throws InterruptedException, TException, ExecutionException {

        long endTime = System.nanoTime()
            + TimeUnit.SECONDS.toNanos(config.warmupDuration);
        doBenchmark(req, transports, endTime);
        // I don't know if this helps, but it doesn't hurt trying. We sometimes run warmups
        // of several minutes at full load and it would be nice to start the actual benchmark
        // with a clean heap.
        System.gc();
    }

    /**
     * Starts one benchmark worker per transport and blocks until every worker
     * reaches {@code endTime} (in {@link System#nanoTime()} units).
     *
     * @return one latency histogram (values in microseconds) per client
     */
    private List<Histogram> doBenchmark(SimpleRequest req,
            List<TTransport> transports, long endTime) throws TException,
                    ExecutionException, InterruptedException {

        // One worker (and one future) per client. The original presized this
        // list with outstandingRpcsPerClient, which did not match the number
        // of elements actually added.
        List<Future<Histogram>> futures = new ArrayList<>(config.clientNums);
        for (int i = 0; i < config.clientNums; i++) {
            TTransport transport = transports.get(i);
            TProtocol protocol = newProtocolFactory().getProtocol(transport);
            HelloService.Client client = new HelloService.Client(protocol);

            // A transport must not be opened twice;
            // see org.apache.thrift.transport.TTransport#isOpen().
            if (!transport.isOpen()) {
                transport.open();
            }

            // Only a single outstanding RPC per client is issued here: a
            // synchronous TSocket-based client cannot safely be shared between
            // concurrent callers.
            futures.add(doRpcs(client, req, endTime));
        }
        // Wait for completion and collect the per-client histograms.
        List<Histogram> histograms = new ArrayList<>(futures.size());
        for (Future<Histogram> future : futures) {
            histograms.add(future.get());
        }
        return histograms;
    }

    /** Dispatches to the sync or async benchmark loop per the configured mode. */
    private Future<Histogram> doRpcs(HelloService.Client client,
            SimpleRequest req, long endTime) throws TException {
        switch (config.clientType) {
            case SYNC:
                return doSyncClientCall(client, req, endTime);
            case ASYNC:
                return doAsyncClientCall(client, req, endTime);
            default:
                throw new IllegalStateException(
                    "unsupported rpc type: " + config.clientType);
        }
    }

    /**
     * Asynchronous benchmark mode is not implemented yet. The previous stub
     * returned {@code null}, which surfaced later as a NullPointerException on
     * {@code future.get()} in {@link #doBenchmark}; fail fast instead.
     */
    private Future<Histogram> doAsyncClientCall(HelloService.Client client,
            SimpleRequest req, long endTime) {
        throw new UnsupportedOperationException(
            "async client calls are not implemented yet");
    }

    /** Creates the protocol factory matching the configured protocol type. */
    private TProtocolFactory newProtocolFactory() {
        switch (config.protocolType) {
            case T_BINARY_PROTOCOL:
                return new TBinaryProtocol.Factory();
            case T_COMPACT_PROTOCOL:
                return new TCompactProtocol.Factory();
            case T_TUPLE_PROTOCOL:
                return new TTupleProtocol.Factory();
            case T_JSON_PROTOCOL:
                return new TJSONProtocol.Factory();
            default:
                // IllegalArgumentException is a RuntimeException subtype, so
                // existing callers are unaffected.
                throw new IllegalArgumentException(
                    "unsupported protocol: " + config.protocolType);
        }
    }

    /**
     * Spawns a worker thread that issues synchronous {@code hello} calls in a
     * tight loop until {@code endTime}, recording the inter-call latency in
     * microseconds into a histogram.
     *
     * @return a future that completes with the histogram when the loop ends
     */
    private Future<Histogram> doSyncClientCall(final HelloService.Client client,
            final SimpleRequest req, final long endTime) throws TException {
        final Histogram histogram = new Histogram(Utils.HISTOGRAM_MAX_VALUE,
            Utils.HISTOGRAM_PRECISION);
        final HistogramFuture future = new HistogramFuture(histogram);

        ExecutorService executor = Executors.newSingleThreadExecutor();

        Runnable task = new Runnable() {
            @Override
            public void run() {
                long lastCall = System.nanoTime();
                // Initialize to the current time, not 0: the original left
                // `now` at 0 until the first successful call, so a failing
                // client spun forever because `endTime > 0` is always true.
                long now = lastCall;
                do {
                    try {
                        client.hello(req);
                        now = System.nanoTime();
                        histogram.recordValue((now - lastCall) / 1000);
                        lastCall = now;
                    } catch (TException e) {
                        e.printStackTrace();
                        future.cancel(true);
                        // Stop benchmarking this client after a failure; the
                        // original kept looping on a broken connection.
                        break;
                    }
                } while (endTime > now);
                future.done();
            }
        };
        executor.execute(task);
        // Let the worker thread terminate once its task finishes; the original
        // never shut the executor down, leaking one non-daemon thread per
        // client and keeping the JVM alive after the benchmark.
        executor.shutdown();
        return future;
    }

    /**
     * Creates a transport for the configured transport/server combination.
     *
     * @throws TTransportException if the transport cannot be created
     */
    private TTransport newTransport(ClientConfiguration config)
            throws TTransportException {

        InetSocketAddress address = (InetSocketAddress) config.address;
        switch (config.transport) {
            case T_SOCKET:
                if (config.server == Server.T_NONBLOCKING_SERVER
                    || config.server == Server.T_HS_HA_SERVER
                    || config.server == Server.T_THREAD_SELECTOR_SERVER) {
                    /**
                     * To use this server, you MUST use a TFramedTransport at
                     * the outermost transport, otherwise this server will be
                     * unable to determine when a whole method call has been
                     * read off the wire. Clients must also use TFramedTransport.
                     */
                    return new TFramedTransport(
                        new TSocket(address.getHostName(), address.getPort()));
                }
                return new TSocket(address.getHostName(), address.getPort());
            case T_NONBLOCKING_SOCKET:
                try {
                    return new TNonblockingSocket(address.getHostName(),
                        address.getPort());
                } catch (IOException e) {
                    // The original swallowed this and returned null, deferring
                    // the failure to an opaque NPE later; propagate it instead.
                    throw new TTransportException(e);
                }
            case T_HTTP_CLIENT:
            default:
                throw new IllegalArgumentException(
                    "unsupported transport: " + config.transport);
        }
    }

    /** Builds the request payload sent on every RPC. */
    public SimpleRequest newRequest() {
        SimpleRequest request = new SimpleRequest();
        IdGenerator idGenerator = new IdGenerator(1);
        request.setId(idGenerator.nextId());
        request.setBody("Hello world,i have a dream");
        return request;
    }

    /**
     * Merges the per-client histograms into a single histogram with the same
     * value range and precision.
     */
    private static Histogram merge(List<Histogram> histograms) {
        Histogram merged = new Histogram(Utils.HISTOGRAM_MAX_VALUE,
            Utils.HISTOGRAM_PRECISION);
        for (Histogram histogram : histograms) {
            // Histogram.add() merges bucket counts directly; re-recording each
            // iterated value (as the original did) is slower and re-quantizes.
            merged.add(histogram);
        }
        return merged;
    }

    /** Prints latency percentiles (in microseconds) and overall QPS. */
    private void printStats(Histogram histogram, long elapsedTime) {
        long latency50 = histogram.getValueAtPercentile(50);
        long latency90 = histogram.getValueAtPercentile(90);
        long latency95 = histogram.getValueAtPercentile(95);
        long latency99 = histogram.getValueAtPercentile(99);
        long latency999 = histogram.getValueAtPercentile(99.9);
        long latencyMax = histogram.getValueAtPercentile(100);
        // Guard against division by zero on a degenerate (zero-length) run.
        long queriesPerSecond = histogram.getTotalCount() * 1000000000L
            / Math.max(elapsedTime, 1L);

        StringBuilder values = new StringBuilder();
        values.append("Channels:                       ")
            .append(config.clientNums).append('\n')
            .append("Outstanding RPCs per Channel:   ")
            .append(config.outstandingRpcsPerClient).append('\n')
            .append("50%ile Latency (in micros):     ").append(latency50)
            .append('\n').append("90%ile Latency (in micros):     ")
            .append(latency90).append('\n')
            .append("95%ile Latency (in micros):     ").append(latency95)
            .append('\n').append("99%ile Latency (in micros):     ")
            .append(latency99).append('\n')
            .append("99.9%ile Latency (in micros):   ").append(latency999)
            .append('\n').append("Maximum Latency (in micros):    ")
            .append(latencyMax).append('\n')
            .append("QPS:                            ").append(queriesPerSecond)
            .append('\n');
        System.out.println(values);
    }

    /**
     * Writes the percentile distribution of {@code histogram} to
     * {@code filename}, replacing any previous file.
     */
    static void saveHistogram(Histogram histogram, String filename)
            throws IOException {
        File file = new File(filename);
        if (file.exists() && !file.delete()) {
            System.err.println("Failed deleting previous histogram file: "
                + file.getAbsolutePath());
        }
        // try-with-resources replaces the original's manual try/finally close.
        try (PrintStream log = new PrintStream(new FileOutputStream(file),
                false)) {
            histogram.outputPercentileDistribution(log, 1.0);
        }
    }
}
