// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark_runner.h"
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "benchmark_adjust_repetitions.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

DECLARE_bool(benchmark_enable_random_interleaving);
DECLARE_double(benchmark_random_interleaving_max_overhead);

namespace benchmark {

namespace internal {

MemoryManager* memory_manager = nullptr;

namespace {

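// Hard upper bound on the iteration count for a single repetition: the
// predicted next iteration count is clamped to this, and a run that reaches
// it is always reported (see ShouldReportIterationResults()).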
static constexpr IterationCount kMaxIterations = 1000000000;

BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::BenchmarkInstance& b,
    const internal::ThreadManager::Result& results,
    IterationCount memory_iterations,
    const MemoryManager::Result& memory_result, double seconds,
    int repetition_index) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.run_name = b.name();
  report.error_occurred = results.has_error_;
  report.error_message = results.error_message_;
  report.report_label = results.report_label_;
  // This is the total iterations across all threads.
  report.iterations = results.iterations;
  report.time_unit = b.time_unit();
  report.threads = b.threads();
  report.repetition_index = repetition_index;
  report.repetitions = b.repetitions();

  if (!report.error_occurred) {
    if (b.use_manual_time()) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity();
    report.complexity_lambda = b.complexity_lambda();
    report.statistics = &b.statistics();
    report.counters = results.counters;

    if (memory_iterations > 0) {
      report.has_memory_result = true;
      report.allocs_per_iter =
          static_cast<double>(memory_result.num_allocs) / memory_iterations;
      report.max_bytes_used = memory_result.max_bytes_used;
    }

    internal::Finish(&report.counters, results.iterations, seconds,
                     b.threads());
  }
  return report;
}

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const BenchmarkInstance* b, IterationCount iters,
                 int thread_id, ThreadManager* manager,
                 PerfCountersMeasurement* perf_counters_measurement) {
  internal::ThreadTimer timer(
      b->measure_process_cpu_time()
          ? internal::ThreadTimer::CreateProcessCpuTime()
          : internal::ThreadTimer::Create());
  State st =
      b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
  CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.iterations += st.iterations();
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

class BenchmarkRunner {
 public:
  BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
                  int outer_repetitions_,
                  int inner_repetitions_,
                  std::vector<BenchmarkReporter::Run>* complexity_reports_,
                  RunResults* run_results_)
      : b(b_),
        complexity_reports(*complexity_reports_),
        run_results(*run_results_),
        outer_repetitions(outer_repetitions_),
        inner_repetitions(inner_repetitions_),
        repeats(b.repetitions() != 0 ? b.repetitions() : inner_repetitions),
        has_explicit_iteration_count(b.iterations() != 0),
        pool(b.threads() - 1),
        iters(has_explicit_iteration_count ? b.iterations() : 1),
        perf_counters_measurement(
            PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))),
        perf_counters_measurement_ptr(perf_counters_measurement.IsValid()
                                          ? &perf_counters_measurement
                                          : nullptr) {
    run_results.display_report_aggregates_only =
        (FLAGS_benchmark_report_aggregates_only ||
         FLAGS_benchmark_display_aggregates_only);
    run_results.file_report_aggregates_only =
        FLAGS_benchmark_report_aggregates_only;
    if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
      run_results.display_report_aggregates_only =
          (b.aggregation_report_mode() &
           internal::ARM_DisplayReportAggregatesOnly);
      run_results.file_report_aggregates_only =
          (b.aggregation_report_mode() &
           internal::ARM_FileReportAggregatesOnly);
    }

    // Sanity-check the perf-counter setup unconditionally: the request is
    // independent of which aggregation report mode was chosen.
    CHECK(FLAGS_benchmark_perf_counters.empty() ||
          perf_counters_measurement.IsValid())
        << "Perf counters were requested but could not be set up.";

    for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
      DoOneRepetition(repetition_num);
    }

    // Calculate additional statistics
    run_results.aggregates_only = ComputeStats(run_results.non_aggregates);

    // Maybe calculate complexity report
    if ((b.complexity() != oNone) && b.last_benchmark_instance) {
      auto additional_run_stats = ComputeBigO(complexity_reports);
      run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                         additional_run_stats.begin(),
                                         additional_run_stats.end());
      complexity_reports.clear();
    }
  }

 private:
  const benchmark::internal::BenchmarkInstance& b;
  std::vector<BenchmarkReporter::Run>& complexity_reports;

  RunResults& run_results;

  const int outer_repetitions;
  const int inner_repetitions;
  const int repeats;
  const bool has_explicit_iteration_count;

  std::vector<std::thread> pool;

  IterationCount iters;  // preserved between repetitions!
  // So only the first repetition has to find/calculate it,
  // the other repetitions will just use that precomputed iteration count.

  PerfCountersMeasurement perf_counters_measurement;
  PerfCountersMeasurement* const perf_counters_measurement_ptr;

  struct IterationResults {
    internal::ThreadManager::Result results;
    IterationCount iters;
    double seconds;
  };
  IterationResults DoNIterations() {
    VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";

    std::unique_ptr<internal::ThreadManager> manager;
    manager.reset(new internal::ThreadManager(b.threads()));

    // Run all but one thread in separate threads
    for (std::size_t ti = 0; ti < pool.size(); ++ti) {
      pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
                             manager.get(), perf_counters_measurement_ptr);
    }
    // And run one thread here directly.
    // (If we were asked to run just one thread, we don't create new threads.)
    // Yes, we need to do this here *after* we start the separate threads.
    RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr);

    // The main thread has finished. Now let's wait for the other threads.
    manager->WaitForAllThreads();
    for (std::thread& thread : pool) thread.join();

    IterationResults i;
    // Acquire the measurements/counters from the manager, UNDER THE LOCK!
    {
      MutexLock l(manager->GetBenchmarkMutex());
      i.results = manager->results;
    }

    // And get rid of the manager.
    manager.reset();

    // Adjust real/manual time stats since they were reported per thread.
    i.results.real_time_used /= b.threads();
    i.results.manual_time_used /= b.threads();
    // If we were measuring whole-process CPU usage, adjust the CPU time too.
    if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads();

    VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
            << i.results.real_time_used << "\n";

    // By using KeepRunningBatch a benchmark can iterate more times than
    // requested, so take the iteration count from i.results.
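    // (For example, a benchmark using KeepRunningBatch(100) that was asked
    // for 95 iterations will actually perform 100.)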
    i.iters = i.results.iterations / b.threads();

    // Base decisions off of real time if requested by this benchmark.
    i.seconds = i.results.cpu_time_used;
    if (b.use_manual_time()) {
      i.seconds = i.results.manual_time_used;
    } else if (b.use_real_time()) {
      i.seconds = i.results.real_time_used;
    }

    return i;
  }

  IterationCount PredictNumItersNeeded(const IterationResults& i) const {
    // Estimate by how much the iteration count should be increased.
    // Note: avoid division by zero with max(seconds, 1ns).
    double multiplier =
        b.MinTime() * kSafetyMultiplier / std::max(i.seconds, 1e-9);
    // If our last run was at least 10% of the minimum time, we trust the
    // multiplier and use it directly.
    // Otherwise we cap the expansion at 10x.
    // NOTE: when the last run was at least 10% of the min time, the maximum
    // possible expansion is kSafetyMultiplier / 0.1 == 14x.
    bool is_significant = (i.seconds / b.MinTime()) > 0.1;
    multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
    if (multiplier <= 1.0) multiplier = 2.0;
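    // Worked example (with kSafetyMultiplier == 1.4, per the 14x note above)
    // for MinTime() == 1s: a 0.01s last run is not significant, so its raw
    // multiplier of 1.4 / 0.01 == 140 is clamped down to 10; a 0.5s last run
    // is significant, so its raw multiplier of 1.4 / 0.5 == 2.8 is used
    // as-is.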

    // So what seems to be the sufficiently-large iteration count? Round up.
    const IterationCount max_next_iters = static_cast<IterationCount>(
        std::lround(std::max(multiplier * static_cast<double>(i.iters),
                             static_cast<double>(i.iters) + 1.0)));
    // But we do have *some* sanity limits, though.
    const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);

    VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
    return next_iters;  // Already rounded up above.
  }

  bool ShouldReportIterationResults(const IterationResults& i) const {
    // Determine if this run should be reported:
    // either it has run for a sufficient amount of time,
    // or an error occurred.
    return i.results.has_error_ ||
           i.iters >= kMaxIterations ||  // Too many iterations already.
           i.seconds >= b.MinTime() ||   // The elapsed time is large enough.
           // CPU time was requested, but the elapsed real time greatly
           // exceeds the minimum time. Note that user-provided (manual)
           // timers are exempt from this sanity check.
           ((i.results.real_time_used >= 5 * b.MinTime()) &&
            !b.use_manual_time());
  }

  void DoOneRepetition(int repetition_index) {
    const bool is_the_first_repetition = repetition_index == 0;
    IterationResults i;

    // We *may* be gradually increasing the length (iteration count)
    // of the benchmark until we decide the results are significant.
    // And once we do, we report those last results and exit.
    // Please do note that if there are repetitions, the iteration count
    // is *only* calculated for the *first* repetition, and other repetitions
    // simply use that precomputed iteration count.
    const auto exec_start = benchmark::ChronoClockNow();
    for (;;) {
      i = DoNIterations();
      const auto exec_end = benchmark::ChronoClockNow();

      // Do we consider the results to be significant?
      // If we are doing repetitions, and the first repetition was already done,
      // it has calculated the correct iteration time, so we have run that very
      // iteration count just now. No need to calculate anything. Just report.
      // Else, the normal rules apply.
      const bool results_are_significant = !is_the_first_repetition ||
                                           has_explicit_iteration_count ||
                                           ShouldReportIterationResults(i);

      if (results_are_significant) {
        // The number of repetitions for random interleaving may be reduced
        // to limit the increase in benchmark execution time. When this happens
        // the target execution time for each repetition is increased. We may
        // need to rerun trials to calculate iters according to the increased
        // target execution time.
        bool rerun_trial = false;
        // If random interleaving is enabled and the repetition count has not
        // been initialized yet, do it now.
        if (FLAGS_benchmark_enable_random_interleaving &&
            !b.RandomInterleavingRepetitionsInitialized()) {
          InternalRandomInterleavingRepetitionsInput input;
          input.total_execution_time_per_repetition = exec_end - exec_start;
          input.time_used_per_repetition = i.seconds;
          input.real_time_used_per_repetition = i.results.real_time_used;
          input.min_time_per_repetition = GetMinTime();
          input.max_overhead = FLAGS_benchmark_random_interleaving_max_overhead;
          input.max_repetitions = GetRepetitions();
          b.InitRandomInterleavingRepetitions(
              ComputeRandomInterleavingRepetitions(input));
          // If the number of repetitions changed, we need to rerun the last
          // trial because iters may also change. Note that we only need to do
          // this if i.seconds < b.MinTime(), i.e., the iterations we have run
          // are not enough for the already-adjusted b.MinTime(). Otherwise,
          // we still skip the rerun.
          rerun_trial =
              b.RandomInterleavingRepetitions() < GetRepetitions() &&
              i.seconds < b.MinTime() && !has_explicit_iteration_count;
        }

        if (!rerun_trial) break;  // Good, let's report them!
      }

      // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
      // iteration count, and run the benchmark again...

      iters = PredictNumItersNeeded(i);
      assert(iters > i.iters &&
             "if we did more iterations than we want to do the next time, "
             "then we should have accepted the current iteration run.");
    }

    // Oh, one last thing, we need to also produce the 'memory measurements'..
    MemoryManager::Result memory_result;
    IterationCount memory_iterations = 0;
    if (memory_manager != nullptr) {
      // Only run a few iterations to reduce the impact of one-time
      // allocations in benchmarks that are not properly managed.
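      // This computes ceil(16 / outer_repetitions), capped at the iteration
      // count of the timed run.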
      memory_iterations = std::min<IterationCount>(
          16 / outer_repetitions + (16 % outer_repetitions != 0), iters);
      memory_manager->Start();
      std::unique_ptr<internal::ThreadManager> manager;
      manager.reset(new internal::ThreadManager(1));
      RunInThread(&b, memory_iterations, 0, manager.get(),
                  perf_counters_measurement_ptr);
      manager->WaitForAllThreads();
      manager.reset();

      memory_manager->Stop(&memory_result);
    }

    // OK, now actually report.
    BenchmarkReporter::Run report =
        CreateRunReport(b, i.results, memory_iterations, memory_result,
                        i.seconds, repetition_index);

    if (!report.error_occurred && b.complexity() != oNone)
      complexity_reports.push_back(report);

    run_results.non_aggregates.push_back(report);
  }
};

}  // end namespace

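// Entry point used by the benchmark driver for each benchmark instance.
// A sketch of a typical call site (the names below are illustrative, not the
// actual driver code):
//
//   RunResults run_results;
//   std::vector<BenchmarkReporter::Run> complexity_reports;
//   internal::RunBenchmark(instance, /*outer_repetitions=*/1,
//                          /*inner_repetitions=*/repetition_count,
//                          &complexity_reports, &run_results);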
void RunBenchmark(const benchmark::internal::BenchmarkInstance& b,
                  const int outer_repetitions, const int inner_repetitions,
                  std::vector<BenchmarkReporter::Run>* complexity_reports,
                  RunResults* run_results) {
  internal::BenchmarkRunner r(b, outer_repetitions, inner_repetitions,
                              complexity_reports, run_results);
}

}  // end namespace internal

}  // end namespace benchmark
