/*
 * topological-hex, a program to compute combinatorial hexahedral meshes.
 *
 * Copyright (C) <2018> <Université catholique de Louvain (UCL), Belgique>
 *
 * List of the contributors to the development, description and complete
 * License: see LICENSE file.
 *
 * This program (topological-hex) is free software:
 * you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free
 * Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (see COPYING file).  If not,
 * see <http://www.gnu.org/licenses/>.
 */

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <stdarg.h>

#include <omp.h>
#include <pthread.h>
#include <time.h>

#include "parallel_dfs.h"
#include "client.h"
#include "queue.h"

#include "dfs.h"
#include "shellable_dfs.h"
#include "lower_bound.h"

/**
 * Dynamic array of subproblems, each stored as a full solver state in a
 * solver pool. Grown on demand by @ref frontier_push.
 */
typedef struct frontier {
  solver_pool solvers;      /**< Pool backing the solver states. */
  uint32_t size, capacity;  /**< Used and allocated number of entries. */
} frontier;

/**
 * State used to explore a search tree in parallel.
 *
 * This structure can be used to perform a breadth-first search to subdivide an
 * initial problem into enough subproblems for all threads. This involves the
 * use of the @ref thread_state::frontier and @ref thread_state::next_frontier
 * fields. The necessary fields are initialized using @ref thread_init.
 *
 * Another use of this structure is for distributed clients, where an array of
 * solutions is also maintained. The necessary fields are initialized by @ref
 * distributed_state_init.
 *
 * In a distributed search, after sending the initial set of subproblems, the
 * @ref thread_state::frontier and @ref thread_state::next_frontier fields
 * become invalid. Instead, the @ref communication_environment structure should
 * be used to check the list of subproblems to explore.
 */
typedef struct thread_state {
  /**
   * List of subproblems generated by this thread.
   */
  frontier frontier;

  /**
   * List of subproblems that this thread is currently building. Swapped with
   * @ref thread_state::frontier after each BFS level (see @ref bfs_until).
   */
  frontier next_frontier;

  /**
   * State of the random number generator used in conjunction with @ref
   * xorshift128. Seeded with non-zero words by @ref thread_init.
   */
  uint32_t rand_state[4];

  /**
   * Indicates whether or not this thread has encountered an error.
   */
  error_code status;

  /**
   * Pointer to user data to use when calling the callback hook if a solution is
   * found.
   */
  void *cb_data;

  /**
   * Array of solutions, used for the distributed clients only.
   *
   * Each solution occupies @ref thread_state::element_size bytes. The first
   * byte of each solution is the number n of hexahedra in the solution. This
   * number is followed by the 8n vertices. The remaining bytes are unused.
   *
   * Access to this array and related fields (@ref thread_state::size, @ref
   * thread_state::capacity, @ref thread_state::element_size) should be
   * synchronized using @ref thread_state::solution_lock.
   */
  uint8_t *solution_data;

  /**
   * Number of solutions stored in the solution array.
   */
  uint32_t size;

  /**
   * Amount of space allocated for @ref thread_state::solution_data, in number
   * of solutions.
   */
  uint32_t capacity;

  /**
   * Size of each solution slot in @ref thread_state::solution_data, in bytes
   * (used as the stride of the uint8_t array in @ref store_solution).
   *
   * NOTE(review): store_solution copies 8n * sizeof(vertex_index) bytes into a
   * slot of 1 + 8 * UINT8_MAX bytes; this only fits if sizeof(vertex_index)
   * is 1 -- confirm.
   */
  uint32_t element_size;

  /**
   * Pointer to an atomic counter for the number of solutions found so far.
   *
   * If this pointer is non-NULL, the counter is incremented each time a
   * solution is stored.
   */
  atomic_uint_fast32_t *solution_counter;

  /**
   * Lock used to synchronize accesses to the solution array.
   */
  pthread_mutex_t solution_lock;

  /** Copy of the solver flags (e.g. SOLVER_SHELLABLE_ONLY). */
  uint8_t flags;
  /** Upper-bound cache, only initialized in shellable-only mode. */
  upper_bounds_table ub_table;
} thread_state;

/**
 * Cheap random number generation algorithm (Marsaglia xorshift128) which can
 * be used in parallel: each caller owns its own 4-word state.
 *
 * The state must not be all zero, otherwise the sequence is stuck at zero.
 *
 * @param state Pointer to 4 consecutive 32-bit integers used as the state of
 *   the random number generator; updated in place.
 * @return A random 32-bit integer
 */
static uint32_t xorshift128(uint32_t *state) {
  const uint32_t s = state[0];
  uint32_t t = state[3];

  t ^= t << 11;
  t ^= t >> 8;

  /* Shift the state words down by one position. */
  state[3] = state[2];
  state[2] = state[1];
  state[1] = s;

  t ^= s ^ (s >> 19);
  state[0] = t;

  return t;
}

/**
 * Initializes internal data used by each thread for parallel exploration.
 *
 * Each call to this function should be matched with a call to @ref
 * thread_release.
 */
static void thread_init(thread_state *state, const solver *solver);

/**
 * Releases memory allocated using @ref thread_init.
 */
static void thread_release(thread_state *state);

/**
 * Initializes an empty frontier backed by a solver pool created from
 * @p solver, with a default initial capacity.
 */
static error_code frontier_init(frontier *frontier, const solver *solver);

/**
 * Initializes an empty frontier of capacity @p n from an explicit layout
 * description rather than an existing solver.
 */
static error_code frontier_alloc(frontier *frontier, uint32_t n,
                                 uint32_t num_vertices, uint32_t num_quads,
                                 uint32_t num_hexes, uint32_t num_symmetries,
                                 uint32_t num_boundary_vertices, uint8_t flags);

/**
 * Releases the solver pool owned by @p frontier.
 */
static void frontier_release(frontier *frontier);

/**
 * Reserves a slot for a new subproblem at the end of @p queue, growing the
 * pool if needed, and returns a pointer to it through @p solver.
 */
static
error_code frontier_push(frontier *queue, solver **solver);

/**
 * Discards the most recently pushed subproblem of @p queue.
 */
static
void frontier_pop(frontier *queue);

/**
 * Stores in @p solver a pointer to the @p i -th subproblem of @p queue.
 */
static
void frontier_get(frontier *queue, uint32_t i, solver **solver);

/**
 * Adds all children of a subproblem to the @p output array.
 *
 * @see expand_shellable_subproblem
 */
static
error_code expand_subproblem(solver *solver, frontier *output);

/**
 * Adds all children of a subproblem to the @p output array,
 * but restricts the search to children that can be added so that the solution
 * remains a shelling.
 *
 * @see expand_subproblem
 */
static
error_code expand_shellable_subproblem(solver *solver, frontier *output);

/**
 * Places the problem @p solver in the array of subproblems of @p state.
 */
static
void enqueue_initial_problem(thread_state *state, const solver *solver);

/**
 * Performs a parallel breadth-first traversal of the search tree until reaching
 * a level with @p target_num_problems subproblems.
 *
 * This is used in order to create enough work for all threads to explore in
 * parallel, without needing further communications.
 *
 * @param id Identifier of the current thread.
 * @param n Number of active threads.
 * @param target_num_problems Required number of subproblems to generate.
 * @param atomic_num_subproblems Atomic integer used to count the current number
 *   of subproblems. Should be initialized to 0.
 * @param [out] num_branches Number of branches executed by the algorithm
 */
static
void bfs_until(thread_state *state, uint32_t id, uint32_t n,
               uint32_t target_num_problems,
               atomic_uint_fast32_t *atomic_num_subproblems,
               size_t *num_branches);

/**
 * Replaces the nodes in the frontiers of all threads with their children. This
 * corresponds to one iteration of breadth-first traversal.
 *
 * @param id Identifier of the current thread.
 * @param n Number of active threads.
 */
static
void bfs_step(thread_state *states, uint32_t id, uint32_t n);

/**
 * Shuffles the array of subproblems in @p state. This helps divide the work
 * more evenly.
 */
static
void shuffle_subproblems(thread_state *state);

/**
 * Explores the subproblems in the frontier of each state in parallel.
 *
 * @param n Number of active threads.
 */
static
void explore_subproblems(thread_state *states, uint32_t n);

/**
 * Explores the search tree of @p solver using all available OpenMP threads.
 *
 * The initial problem is first subdivided with a parallel BFS until enough
 * subproblems exist for every thread (see @ref bfs_until); the resulting
 * subproblems are then explored independently (see @ref explore_subproblems).
 *
 * @param cb Callback invoked when a solution is found.
 * @param user_data Opaque pointer forwarded to the callback.
 * @return SUCCESS, or OUT_OF_MEMORY if the per-thread states cannot be
 *   allocated.
 */
error_code solver_run_parallel(
  solver *solver, solver_callback cb, void *user_data) {
  solver->cb = cb;
  solver->user_data = user_data;

  /* Outside of shellable-only mode, if the initial propagation already fails
   * there is nothing to explore. */
  if ((solver->flags & SOLVER_SHELLABLE_ONLY) == 0 && !propagate(solver))
    return SUCCESS;

  uint32_t num_threads = omp_get_max_threads();
  /* BFS target: 2048 subproblems per thread. */
  uint32_t target_num_problems = 2048 * num_threads * 1;

  thread_state *states = malloc(num_threads * sizeof(*states));
  if (!states)
    return OUT_OF_MEMORY;

  atomic_uint_fast32_t atomic_num_subproblems;
  atomic_init(&atomic_num_subproblems, 0);

  size_t num_branches = 0;

  #pragma omp parallel reduction(+: num_branches)
  {
    uint32_t id = omp_get_thread_num();
    uint32_t n = omp_get_num_threads();

    /* NOTE(review): errors recorded in states[id].status by thread_init are
     * never checked afterwards -- confirm this is intentional. */
    thread_init(&states[id], solver);

    /* A single thread seeds the search with a copy of the initial problem. */
    #pragma omp single
    enqueue_initial_problem(&states[id], solver);

    #pragma omp barrier

    size_t added_branches;
    bfs_until(states, id, n, target_num_problems, &atomic_num_subproblems,
              &added_branches);
    shuffle_subproblems(&states[id]);

    #pragma omp barrier

    explore_subproblems(states, n);

    #pragma omp barrier

    /* Sum this thread's BFS branches plus the branches executed inside each
     * of its remaining subproblems; the OpenMP reduction merges the totals. */
    num_branches += added_branches;

    for (size_t i = 0; i < states[id].frontier.size; i++) {
      num_branches +=
        solver_pool_get(&states[id].frontier.solvers, i)->num_branches;
    }

    thread_release(&states[id]);
  }

  solver->num_branches = num_branches;

  free(states);

  return SUCCESS;
}

/**
 * Initializes internal data used by each thread for parallel exploration.
 *
 * Failures (e.g. out of memory while creating the frontiers) are recorded in
 * state->status rather than returned.
 *
 * Each call to this function should be matched with a call to @ref
 * thread_release.
 *
 * @param state State to initialize.
 * @param solver Template problem used to size the frontiers and seed flags.
 */
static void thread_init(thread_state *state, const solver *solver) {
  state->status = SUCCESS;

  /* Bug fix: frontier_init can fail; record the error instead of silently
   * discarding its return value. */
  error_code status;
  if ((status = frontier_init(&state->frontier, solver)) != SUCCESS)
    state->status = status;
  if ((status = frontier_init(&state->next_frontier, solver)) != SUCCESS)
    state->status = status;

  state->cb_data = solver->user_data;

  state->solution_counter = NULL;

  /* Seed the xorshift128 generator; each word is redrawn until non-zero so
   * the state can never be all zero (which would make the generator stick). */
  for (int i = 0; i < 4; i++) {
    do {
      state->rand_state[i] = rand();
    } while (state->rand_state[i] == 0);
  }

  state->flags = solver->flags;

  if (solver->flags & SOLVER_SHELLABLE_ONLY) {
    init_upper_bounds_table(&state->ub_table, solver->num_boundary_vertices - 2,
                            solver->num_vertices - solver->max_vertex_id);
  }
}

/**
 * Releases memory allocated using @ref thread_init.
 */
static void thread_release(thread_state *state) {
  frontier_release(&state->next_frontier);
  frontier_release(&state->frontier);

  /* The upper-bound table only exists in shellable-only mode. */
  if (state->flags & SOLVER_SHELLABLE_ONLY)
    release_upper_bounds_table(&state->ub_table);
}

/**
 * Initializes an empty frontier whose solver pool is created from @p solver.
 *
 * @return SUCCESS, or the error reported by solver_pool_init.
 */
static error_code frontier_init(frontier *frontier, const solver *solver) {
  /* Start with a generous capacity so that the BFS rarely needs to grow. */
  frontier->size = 0;
  frontier->capacity = 8192;

  return solver_pool_init(&frontier->solvers, solver, frontier->capacity);
}

/**
 * Initializes an empty frontier of capacity @p n from an explicit problem
 * layout instead of an existing solver.
 *
 * @return SUCCESS, or the error reported by solver_pool_alloc.
 */
static error_code frontier_alloc(frontier *frontier, uint32_t n,
                                 uint32_t num_vertices, uint32_t num_quads,
                                 uint32_t num_hexes, uint32_t num_symmetries,
                                 uint32_t num_boundary_vertices, uint8_t flags) {
  frontier->size = 0;
  frontier->capacity = n;

  return solver_pool_alloc(&frontier->solvers, num_vertices, num_quads,
                           num_hexes, num_symmetries, num_boundary_vertices,
                           flags, frontier->capacity);
}

/**
 * Releases the solver pool backing @p frontier.
 */
static void frontier_release(frontier *frontier) {
  solver_pool_release(&frontier->solvers);
}

/**
 * Reserves a slot for a new subproblem at the end of @p queue.
 *
 * @param [out] solver Receives a pointer to the reserved solver slot.
 * @return SUCCESS, or the error reported by solver_pool_realloc when the
 *   underlying pool cannot grow.
 */
static
error_code frontier_push(frontier *queue, solver **solver) {
  if (queue->size == queue->capacity) {
    /* Grow by a factor of 3/2. Writing it as capacity + capacity / 2 avoids
     * the earlier overflow of capacity * 3, and the guard guarantees progress
     * for tiny capacities (0 or 1), where capacity * 3 / 2 == capacity. */
    uint32_t new_capa = queue->capacity + queue->capacity / 2;
    if (new_capa <= queue->capacity)
      new_capa = queue->capacity + 1;

    error_code status;
    if ((status = solver_pool_realloc(&queue->solvers, new_capa)) != SUCCESS)
      return status;

    queue->capacity = new_capa;
  }

  *solver = solver_pool_get(&queue->solvers, queue->size);
  queue->size++;

  return SUCCESS;
}

/**
 * Discards the most recently pushed subproblem of @p queue.
 *
 * The caller must ensure the queue is non-empty: the size is decremented
 * without an underflow check.
 */
static
void frontier_pop(frontier *queue) {
  queue->size--;
}

/**
 * Stores in @p solver a pointer to the @p i -th subproblem of @p queue.
 */
static
void frontier_get(frontier *queue, uint32_t i, solver **solver) {
  *solver = solver_pool_get(&queue->solvers, i);
}

/**
 * Adds all children of a subproblem to the @p output array.
 *
 * A branching variable is selected with choose_variable; for each value of its
 * current domain, a copy of @p solver restricted to that single value is
 * pushed to @p output. Copies whose propagation fails are discarded again.
 *
 * @see expand_shellable_subproblem
 */
static
error_code expand_subproblem(solver *solver, frontier *output) {
  no_goods_delta delta;
  solver_start_branch(solver, &delta);

  /* Branch accounting restarts for this subdivision step. */
  solver->num_branches = 0;

  error_code status;

  vertex_set old_domain;
  quad_vertex_index vertex_id = choose_variable(solver, &old_domain);
  VERTEX_SET_FOREACH(old_domain, x) {
    struct solver *copy;
    if ((status = frontier_push(output, &copy)) != SUCCESS)
      return status;

    solver_copy(copy, solver);

    /* Restrict the copy's domain for the branching variable to {x}. */
    vertex_set new_domain;
    vertex_set_init_empty(&new_domain);
    vertex_set_insert(&new_domain, x);
    copy->domains[vertex_id] = new_domain;

    /* Discard copies proved infeasible by propagation. */
    if (!propagate(copy)) frontier_pop(output);

    /* Record the tried assignment in the no-goods so later siblings do not
     * repeat it. NOTE(review): semantics inferred from the no-goods naming --
     * confirm against solver_is_no_good's definition. */
    solver_is_no_good(solver, vertex_id, x, &delta);
  }

  return SUCCESS;
}

/**
 * Pushes a node to the output array, and backtrack to its parent state.
 */
static
error_code push_shellable_child(
  solver *solver, frontier *output,
  shellable_delta *delta, no_goods_delta *symmmetry_delta);

/**
 * Adds all children of a subproblem to the @p output array, but restricts the
 * search to children that can be added so that the solution remains a
 * shelling.
 *
 * Children are generated by applying every allowed flip (1-5, 5-1, 2-4, 4-2,
 * 3-line, 3-cycle) around the quads still present in the quad queue. Before
 * branching, lower bounds on the remaining vertices and hexahedra prune the
 * subtree, and complete meshes are reported through the solver callback.
 *
 * @see expand_subproblem
 */
static
error_code expand_shellable_subproblem(solver *solver, frontier *output) {
  solver->hex_id++;
  solver->bound_mask = 0;

  /* Branch accounting restarts for this subdivision step. */
  solver->num_branches = 0;

  /* Every hexahedron has been placed: this node has no children. */
  if (solver->hex_id == solver->num_hexes)
    return SUCCESS;

  shellable_delta delta;
  no_goods_delta symmetry_delta;

  solver_start_branch(solver, &symmetry_delta);

  /* If the current surface can be closed into a complete mesh, report the
   * solution via the callback, unless a symmetric state dominates it. */
  quad_index boundary[6];
  if (allowed_finish_mesh(solver, boundary)) {
    finish_mesh(solver, boundary);

    solver->bound_mask = ~(uint32_t)0;
    if (!is_dominated(solver)) {
      solver->hex_id++;
      solver->cb(solver, solver->hex_id, solver->user_data);
      solver->hex_id--;
    }
    solver->bound_mask = 0;

    undo_finish_mesh(solver, boundary);
  }

  /* Collect the quads occurring exactly once in the queue.
   * NOTE(review): VLA sized by solver->max_quad_id lives on the stack --
   * confirm max_quad_id stays small enough to avoid overflow. */
  vertex_index v_min, h_min;
  uint32_t num_quads = 0;
  quad_index quads[solver->max_quad_id];
  for (uint32_t i = solver->quad_queue_offset; i < solver->quad_queue_size; i++) {
    quad_index q = solver->quad_queue[i];
    if (solver->num_occurrences[q] == 1) {
      quads[num_quads++] = q;
    }
  }

  component_lower_bound(solver, quads, num_quads, &v_min, &h_min);

  /* Prune when the lower bounds exceed the remaining vertex/hex budget, or
   * when a cached upper bound proves this subtree cannot succeed. */
  if (v_min > solver->num_vertices - solver->max_vertex_id ||
      h_min > solver->num_hexes - solver->hex_id ||
      look_up_upper_bound(solver, quads, num_quads))
    return SUCCESS;

  check_known_solutions(solver, quads, num_quads);

  error_code status = SUCCESS;

  /* Branch on every allowed flip around each quad still in use; each flip is
   * applied here and undone inside push_shellable_child. */
  for (uint32_t i = solver->quad_queue_offset;
       i < solver->quad_queue_size; i++) {
    quad_index q = solver->quad_queue[i];
    if (solver->num_occurrences[q] != 0) {
      if (allowed_flip_1_to_5(solver, q)) {
        flip_1_to_5(solver, q, &delta);
        if ((status = push_shellable_child(
               solver, output, &delta, &symmetry_delta)) != SUCCESS)
          return status;
      }

      if (allowed_flip_5_to_1(solver, q)) {
        flip_5_to_1(solver, q, &delta);
        if ((status = push_shellable_child(
               solver, output, &delta, &symmetry_delta)) != SUCCESS)
          return status;
      }

      /* Edge-based flips: try all four edges of the quad. */
      for (quad_edge_index j = 0; j < 4; j++) {
        quad_edge edge = make_quad_edge(q, j);

        if (allowed_flip_2_to_4(solver, edge)) {
          flip_2_to_4(solver, edge, &delta);
          if ((status = push_shellable_child(
                 solver, output, &delta, &symmetry_delta)) != SUCCESS)
            return status;
        }

        if (allowed_flip_4_to_2(solver, edge)) {
          flip_4_to_2(solver, edge, &delta);
          if ((status = push_shellable_child(
                 solver, output, &delta, &symmetry_delta)) != SUCCESS)
            return status;
        }

        if (allowed_flip_3_line(solver, edge)) {
          flip_3_line(solver, edge, &delta);
          if ((status = push_shellable_child(
                 solver, output, &delta, &symmetry_delta)) != SUCCESS)
            return status;
        }

        if (allowed_flip_3_cycle(solver, edge)) {
          flip_3_cycle(solver, edge, &delta);
          if ((status = push_shellable_child(
                 solver, output, &delta, &symmetry_delta)) != SUCCESS)
            return status;
        }
      }
    }
  }

  return status;
}

/**
 * Pushes a node to the output array, and backtracks to its parent state.
 *
 * On entry, @p solver holds the child state produced by a flip; a copy of it
 * is stored in @p output, then the flip is undone on @p solver using
 * @p delta. Dominated copies (symmetry check) are discarded again.
 */
static
error_code push_shellable_child(
  solver *solver, frontier *output,
  shellable_delta *delta, no_goods_delta *symmetry_delta) {
  error_code status;
  struct solver *copy;
  if ((status = frontier_push(output, &copy)) != SUCCESS)
      return status;

  solver_copy(copy, solver);

  /* Restore the parent state before the next sibling is generated. */
  undo_shellable_delta(solver, delta);

  /* Enable every bound check on the stored child, then keep it only if it is
   * not dominated; otherwise record the move as a no-good on the parent. */
  copy->bound_mask = ~(uint32_t)0;
  if (is_dominated(copy))
    frontier_pop(output);
  else
    solver_hex_is_no_good(solver, symmetry_delta);

  return SUCCESS;
}

/**
 * Places a copy of the problem @p solver in the frontier of @p state.
 *
 * On allocation failure the error is recorded in state->status.
 */
static
void enqueue_initial_problem(thread_state *state, const solver *solver) {
  struct solver *slot;
  error_code status = frontier_push(&state->frontier, &slot);

  if (status != SUCCESS) {
    state->status = status;
    return;
  }

  solver_copy(slot, solver);
}

/**
 * Performs a parallel breadth-first traversal of the search tree until
 * reaching a level with at least @p target_num_problems subproblems, or until
 * the frontier becomes empty.
 *
 * Every active thread must call this function: the OpenMP barriers keep the
 * threads in lockstep between BFS levels.
 *
 * @param id Identifier of the current thread.
 * @param n Number of active threads.
 * @param target_num_problems Required number of subproblems to generate.
 * @param atomic_num_subproblems Atomic integer used to count the current
 *   number of subproblems. Should be initialized to 0.
 * @param [out] num_branches Number of branches executed by the algorithm, or
 *   NULL if the count is not needed.
 */
static
void bfs_until(thread_state *states, uint32_t id, uint32_t n,
               uint32_t target_num_problems,
               atomic_uint_fast32_t *atomic_num_subproblems,
               size_t *num_branches) {
  if (num_branches) *num_branches = 0;
  uint32_t num_subproblems = 1;
  while (0 < num_subproblems && num_subproblems < target_num_problems) {
    /* Every node of the current frontier gets expanded (branched on). */
    if (num_branches)
      *num_branches += states[id].frontier.size;
    bfs_step(states, id, n);

    /* Publish how many children this thread produced during this level. */
    atomic_fetch_add_explicit(
      atomic_num_subproblems, states[id].next_frontier.size,
      memory_order_acq_rel);

    /* Wait until every thread has contributed its count. */
    #pragma omp barrier

    num_subproblems = atomic_load_explicit(atomic_num_subproblems,
                                           memory_order_acquire);

    /* The freshly built next_frontier becomes the current frontier; the old
     * frontier is recycled (emptied) as the next one. */
    frontier tmp = states[id].frontier;
    states[id].frontier      = states[id].next_frontier;
    states[id].next_frontier = tmp;

    states[id].next_frontier.size = 0;

    /* All threads must have read the counter before it is reset. */
    #pragma omp barrier

    atomic_store_explicit(atomic_num_subproblems, 0, memory_order_release);

    /* All threads must see the reset before the next level starts counting. */
    #pragma omp barrier
  }
}

/**
 * Replaces the nodes in the frontiers of all threads with their children. This
 * corresponds to one iteration of breadth-first traversal.
 *
 * The expansion of every thread's frontier is work-shared across the team
 * with `omp for nowait`; each thread always writes children into its OWN
 * next_frontier, so no locking is required.
 *
 * @param id Identifier of the current thread.
 * @param n Number of active threads.
 */
static
void bfs_step(thread_state *states, uint32_t id, uint32_t n) {
  for (uint32_t i = 0; i < n; i++) {
    #pragma omp for nowait
    for (uint32_t j = 0; j < states[i].frontier.size; j++) {
      struct solver *solver;
      frontier_get(&states[i].frontier, j, &solver);

      /* The subproblem may have been produced by another thread: rebind the
       * callback data to the thread doing the expansion. */
      solver->user_data = states[id].cb_data;

      error_code status;
      if (solver->flags & SOLVER_SHELLABLE_ONLY) {
        /* solver->ub_data = &states[id].ub_table; */
        status = expand_shellable_subproblem(solver, &states[id].next_frontier);
      }
      else
        status = expand_subproblem(solver, &states[id].next_frontier);

      /* Errors are recorded in the per-thread state, not propagated. */
      if (status != SUCCESS) {
        states[id].status = status;
      }
    }
  }
}

/**
 * Shuffles the array of subproblems in @p state with a Fisher-Yates shuffle.
 * This helps divide the work more evenly between threads.
 *
 * The first slot of the (currently empty) next_frontier pool is used as
 * scratch space for the three-way solver swap.
 */
static
void shuffle_subproblems(thread_state *state) {
  solver *tmp = solver_pool_get(&state->next_frontier.solvers, 0);

  for (uint32_t i = 0; i + 1 < state->frontier.size; i++) {
    /* Bug fix: the original drew j in [0, size - i), which biases the shuffle
     * and can swap with already-placed elements. Fisher-Yates draws j
     * uniformly in [i, size). */
    uint32_t j =
      i + xorshift128(state->rand_state) % (state->frontier.size - i);
    if (i == j) continue;

    struct solver *solver_a, *solver_b;
    frontier_get(&state->frontier, i, &solver_a);
    frontier_get(&state->frontier, j, &solver_b);

    solver_copy(tmp, solver_a);
    solver_copy(solver_a, solver_b);
    solver_copy(solver_b, tmp);
  }
}

/**
 * Explores the subproblems in the frontier of each state in parallel, running
 * a full depth-first search (shellable_dfs or choose_vertex) on each of them.
 *
 * @param n Number of active threads.
 */
static
void explore_subproblems(thread_state *states, uint32_t n) {
  /*
   * Use dynamic scheduling policy because of the very high variations in size
   * for the different subproblems.
   */
  for (uint32_t i = 0; i < n; i++) {
    #pragma omp for schedule(dynamic) nowait
    for (uint32_t j = 0; j < states[i].frontier.size; j++) {
      struct solver *solver;
      frontier_get(&states[i].frontier, j, &solver);
      if (solver->flags & SOLVER_SHELLABLE_ONLY) {
        /* solver->ub_data = &states[id].ub_table; */
        shellable_dfs(solver);
      }
      else
        choose_vertex(solver);
    }
  }
}

/**
 * Stores a solution in a @ref thread_state instance. This should be used as a
 * callback called when a @ref solver finds a solution.
 *
 * Thread-safe: all accesses to the solution array are guarded by
 * thread_state::solution_lock. When growing the array fails, the solution is
 * silently dropped.
 *
 * @param solver Solver that found the solution; its hexes array is copied.
 * @param num_hexes Number of hexahedra in the solution (stored in one byte).
 * @param data Pointer to the thread_state receiving the solution.
 */
static void store_solution(const solver *solver, uint32_t num_hexes,
                           void *data) {
  thread_state *state = data;

  pthread_mutex_lock(&state->solution_lock);

  if (state->size == state->capacity) {
    uint32_t new_capa = state->capacity * 3 / 2;
    /* Renamed from `data`, which shadowed the callback parameter. */
    uint8_t *grown = realloc(state->solution_data,
                             new_capa * state->element_size);
    if (!grown) {
      /* Bug fix: the original returned while still holding the lock, which
       * would deadlock every subsequent solution callback. Release the lock
       * and drop the solution instead. */
      pthread_mutex_unlock(&state->solution_lock);
      return;
    }

    state->capacity = new_capa;
    state->solution_data = grown;
  }

  /* Slot layout: one byte holding the hexahedron count, followed by the
   * 8 * num_hexes vertices. */
  uint32_t offset = state->element_size * state->size;
  state->solution_data[offset] = num_hexes;

  memcpy(state->solution_data + offset + 1,
         solver->hexes, 8 * num_hexes * sizeof(vertex_index));

  state->size++;
  if (state->solution_counter) {
    atomic_fetch_add_explicit(state->solution_counter, 1,
                              memory_order_acq_rel);
  }

  pthread_mutex_unlock(&state->solution_lock);
}

/**
 * Initializes all fields required for performing a parallel BFS and sending the
 * results to a server.
 *
 * Each call to this function should be matched with a call to @ref
 * distributed_state_release. Failures are recorded in state->status.
 */
static void distributed_state_init(thread_state *state,
                                   const solver *solver) {
  thread_init(state, solver);

  /* Solutions are stored back into this state, so the callback data points
   * at the state itself. */
  state->cb_data = state;

  /* One count byte plus room for 8 * UINT8_MAX vertex entries per slot. */
  state->element_size = (1 + 8 * UINT8_MAX);
  state->capacity = 16;
  state->size = 0;

  state->solution_data = malloc(state->element_size * state->capacity);
  if (state->solution_data == NULL)
    state->status = OUT_OF_MEMORY;

  if (pthread_mutex_init(&state->solution_lock, NULL) != 0)
    state->status = SYSTEM_ERROR;
}

/**
 * Releases memory allocated by @ref distributed_state_init.
 */
static void distributed_state_release(thread_state *state) {
  pthread_mutex_destroy(&state->solution_lock);
  free(state->solution_data);
  thread_release(state);
}

/**
 * Uses a BFS to subdivide an initial problem, for the purpose of running a
 * distributed computation.
 *
 * @param [out] max_no_goods_size Maximum number of entries in the no_goods
 *   array, or NULL if this value should not be computed.
 */
static void subdivide_initial_problem(thread_state *states, solver *solver,
                                      uint32_t target_num_subproblems,
                                      size_t *max_no_goods_size);

/**
 * Sends all known solutions found so far and empties out the
 * solution array in each thread state.
 *
 * This function can be called even if some threads are currently exploring
 * subproblems and potentially finding solutions at the same time.
 *
 * @param client Connection to the server
 * @param states State of each thread
 * @param num_threads Number of active threads
 */
static error_code send_known_solutions(client *client, thread_state *states,
                                       uint32_t num_threads);

/**
 * Sends the list of subproblems generated after a call to @ref
 * subdivide_initial_problem.
 *
 * This function also uses @ref client_notify_finished, meaning that no more
 * subproblems can be sent after calling this function.
 */
static error_code send_initial_subproblems(client *client, thread_state *states,
                                           uint32_t num_threads);

/**
 * Main entry point for a distributed client. All clients, regardless of
 * whether or not they read the input file, should eventually enter this
 * function to start receiving subproblems from the server.
 *
 * @param chunk_size Determines how many subproblems the client should request
 *   when asking the server for more subproblems. Faster machines should use
 *   higher chunk sizes to hide communication latency.
 */
static error_code distributed_loop(client *client,
                                   client_log log, void *log_data,
                                   uint32_t chunk_size);

/**
 * Runs the initial phase of a distributed computation: subdivides @p solver
 * into subproblems, sends the layout, the solutions found while subdividing,
 * and the subproblems to the server, then enters the regular client loop.
 *
 * @param server_name Host to connect to.
 * @param log Logging callback.
 *   NOTE(review): invoked unconditionally on layout failure -- confirm it may
 *   never be NULL.
 * @param chunk_size Number of subproblems to request at once in the client
 *   loop.
 * @param num_subproblems Number of subproblems to generate before sending.
 * @return SUCCESS, or the first error encountered.
 */
error_code solver_run_distributed(
  solver *solver, const char *server_name,
  client_log log, void *log_data,
  uint32_t chunk_size, uint32_t num_subproblems) {
  error_code status = SUCCESS;

  uint32_t num_threads = omp_get_max_threads();

  client client;
  if ((status = client_init(&client, server_name)) != SUCCESS)
    goto fail_init_client;

  thread_state *states = malloc(num_threads * sizeof(*states));
  if (!states) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_states;
  }

  size_t max_no_goods;
  subdivide_initial_problem(states, solver, num_subproblems, &max_no_goods);

  if ((status = client_send_layout(
         &client, solver->num_vertices, solver->num_quads,
         solver->num_hexes, solver->num_symmetries,
         solver->num_boundary_vertices,
         max_no_goods, solver->flags)) != SUCCESS) {
    log("Failed to send layout to the server\n", log_data);
    goto fail_send;
  }

  if ((status = send_known_solutions(&client, states, num_threads)) != SUCCESS)
    goto fail_send;

  if ((status = send_initial_subproblems(&client, states,
                                         num_threads)) != SUCCESS)
    goto fail_send;

  /* The per-thread states are no longer needed once everything has been sent;
   * states is reset to NULL so the error path below does not free it twice. */
  for (uint32_t i = 0; i < num_threads; i++)
    distributed_state_release(&states[i]);
  free(states);
  states = NULL;

  status = distributed_loop(&client, log, log_data, chunk_size);

fail_send:
  if (states) {
    for (uint32_t i = 0; i < num_threads; i++)
      distributed_state_release(&states[i]);
    free(states);
  }
fail_alloc_states:
  client_close(&client);
fail_init_client:
  return status;
}

/**
 * Connects to @p server_name and runs the distributed client loop, without
 * subdividing or sending any initial problem.
 *
 * @return SUCCESS, or the first error encountered.
 */
error_code solver_run_distributed_client(const char *server_name,
                                         client_log log, void *log_data,
                                         uint32_t chunk_size) {
  client client;

  error_code status = client_init(&client, server_name);
  if (status != SUCCESS)
    return status;

  status = distributed_loop(&client, log, log_data, chunk_size);

  client_close(&client);
  return status;
}

/**
 * Uses a BFS to subdivide an initial problem, for the purpose of running a
 * distributed computation.
 *
 * Solutions found during the subdivision are stored in the per-thread states
 * through the store_solution callback.
 *
 * @param [out] max_no_goods_size Maximum number of entries in the no_goods
 *   array over all generated subproblems, or NULL if this value should not be
 *   computed.
 */
static void subdivide_initial_problem(thread_state *states, solver *solver,
                                      uint32_t target_num_subproblems,
                                      size_t *max_no_goods_size) {
  atomic_uint_fast32_t atomic_num_subproblems;
  atomic_init(&atomic_num_subproblems, 0);

  solver->cb = store_solution;

  #pragma omp parallel
  {
    uint32_t id = omp_get_thread_num();
    uint32_t n = omp_get_num_threads();

    distributed_state_init(&states[id], solver);

    /* One thread seeds the BFS; outside of shellable-only mode the seed is
     * only enqueued when the initial propagation succeeds. */
    #pragma omp single
    {
      solver->user_data = states[id].cb_data;
      if ((solver->flags & SOLVER_SHELLABLE_ONLY) || propagate(solver)) {
        enqueue_initial_problem(&states[id], solver);
      }
    }

    #pragma omp barrier

    bfs_until(states, id, n, target_num_subproblems, &atomic_num_subproblems,
              NULL);
    shuffle_subproblems(&states[id]);
  }

  /* Find the largest no_goods array over every generated subproblem. */
  if (max_no_goods_size) {
    size_t result = 0;

    #pragma omp parallel for reduction(max: result)
    for (size_t i = 0; i < (size_t)omp_get_max_threads(); i++) {
      for (size_t j = 0; j < states[i].frontier.size; j++) {
        struct solver *solver = solver_pool_get(&states[i].frontier.solvers, j);
        if (solver->no_goods_size > result)
          result = solver->no_goods_size;
      }
    }

    *max_no_goods_size = result;
  }
}

/**
 * Sends all known solutions found so far and empties out the solution array in
 * each thread state.
 *
 * This function can be called even if some threads are currently exploring
 * subproblems and finding solutions at the same time.
 *
 * @param client Connection to the server
 * @param states State of each thread
 * @param num_threads Number of active threads
 * @return SUCCESS, or the first error returned by client_send_solution.
 */
static error_code send_known_solutions(client *client, thread_state *states,
                                       uint32_t num_threads) {
  error_code status = SUCCESS;

  for (uint32_t i = 0; i < num_threads; i++) {
    /* Bug fix: `size` was read before acquiring the lock, which is a data
     * race with concurrent store_solution calls. Take the lock first. */
    pthread_mutex_lock(&states[i].solution_lock);

    if (states[i].size != 0) {
      uint32_t element_size = states[i].element_size;

      for (uint32_t j = 0; j < states[i].size; j++) {
        /* Slot layout: data[0] holds the hex count, data + 1 the vertices. */
        uint8_t *data = states[i].solution_data + element_size * j;
        if ((status = client_send_solution(client, data + 1,
                                           data[0])) != SUCCESS) {
          pthread_mutex_unlock(&states[i].solution_lock);
          fprintf(stderr, "Failed to send solution to the server!\n");
          return status;
        }
      }

      if (states[i].solution_counter) {
        atomic_fetch_sub_explicit(states[i].solution_counter, states[i].size,
                                  memory_order_acq_rel);
      }

      states[i].size = 0;
    }

    pthread_mutex_unlock(&states[i].solution_lock);
  }

  return status;
}

/**
 * Sends the list of subproblems generated after a call to @ref
 * subdivide_initial_problem, then notifies the server that no more
 * subproblems will follow.
 */
static error_code send_initial_subproblems(client *client, thread_state *states,
                                           uint32_t num_threads) {
  error_code status;

  for (uint32_t i = 0; i < num_threads; i++) {
    status = client_send_subproblems(client, 0, states[i].frontier.size,
                                     &states[i].frontier.solvers);
    if (status != SUCCESS) {
      fprintf(stderr, "Failed to send subproblems to the server\n");
      return status;
    }
  }

  status = client_finish_send_subproblems(client);
  if (status != SUCCESS)
    fprintf(stderr, "Failed to finish sending subproblems to the server\n");

  return status;
}

/**
 * Represents a group of subproblems to be explored as a whole.
 *
 * Chunks are used by the distributed client to avoid synchronization for every
 * single subproblem. Instead, the client only communicates with the server
 * when an entire chunk has been explored.
 */
typedef struct chunk {
  /**
   * Index of this chunk. The index of its first subproblem is obtained by
   * multiplying this number with the number of threads and the number of
   * subproblems per thread, @ref
   * communication_environment::chunk_size.
   */
  uint32_t id;

  /**
   * Number of subproblems in this chunk.
   */
  uint32_t n;
} chunk;

/**
 * Contains all data needed for a client to receive and explore subproblems from
 * the server.
 */
typedef struct communication_environment {
  /**
   * Connection with the server.
   */
  client *client;

  /** Logging callback and its user data. */
  client_log log;
  void *log_data;

  /**
   * Queue containing explored chunks.
   *
   * Upon getting such a subproblem, the client thread will request more work
   * from the server.
   */
  concurrent_queue finished_chunks;

  /**
   * Queue containing unexplored chunks.
   *
   * When a thread can't find any more work, it should pop an element from this
   * queue to get more. After popping a value, it should reset @ref
   * communication_environment::owners for the retrieved subproblems, and update
   * @ref communication_environment::num_available and @ref
   * communication_environment::num_unfinished to reflect the size of the chunk
   * it retrieved.
   */
  concurrent_queue available_chunks;

  /**
   * Number of active threads. This does not include the thread used for
   * communications purposes.
   */
  uint32_t num_threads;

  /**
   * State of each thread.
   *
   * The frontier array, normally used for the BFS, remains uninitialized here.
   */
  thread_state *states;

  /**
   * Number of solutions the client needs to send to the server.
   */
  atomic_uint_fast32_t num_solutions;

  /**
   * IDs of the subproblems sent by the server.
   *
   * When the exploration of a chunk terminates, the client needs to send the
   * IDs of the explored subproblems to the server.
   */
  uint32_t *ids;

  /**
   * List of all subproblems
   */
  frontier subproblems;

  /**
   * Maximum number of subproblems in a single chunk.
   */
  uint32_t chunk_size;

  /** Descriptors of the chunks received from the server. */
  chunk *chunks;

  /**
   * For each chunk, the number of subproblems whose exploration has not yet
   * started.
   */
  atomic_uint_fast32_t *num_available;

  /**
   * For each chunk, the number of subproblems whose exploration has not yet
   * completed.
   */
  atomic_uint_fast32_t *num_unfinished;

  /**
   * Array containing, for each subproblem, the tag of the thread that is
   * currently exploring it.
   *
   * A tag of 0 indicates that no thread is exploring it.
   */
  atomic_uint_fast32_t *owners;

  /**
   * Tag of the thread which is currently accessing the @ref
   * communication_environment::available_chunks in order to find a new set of
   * subproblems to explore.
   *
   * A tag of 0 indicates that no thread is trying to retrieve a chunk from
   * the queue.
   */
  atomic_uint_fast32_t waiting_tag;

  /**
   * Whether or not the thread is allowed to send requests to the server.
   *
   * NOTE(review): the field name and the pending_chunks comment below suggest
   * the opposite polarity (true while the client sleeps and requests queue
   * up) -- confirm against the implementation of the client loop.
   *
   * @see client_request
   */
  bool asleep;

  /**
   * Array of pending requests. This starts filling up when the client goes to
   * sleep.
   */
  chunk *pending_chunks;

  /**
   * Number of pending requests in @ref
   * communication_environment::pending_chunks.
   */
  uint32_t num_pending;

  /**
   * Number of subproblems received but the completion of which has not yet been
   * signaled to the server.
   */
  uint32_t num_exploring;

  /**
   * Lock used to check for new available chunks.
   */
  pthread_mutex_t chunk_lock;

  /**
   * Condition variable used to check for new available chunks.
   */
  pthread_cond_t chunk_cond;
} communication_environment;

/**
 * Initializes a communication environment.
 *
 * Each call to this function should be matched with a call to @ref
 * communication_environment_release.
 */
static error_code communication_environment_init(communication_environment *env,
                                                 client *client,
                                                 client_log log, void *log_data,
                                                 uint32_t chunk_size);

/**
 * Releases memory allocated by @ref communication_environment_init.
 */
static void communication_environment_release(communication_environment *env);

/**
 * Prints a string to the client.
 */
static void communication_environment_printf(
  communication_environment *env, const char *fmt, ...);

/**
 * Function executed by the communication thread.
 *
 * This is where all communication with the server occurs. The other threads are
 * tasked with the exploration of subproblems.
 */
static void *communication_loop(void *data);

/**
 * Gets the id of a chunk with a non-zero number of available subproblems.
 *
 * @param [out] id Id of the chunk that was found
 * @param tag Tag of the current thread
 *
 * @return True if a chunk was received, false if the connection was closed.
 */
static bool get_available_chunk_id(communication_environment *env, uint32_t *id,
                                   uint32_t tag);

/**
 * Waits until the server closes the connection or a chunk is received from
 * the server.
 *
 * @param [out] id Id of the chunk that was received
 * @param tag Tag of the current thread.
 *
 * @return True if a chunk was received
 */
static bool wait_for_chunk(communication_environment *env, uint32_t *id,
                           uint32_t tag);

/**
 * Have a thread explore a @ref chunk of subproblems.
 *
 * Multiple threads can explore the same chunk.
 */
static void explore_chunk(communication_environment *env,
                          uint32_t id, uint32_t tag);

/**
 * Main entry point of the distributed search on this client.
 *
 * Spawns a dedicated communication thread that talks to the server while an
 * OpenMP team of worker threads explores chunks of subproblems.  Workers use
 * 1-based tags (tag 0 means "no owner" in the ownership arrays).
 *
 * @return SUCCESS, or the error code of the step that failed.  (The previous
 * version returned SUCCESS unconditionally, silently discarding init and
 * thread-creation failures.)
 */
static error_code distributed_loop(client *client,
                                   client_log log, void *log_data,
                                   uint32_t chunk_size) {
  error_code status;

  communication_environment env;
  if ((status = communication_environment_init(&env, client,
                                               log, log_data,
                                               chunk_size)) != SUCCESS)
    goto fail_init_env;

  pthread_t comm_thread;
  if (pthread_create(&comm_thread, NULL, communication_loop, &env)) {
    /* pthread_create fails on resource exhaustion (EAGAIN). */
    status = OUT_OF_MEMORY;
    goto fail_create_thread;
  }

  #pragma omp parallel
  {
    uint32_t thread_id = omp_get_thread_num();

    /* Tags are 1-based: tag 0 is reserved to mean "unowned". */
    uint32_t tag = thread_id + 1;
    uint32_t chunk_id = 0;
    while (get_available_chunk_id(&env, &chunk_id, tag)) {
      explore_chunk(&env, chunk_id, tag);
    }
  }

  pthread_join(comm_thread, NULL);
fail_create_thread:
  communication_environment_release(&env);
fail_init_env:
  return status;
}

/**
 * Returns true if there is no information to be sent to the server.
 */
/**
 * Returns true if there is no information to be sent to the server.
 *
 * The communication thread may idle only when no finished chunk awaits
 * acknowledgement and no solution awaits transmission.
 */
static bool communication_should_be_idle(void *arg) {
  communication_environment *env = arg;

  if (env->finished_chunks.size != 0)
    return false;

  return atomic_load_explicit(&env->num_solutions,
                              memory_order_acquire) == 0;
}

/*
 * Receives the problem layout from the server, then allocates the per-thread
 * solver states, the subproblem frontier, and the chunk bookkeeping arrays.
 * On any failure, everything acquired so far is released via the goto-cleanup
 * chain below and a non-SUCCESS error code is returned.
 */
static error_code communication_environment_init(communication_environment *env,
                                                 client *client,
                                                 client_log log, void *log_data,
                                                 uint32_t chunk_size) {
  uint32_t num_vertices, num_quads, num_hexes, num_symmetries,
    num_boundary_vertices, max_no_goods_size;
  uint8_t flags;
  error_code status = SUCCESS;

  if ((status = client_receive_layout(client, &num_vertices,
                                      &num_quads, &num_hexes, &num_symmetries,
                                      &num_boundary_vertices,
                                      &max_no_goods_size,
                                      &flags)) != SUCCESS) {
    goto fail_receive_layout;
  }

  env->client = client;

  env->log = log;
  env->log_data = log_data;

  env->num_threads = omp_get_max_threads();

  if ((status = concurrent_queue_init(&env->finished_chunks, env->num_threads,
                                      sizeof(chunk))))
    goto fail_init_finished_queue;

  if ((status = concurrent_queue_init(&env->available_chunks, env->num_threads,
                                      sizeof(chunk))))
    goto fail_init_available_queue;

  env->states = malloc(env->num_threads * sizeof(*env->states));
  if (!env->states) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_states;
  }

  env->chunks = malloc(env->num_threads * sizeof(*env->chunks));
  if (!env->chunks) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_chunks;
  }

  env->num_available = malloc(env->num_threads * sizeof(*env->num_available));
  if (!env->num_available) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_num_available;
  }

  env->num_unfinished = malloc(env->num_threads * sizeof(*env->num_unfinished));
  if (!env->num_unfinished) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_num_unfinished;
  }

  env->pending_chunks = malloc(env->num_threads * sizeof(*env->pending_chunks));
  if (!env->pending_chunks) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_pending_chunks;
  }

  /* alloced_i counts fully-constructed states so the cleanup loop at
   * fail_init_state knows exactly how many entries to tear down. */
  uint32_t alloced_i = 0;
  for (alloced_i = 0; alloced_i < env->num_threads; alloced_i++) {
    thread_state *state = &env->states[alloced_i];

    state->solution_counter = &env->num_solutions;

    /* One record per solution: a count byte plus up to 8 * UINT8_MAX bytes. */
    state->element_size = (1 + 8 * UINT8_MAX);
    state->size = 0;
    state->capacity = 16;

    /* Check the allocation BEFORE initializing the mutex: a failure here must
     * leave index alloced_i fully unconstructed, otherwise the cleanup loop
     * would miss the initialized mutex (it only handles j < alloced_i). */
    state->solution_data = malloc(state->capacity * state->element_size);
    if (!state->solution_data) {
      status = OUT_OF_MEMORY;
      goto fail_init_state;
    }

    if (pthread_mutex_init(&state->solution_lock, NULL) != 0) {
      free(state->solution_data);
      status = OUT_OF_MEMORY; /* mutex init fails on resource exhaustion */
      goto fail_init_state;
    }
  }

  atomic_init(&env->num_solutions, 0);

  uint32_t num_problems = env->num_threads * chunk_size;
  env->ids = malloc(num_problems * sizeof(*env->ids));
  if (!env->ids) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_ids;
  }

  if ((status = frontier_alloc(&env->subproblems, num_problems,
                               num_vertices, num_quads, num_hexes,
                               num_symmetries, num_boundary_vertices,
                               flags))
      != SUCCESS) {
    goto fail_alloc_frontier;
  }

  env->owners = malloc(num_problems * sizeof(*env->owners));
  if (!env->owners) {
    status = OUT_OF_MEMORY;
    goto fail_alloc_owners;
  }

  /* Tag 0 everywhere: no subproblem has an owner yet. */
  for (uint32_t i = 0; i < num_problems; i++)
    atomic_init(&env->owners[i], 0);

  for (uint32_t i = 0; i < env->num_threads; i++) {
    atomic_init(&env->num_available[i], 0);
    atomic_init(&env->num_unfinished[i], 0);
  }

  atomic_init(&env->waiting_tag, 0);

  env->asleep      = false;
  env->num_pending = 0;

  env->num_exploring = 0;

  if (pthread_mutex_init(&env->chunk_lock, NULL) != 0) {
    status = OUT_OF_MEMORY;
    goto fail_init_chunk_lock;
  }

  if (pthread_cond_init(&env->chunk_cond, NULL) != 0) {
    status = OUT_OF_MEMORY;
    goto fail_init_chunk_cond;
  }

  env->chunk_size = chunk_size;

  return SUCCESS;

  /* Cleanup chain: entered at the label matching the step that failed, then
   * falls through to release everything acquired before it.  The first line
   * is currently unreachable; it is kept so a future failure path added
   * after the cond_init can reuse the chain unchanged. */
  pthread_cond_destroy(&env->chunk_cond);
fail_init_chunk_cond:
  pthread_mutex_destroy(&env->chunk_lock);
fail_init_chunk_lock:
  free(env->owners);
fail_alloc_owners:
  frontier_release(&env->subproblems);
fail_alloc_frontier:
  free(env->ids);
fail_alloc_ids:
fail_init_state:
  for (uint32_t j = 0; j < alloced_i; j++) {
    free(env->states[j].solution_data);
    pthread_mutex_destroy(&env->states[j].solution_lock);
  }
  free(env->pending_chunks);
fail_alloc_pending_chunks:
  free(env->num_unfinished);
fail_alloc_num_unfinished:
  free(env->num_available);
fail_alloc_num_available:
  free(env->chunks);
fail_alloc_chunks:
  free(env->states);
fail_alloc_states:
  concurrent_queue_release(&env->available_chunks);
fail_init_available_queue:
  concurrent_queue_release(&env->finished_chunks);
fail_init_finished_queue:
fail_receive_layout:
  return status;
}

/**
 * Tears down everything @ref communication_environment_init set up, in
 * reverse order of construction.
 */
static void communication_environment_release(communication_environment *env) {
  /* Chunk hand-off synchronization primitives. */
  pthread_cond_destroy(&env->chunk_cond);
  pthread_mutex_destroy(&env->chunk_lock);

  /* Subproblem bookkeeping. */
  free(env->owners);
  frontier_release(&env->subproblems);
  free(env->ids);

  /* Per-thread solution buffers and their locks. */
  for (uint32_t t = 0; t < env->num_threads; t++) {
    thread_state *state = &env->states[t];
    free(state->solution_data);
    pthread_mutex_destroy(&state->solution_lock);
  }

  /* Per-thread arrays. */
  free(env->pending_chunks);
  free(env->num_unfinished);
  free(env->num_available);
  free(env->chunks);
  free(env->states);

  /* Chunk queues. */
  concurrent_queue_release(&env->available_chunks);
  concurrent_queue_release(&env->finished_chunks);
}

/**
 * Formats a message (truncated to 1023 characters if longer) and forwards it
 * to the client's log callback.
 */
static void communication_environment_printf(
  communication_environment *env, const char *fmt, ...) {
  char message[1024];

  va_list args;
  va_start(args, fmt);
  vsnprintf(message, sizeof message, fmt, args);
  va_end(args);

  env->log(message, env->log_data);
}

/**
 * Requests subproblems from the server.
 *
 * If the client is asleep, adds the chunk to the list of pending requests.
 */
static error_code request_subproblems(communication_environment *env,
                                      chunk chunk);

static void *communication_loop(void *data) {
  communication_environment *env = data;

  /* Prime the pipeline: request one chunk of subproblems per worker thread.
   * chunk.n is 0 here; request_subproblems fills it in. */
  for (uint32_t i = 0; i < env->num_threads; i++) {
    chunk chunk = {i, 0};
    if (request_subproblems(env, chunk) != SUCCESS)
      goto fail;
  }

  /* client_idle blocks while communication_should_be_idle holds and wakes us
   * whenever there is something to send; it returns false once the server
   * closes the connection. */
  while (client_idle(env->client, &env->asleep,
                     communication_should_be_idle, env)) {
    /* If we just woke up, flush requests that were queued while asleep
     * (newest first, matching how they were stacked). */
    while (!env->asleep && env->num_pending != 0) {
      if (request_subproblems(env, env->pending_chunks[env->num_pending-1]) !=
          SUCCESS) {
        goto fail;
      }

      env->num_pending--;
    }

    /* Forward any solutions accumulated by the worker threads. */
    if (atomic_load_explicit(&env->num_solutions, memory_order_acquire) != 0) {
      if (send_known_solutions(env->client, env->states,
                               env->num_threads) != SUCCESS) {
        goto fail;
      }
    }

    chunk finished;

    if (concurrent_queue_try_pop(&env->finished_chunks, &finished)) {
      /* Send solutions again before acknowledging, so the server never sees
       * a subproblem marked finished before its solutions have arrived. */
      if (atomic_load_explicit(&env->num_solutions,
                               memory_order_acquire) != 0) {
        if (send_known_solutions(env->client, env->states,
                                 env->num_threads) != SUCCESS) {
          goto fail;
        }
      }

      /* Acknowledge every subproblem of the finished chunk by server id. */
      uint32_t offset = finished.id * env->chunk_size;
      for (uint32_t i = 0; i < finished.n; i++) {
        if (client_notify_finished(env->client, env->ids[offset + i]) !=
            SUCCESS) {
          communication_environment_printf(
            env, "Failed to notify that buffer %u was finished\n",
            env->ids[offset + i]);
          goto fail;
        }
      }

      env->num_exploring -= finished.n;

      communication_environment_printf(
        env, "Notified server about %u finished subproblems (num left: %u)\n",
        finished.n, env->num_exploring);

      /* The chunk's slot is free again: refill it with new subproblems. */
      if (request_subproblems(env, finished) != SUCCESS)
        goto fail;
    }
  }

fail:
  /* Closing the queue unblocks workers waiting in pop_chunk/wait_for_chunk
   * and makes them exit their exploration loop. */
  concurrent_queue_close(&env->available_chunks);
  return NULL;
}

/**
 * Requests a fresh batch of subproblems from the server for the given chunk
 * slot, or defers the request while the client is asleep.
 *
 * @return SUCCESS, or PROTOCOL_ERROR if the server reply could not be read.
 */
static error_code request_subproblems(communication_environment *env,
                                      chunk chunk) {
  /* While asleep we may not talk to the server: stack the request for later. */
  if (env->asleep) {
    env->pending_chunks[env->num_pending++] = chunk;
    return SUCCESS;
  }

  communication_environment_printf(env, "Requesting %u subproblems\n",
                                   env->chunk_size);

  /* Each chunk owns a fixed window of chunk_size slots in the frontier. */
  uint32_t offset = chunk.id * env->chunk_size;

  if (client_request(env->client, offset, env->chunk_size,
                     &env->subproblems.solvers,
                     env->ids, &chunk.n) != SUCCESS) {
    communication_environment_printf(env, "Failed to receive subproblems\n");
    return PROTOCOL_ERROR;
  }

  communication_environment_printf(
    env, "Received %u subproblems (num left: %u)\n", chunk.n,
    env->num_exploring + chunk.n);

  /* An empty reply means the server has nothing left for now. */
  if (chunk.n == 0) {
    communication_environment_printf(
      env, "Server is empty, going to sleep until more work "
      "is available\n");
    env->asleep = true;
  }

  env->num_exploring += chunk.n;

  concurrent_queue_push(&env->available_chunks, &chunk);

  return SUCCESS;
}

/**
 * Returns the id of a chunk with available subproblems, or the number of
 * chunks (env->num_threads) if no such chunk can be found.
 */
static uint32_t find_available_chunk_id(
  communication_environment *env, uint32_t id);

static bool get_available_chunk_id(communication_environment *env, uint32_t *id,
                                   uint32_t tag) {
  /* Fast path: some already-received chunk still has work in it. */
  uint32_t candidate = find_available_chunk_id(env, *id);
  if (candidate != env->num_threads) {
    *id = candidate;
    return true;
  }

  /* Slow path: block until a chunk arrives or the connection closes. */
  return wait_for_chunk(env, id, tag);
}

/**
 * Retrieves a chunk from the queue of available chunks.
 *
 * This should be called by only one thread at a time. The tag of this thread is
 * stored in @ref communication_environment::waiting_tag to maintain this
 * constraint.
 */
static bool pop_chunk(communication_environment *env, uint32_t *id,
                      uint32_t tag);

static bool wait_for_chunk(communication_environment *env, uint32_t *id,
                           uint32_t tag) {
  bool result = true;

  /* chunk_lock serializes the checks below against pop_chunk's updates and
   * the condition-variable wait. */
  pthread_mutex_lock(&env->chunk_lock);
  while (1) {
    bool is_closed = env->available_chunks.closed;
    uint32_t new_id = find_available_chunk_id(env, *id);

    /* Expected value for the CAS below: waiting_tag == 0 means no thread is
     * currently elected to pop from the queue. */
    uint_fast32_t expected = 0;

    if (is_closed && new_id == env->num_threads) {
      /* Connection closed and no local work left: stop this worker. */
      result = false;
      break;
    }
    else if (new_id != env->num_threads) {
      /* Another thread made a chunk available while we were waiting. */
      *id = new_id;
      break;
    }
    else if (atomic_compare_exchange_strong_explicit(
               &env->waiting_tag, &expected, tag,
               memory_order_acq_rel, memory_order_acquire)) {
      /* We won the election: pop from the queue ourselves.  pop_chunk
       * re-acquires the lock internally and resets waiting_tag. */
      pthread_mutex_unlock(&env->chunk_lock);
      return pop_chunk(env, id, tag);
    }

    /* Someone else is popping; sleep until they broadcast chunk_cond. */
    pthread_cond_wait(&env->chunk_cond, &env->chunk_lock);
  }
  pthread_mutex_unlock(&env->chunk_lock);

  return result;
}

static bool pop_chunk(communication_environment *env, uint32_t *id,
                      uint32_t tag) {
  chunk chunk;

  /* Skip empty chunks: bounce them straight back to the communication
   * thread (via finished_chunks) so their slots get refilled. */
  bool result;
  while ((result = concurrent_queue_pop(&env->available_chunks, &chunk)) &&
         chunk.n == 0) {
    concurrent_queue_push(&env->finished_chunks, &chunk);
  }

  /* result is false only when the queue was closed (connection ended). */
  if (result) {
    /* Publish the chunk under chunk_lock so waiters in wait_for_chunk see a
     * consistent state when they re-check after the broadcast. */
    pthread_mutex_lock(&env->chunk_lock);
    *id = chunk.id;

    env->chunks[chunk.id].id = chunk.id;
    env->chunks[chunk.id].n  = chunk.n;

    /* Reset ownership (tag 0 = unowned) for every subproblem in the chunk. */
    uint32_t offset = chunk.id * env->chunk_size;
    for (uint32_t i = 0; i < chunk.n; i++) {
      atomic_store_explicit(&env->owners[offset + i], 0,
                            memory_order_release);
    }

    atomic_store_explicit(&env->num_unfinished[chunk.id], chunk.n,
                          memory_order_release);
    atomic_store_explicit(&env->num_available[chunk.id], chunk.n,
                          memory_order_release);
    pthread_mutex_unlock(&env->chunk_lock);
  }
  /* Wake all waiters: either work is now available or the queue is closed. */
  pthread_cond_broadcast(&env->chunk_cond);

  /* Release the popping role claimed in wait_for_chunk. */
  atomic_store_explicit(&env->waiting_tag, 0, memory_order_release);
  return result;
}

/**
 * Have a thread explore a @ref chunk of subproblems.
 *
 * Multiple threads can explore the same chunk: each subproblem is claimed
 * with a compare-and-swap on its owner slot, so every subproblem is explored
 * by exactly one thread.  The last thread to finish a subproblem of the
 * chunk pushes it onto the finished queue for acknowledgement.
 */
static void explore_chunk(communication_environment *env,
                          uint32_t id, uint32_t tag) {
  uint32_t offset = env->chunks[id].id * env->chunk_size;
  uint32_t n = env->chunks[id].n;
  for (uint32_t j = 0; j < n; j++) {
    /* Claim the subproblem: owner goes 0 (unowned) -> our tag. */
    uint_fast32_t expected = 0;
    if (atomic_compare_exchange_strong_explicit(
          &env->owners[offset + j], &expected, tag,
          memory_order_acq_rel, memory_order_acquire)) {
      atomic_fetch_sub_explicit(&env->num_available[id], 1,
                                memory_order_acq_rel);

      /* `sub` rather than `solver` so the variable does not shadow the
       * solver typedef. */
      solver *sub;
      frontier_get(&env->subproblems, offset + j, &sub);

      sub->cb = store_solution;
      sub->user_data = &env->states[tag-1];

      if (sub->flags & SOLVER_SHELLABLE_ONLY)
        shellable_dfs(sub);
      else
        choose_vertex(sub);

      /* Renamed from `n` (which shadowed the loop bound above): value BEFORE
       * the decrement, so 1 means we just finished the chunk's last
       * subproblem. */
      uint32_t unfinished_before =
        atomic_fetch_sub_explicit(&env->num_unfinished[id], 1,
                                  memory_order_acq_rel);
      if (unfinished_before == 1) {
        concurrent_queue_push(&env->finished_chunks, &env->chunks[id]);
      }
    }
  }
}

/*
 * Scans the chunks cyclically, starting just after `id` (the chunk the
 * caller last explored), and returns the first one whose num_available
 * counter is non-zero, or env->num_threads if none is found.
 *
 * The scan covers every chunk except `id` itself: after explore_chunk the
 * caller has already attempted every subproblem of that chunk.  (The
 * previous bound of (start + num_threads - 1) % num_threads also skipped
 * chunk start-1, so its available work could go undetected; with one or two
 * threads nothing was scanned at all.)
 */
static uint32_t find_available_chunk_id(communication_environment *env,
                                        uint32_t id) {
  uint32_t start = id;
  id++;
  if (id == env->num_threads) id = 0;

  while (id != start) {
    if (atomic_load_explicit(&env->num_available[id],
                             memory_order_acquire) != 0)
      return id;

    id++;
    if (id == env->num_threads) id = 0;
  }

  return env->num_threads;
}
