/* Copyright 2020 UPMEM. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "dpu_memory.h"
#include "dpu_rank_handler.h"
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <dpu_error.h>
#include <dpu_types.h>
#include <verbose_control.h>
#include <dpu_attributes.h>
#include <dpu_management.h>
#include <dpu_config.h>
#include <dpu_internals.h>
#include <dpu_mask.h>
#include <dpu_rank.h>
#include <dpu_api_log.h>
#include <dpu_characteristics.h>
#include <profiling_internals.h>

/* Boots (or resumes) one thread on a single DPU via the rank backend.
 *
 * @param dpu      target DPU (rank is taken from dpu->rank)
 * @param thread   id of the thread to start
 * @param previous out parameter forwarded to the backend; callers in this
 *                 file pass NULL (presumably the previously-running thread
 *                 bitfield — NOTE(review): confirm against the backend)
 * @param resume   true to resume execution instead of a fresh boot
 * @return DPU_OK on success, otherwise the backend error code
 */
dpu_error_t
dpu_thread_boot_safe_for_dpu(struct dpu_t *dpu, uint8_t thread, uint8_t *previous, bool resume)
{
    LOG_DPU(VERBOSE, dpu, "%d", thread);

    dpu_error_t status;
    struct dpu_rank_t *rank = dpu->rank;

    /* FF() assigns 'status' and jumps to 'end' on error. */
    FF(RANK_FEATURE(rank, start_thread_dpu)(dpu, thread, resume, previous));

end:
    return status;
}

/* Boots (or resumes) one thread on every enabled DPU of a rank via the
 * rank backend.
 *
 * @param rank     target rank
 * @param thread   id of the thread to start
 * @param previous out parameter forwarded to the backend; callers in this
 *                 file pass NULL
 * @param resume   true to resume execution instead of a fresh boot
 * @return DPU_OK on success, otherwise the backend error code
 */
dpu_error_t
dpu_thread_boot_safe_for_rank(struct dpu_rank_t *rank, uint8_t thread, uint8_t *previous, bool resume)
{
    LOG_RANK(VERBOSE, rank, "%d", thread);

    dpu_error_t status;

    /* FF() assigns 'status' and jumps to 'end' on error. */
    FF(RANK_FEATURE(rank, start_thread_rank)(rank, thread, resume, previous));

end:
    return status;
}

/* Returns a pointer to the rank's run context (running/fault DPU bookkeeping). */
__API_SYMBOL__ dpu_run_context_t
dpu_get_run_context(struct dpu_rank_t *rank)
{
    dpu_run_context_t run_context = &rank->runtime.run_context;
    return run_context;
}

/* Boots (or resumes) the given thread on every enabled DPU of the rank.
 *
 * Polls the rank first, performs profiling bookkeeping when starting a fresh
 * run, then boots the thread and marks every enabled DPU as running.
 *
 * @param rank          target rank
 * @param thread        id of the thread to launch
 * @param should_resume true to resume execution, false for a fresh boot
 * @return DPU_OK on success, otherwise the first error encountered
 */
__API_SYMBOL__ dpu_error_t
dpu_launch_thread_on_rank(struct dpu_rank_t *rank, dpu_thread_t thread, bool should_resume)
{
    /* lldb has issues breaking on dpu_launch_thread_on_rank. It seems that the debug information generated by gcc does not
     * match lldb's expectations. This asm statement helps gcc produce debug information that lldb understands. One day lldb
     * should be smart enough that we can remove the statement... I hope
     * https://github.com/upmem/llvm-project/issues/39
     */
    __asm__("");

    LOG_RANK(VERBOSE, rank, "%d, %d", thread, should_resume);

    dpu_error_t status;

    verify_thread_id(thread, rank);

    dpu_lock_rank(rank);

    FF(dpu_poll_rank(rank));

    /* Fresh boots reset the profiling state; resumes keep accumulating. */
    if (!should_resume) {
        switch (rank->profiling_context.enable_profiling) {
            default:
                break;
            case DPU_PROFILING_STATS:
                /* FF unlocks via 'end', consistent with the rest of this function
                 * (the previous hand-rolled unlock+return was redundant). */
                FF(dpu_set_magic_profiling_for_dpu(rank->profiling_context.dpu));
                break;
            case DPU_PROFILING_SAMPLES:
                memset(rank->profiling_context.sample_stats,
                    0,
                    rank->description->hw.memories.iram_size * sizeof(*(rank->profiling_context.sample_stats)));
                break;
        }
    }

    /* 'should_resume' maps directly to the boot routine's 'resume' flag;
     * no need to branch on it. */
    FF(dpu_thread_boot_safe_for_rank(rank, thread, NULL, should_resume));

    /* All enabled DPUs are now running: update the run context accordingly. */
    uint32_t nb_dpu_running = 0;
    for (dpu_slice_id_t each_slice = 0; each_slice < rank->description->hw.topology.nr_of_control_interfaces; ++each_slice) {
        dpu_selected_mask_t mask_all = rank->runtime.control_interface.slice_info[each_slice].enabled_dpus;

        rank->runtime.run_context.dpu_running[each_slice] = mask_all;
        nb_dpu_running += dpu_mask_count(mask_all);
    }
    rank->runtime.run_context.nb_dpu_running = nb_dpu_running;

end:
    dpu_unlock_rank(rank);
    return status;
}

/* Boots (or resumes) the given thread on a single DPU.
 *
 * Polls the DPU first, performs profiling bookkeeping when starting a fresh
 * run on the profiled DPU, then boots the thread and updates the run context
 * if the DPU was not already running.
 *
 * @param dpu           target DPU
 * @param thread        id of the thread to launch
 * @param should_resume true to resume execution, false for a fresh boot
 * @return DPU_OK on success, DPU_ERR_DPU_DISABLED if the DPU is disabled,
 *         otherwise the first error encountered
 */
__API_SYMBOL__ dpu_error_t
dpu_launch_thread_on_dpu(struct dpu_t *dpu, dpu_thread_t thread, bool should_resume)
{
    /* lldb has issues breaking on dpu_launch_thread_on_rank. It seems that the debug information generated by gcc does not
     * match lldb's expectations. This asm statement helps gcc produce debug information that lldb understands. One day lldb
     * should be smart enough that we can remove the statement... I hope
     * https://github.com/upmem/llvm-project/issues/39
     */
    __asm__("");

    LOG_DPU(VERBOSE, dpu, "%d, %d", thread, should_resume);

    if (!dpu->enabled) {
        return DPU_ERR_DPU_DISABLED;
    }

    struct dpu_rank_t *rank = dpu_get_rank(dpu);
    dpu_slice_id_t ci_id = dpu->slice_id;
    dpu_member_id_t dpu_id = dpu->dpu_id;
    verify_thread_id(thread, rank);

    dpu_error_t status;

    dpu_lock_rank(rank);

    bool was_running;
    bool was_in_fault;

    FF(dpu_poll_dpu(dpu, &was_running, &was_in_fault));

    /* Fresh boots on the profiled DPU reset the profiling state. */
    if (!should_resume && (rank->profiling_context.dpu == dpu)) {
        if (rank->profiling_context.enable_profiling == DPU_PROFILING_STATS) {
            FF(dpu_set_magic_profiling_for_dpu(dpu));
        } else if (rank->profiling_context.enable_profiling == DPU_PROFILING_SAMPLES) {
            memset(rank->profiling_context.sample_stats,
                0,
                rank->description->hw.memories.iram_size * sizeof(*(rank->profiling_context.sample_stats)));
        }
    }

    FF(dpu_thread_boot_safe_for_dpu(dpu, thread, NULL, should_resume));

    /* Only account for the DPU once: a resume of an already-running DPU must
     * not bump the running counter. */
    if (!was_running) {
        rank->runtime.run_context.dpu_running[ci_id] |= dpu_mask_one(dpu_id);
        rank->runtime.run_context.nb_dpu_running++;
    }

end:
    dpu_unlock_rank(rank);
    return status;
}

/* Polls the whole rank for running/faulting DPUs and refreshes the run
 * context bitfields.
 *
 * When profiling is enabled, also collects the profiling data of the
 * profiled DPU and dumps it once that DPU transitions from running to
 * stopped (or faulted).
 *
 * @param rank target rank
 * @return DPU_OK on success, otherwise the first backend error encountered
 */
__API_SYMBOL__ dpu_error_t
dpu_poll_rank(struct dpu_rank_t *rank)
{
    LOG_RANK(VERBOSE, rank, "");

    dpu_error_t status;
    dpu_slice_id_t slice_id_profiling = dpu_get_slice_id(rank->profiling_context.dpu);
    dpu_member_id_t dpu_id_profiling = dpu_get_member_id(rank->profiling_context.dpu);
    uint8_t nr_threads = rank->description->hw.dpu.nr_of_threads;
    dpu_bitfield_t dpu_poll_running[DPU_MAX_NR_CIS];
    dpu_bitfield_t dpu_poll_in_fault[DPU_MAX_NR_CIS];
    dpu_run_context_t run_context = dpu_get_run_context(rank);

    dpu_lock_rank(rank);

    /* FF() assigns 'status' and jumps to 'end' on error. */
    FF(RANK_FEATURE(rank, poll_rank)(rank, dpu_poll_running, dpu_poll_in_fault));

    switch (rank->profiling_context.enable_profiling) {
        default:
            break;
        case DPU_PROFILING_STATS: {
            /* VLA sized by the HW thread count; one profiled address per thread. */
            uint32_t profiled_address[nr_threads];
            memset(profiled_address, 0, nr_threads * sizeof(uint32_t));

            struct dpu_t *dpu = DPU_GET_UNSAFE(rank, slice_id_profiling, dpu_id_profiling);
            /* /4: byte address converted to a WRAM word address. */
            FF(RANK_FEATURE(rank, copy_from_wrams_dpu)(
                dpu, profiled_address, (wram_addr_t)(rank->profiling_context.thread_profiling_address / 4), nr_threads));

            dpu_collect_statistics_profiling(rank->profiling_context.dpu, nr_threads, profiled_address);

            // If the dpu stopped, dump profiling statistics
            dpu_selected_mask_t mask_one = dpu_mask_one(dpu_id_profiling);
            bool dpu_is_not_running = ((dpu_poll_running[slice_id_profiling] & mask_one) == 0)
                || ((dpu_poll_in_fault[slice_id_profiling] & mask_one) != 0);
            /* run_context still holds the state from the previous poll here. */
            bool dpu_was_running = run_context->dpu_running[slice_id_profiling] & mask_one;
            if (dpu_is_not_running && dpu_was_running) {
                dpu_dump_statistics_profiling(rank->profiling_context.dpu, nr_threads);
            }

            break;
        }
        case DPU_PROFILING_SAMPLES: {
            iram_addr_t sampled_address;

            struct dpu_t *dpu = DPU_GET_UNSAFE(rank, slice_id_profiling, dpu_id_profiling);
            FF(RANK_FEATURE(rank, sample_pc_dpu)(dpu, &sampled_address));

            dpu_collect_samples_profiling(rank->profiling_context.dpu, sampled_address);

            // If the dpu stopped, dump profiling statistics
            dpu_selected_mask_t mask_one = dpu_mask_one(dpu_id_profiling);
            bool dpu_is_not_running = ((dpu_poll_running[slice_id_profiling] & mask_one) == 0)
                || ((dpu_poll_in_fault[slice_id_profiling] & mask_one) != 0);
            bool dpu_was_running = run_context->dpu_running[slice_id_profiling] & mask_one;
            if (dpu_is_not_running && dpu_was_running) {
                dpu_dump_samples_profiling(rank->profiling_context.dpu);
            }

            break;
        }
        case DPU_PROFILING_SECTIONS: {
            dpuword_t perfcounter_end_value = 0;

            struct dpu_t *dpu = DPU_GET_UNSAFE(rank, slice_id_profiling, dpu_id_profiling);

            // If the dpu stopped, dump sections ratio
            dpu_selected_mask_t mask_one = dpu_mask_one(dpu_id_profiling);
            bool dpu_is_not_running = ((dpu_poll_running[slice_id_profiling] & mask_one) == 0)
                || ((dpu_poll_in_fault[slice_id_profiling] & mask_one) != 0);
            bool dpu_was_running = run_context->dpu_running[slice_id_profiling] & mask_one;
            if (dpu_is_not_running && dpu_was_running) {
                // Get program duration time
                FF(RANK_FEATURE(rank, copy_from_wrams_dpu)(dpu,
                    &perfcounter_end_value,
                    (wram_addr_t)(rank->profiling_context.perfcounter_end_value_address / sizeof(dpuword_t)),
                    1));

                unsigned int nr_symbols = rank->profiling_context.profiling_symbols->nr_symbols;
                /* Scratch buffer large enough to hold one dpu_profiling_t, in words. */
                dpuword_t section_info[sizeof(dpu_profiling_t) / sizeof(dpuword_t)];

                for (unsigned int each_symbol = 0; each_symbol < nr_symbols; ++each_symbol) {
                    const char *section_name = rank->profiling_context.profiling_symbols->map[each_symbol].name;

                    // Get section duration time
                    FF(RANK_FEATURE(rank, copy_from_wrams_dpu)(dpu,
                        section_info,
                        (wram_addr_t)(rank->profiling_context.profiling_symbols->map[each_symbol].value / sizeof(dpuword_t)),
                        rank->profiling_context.profiling_symbols->map[each_symbol].size / sizeof(dpuword_t)));

                    dpu_dump_section_profiling(dpu, nr_threads, section_name, section_info, perfcounter_end_value);
                }
            }

            break;
        }
    }

    /* Refresh the run context: a faulting DPU is reported as not running,
     * and disabled DPUs are masked out entirely. */
    uint32_t nb_dpu_running = 0;
    for (dpu_slice_id_t each_slice = 0; each_slice < rank->description->hw.topology.nr_of_control_interfaces; ++each_slice) {
        dpu_selected_mask_t mask_all = rank->runtime.control_interface.slice_info[each_slice].enabled_dpus;

        run_context->dpu_in_fault[each_slice] = dpu_poll_in_fault[each_slice] & mask_all;
        run_context->dpu_running[each_slice]
            = (dpu_poll_running[each_slice] & mask_all) & (~run_context->dpu_in_fault[each_slice]);
        nb_dpu_running += __builtin_popcount(run_context->dpu_running[each_slice]);
    }
    run_context->nb_dpu_running = nb_dpu_running;

end:
    dpu_unlock_rank(rank);
    return status;
}

/* Polls the rank and reports the running/fault state of a single DPU.
 *
 * @param dpu             target DPU
 * @param dpu_is_running  out: true if the DPU is still running
 * @param dpu_is_in_fault out: true if the DPU is in fault
 * @return DPU_OK on success, DPU_ERR_DPU_DISABLED if the DPU is disabled,
 *         otherwise the error reported by dpu_poll_rank
 */
__API_SYMBOL__ dpu_error_t
dpu_poll_dpu(struct dpu_t *dpu, bool *dpu_is_running, bool *dpu_is_in_fault)
{
    LOG_DPU(VERBOSE, dpu, "");

    if (!dpu->enabled) {
        return DPU_ERR_DPU_DISABLED;
    }

    struct dpu_rank_t *rank = dpu_get_rank(dpu);
    dpu_run_context_t run_context = dpu_get_run_context(rank);
    dpu_slice_id_t ci_id = dpu->slice_id;
    dpu_selected_mask_t dpu_bit = dpu_mask_one(dpu->dpu_id);

    dpu_lock_rank(rank);

    dpu_error_t status = dpu_poll_rank(rank);

    /* The outputs are filled from the run context even when the poll fails,
     * preserving the historical behavior of this function. */
    *dpu_is_running = (run_context->dpu_running[ci_id] & dpu_bit) != 0;
    *dpu_is_in_fault = (run_context->dpu_in_fault[ci_id] & dpu_bit) != 0;

    dpu_unlock_rank(rank);
    return status;
}

/* Boots a single DPU from its boot thread, after running the pre-execution
 * hook. Fails if the DPU is disabled or already running.
 *
 * @param dpu target DPU
 * @return DPU_OK on success, DPU_ERR_DPU_DISABLED if the DPU is disabled,
 *         DPU_ERR_DPU_ALREADY_RUNNING if it is already running, otherwise
 *         the first error encountered
 */
__API_SYMBOL__ dpu_error_t
dpu_boot_dpu(struct dpu_t *dpu)
{
    LOG_DPU(VERBOSE, dpu, "");

    dpu_error_t status;

    if (!dpu->enabled) {
        status = DPU_ERR_DPU_DISABLED;
        goto end;
    }

    struct dpu_rank_t *rank = dpu_get_rank(dpu);
    dpu_lock_rank(rank);
    dpu_run_context_t run_context = dpu_get_run_context(rank);
    dpu_slice_id_t slice_id = dpu_get_slice_id(dpu);
    dpu_member_id_t member_id = dpu_get_member_id(dpu);

    /* Single unlock point: every failure below funnels through 'unlock'
     * instead of duplicating dpu_unlock_rank() on each error path. */
    if (dpu_mask_intersection(run_context->dpu_running[slice_id], dpu_mask_one(member_id)) != dpu_mask_empty()) {
        status = DPU_ERR_DPU_ALREADY_RUNNING;
        goto unlock;
    }

    if ((status = dpu_custom_for_dpu(dpu, DPU_COMMAND_DPU_PREEXECUTION, NULL)) != DPU_OK) {
        goto unlock;
    }

    status = dpu_launch_thread_on_dpu(dpu, DPU_BOOT_THREAD, false);

unlock:
    dpu_unlock_rank(rank);
end:
    return status;
}

/* Boots every enabled DPU of a rank from the boot thread, after running the
 * rank-wide pre-execution hook. Fails if any DPU is already running.
 *
 * @param rank target rank
 * @return DPU_OK on success, DPU_ERR_DPU_ALREADY_RUNNING if some DPU is
 *         already running, otherwise the first error encountered
 */
__API_SYMBOL__ dpu_error_t
dpu_boot_rank(struct dpu_rank_t *rank)
{
    LOG_RANK(VERBOSE, rank, "");

    dpu_error_t status;
    dpu_lock_rank(rank);
    dpu_run_context_t run_context = dpu_get_run_context(rank);

    /* Single unlock point: every failure below funnels through 'unlock'
     * instead of duplicating dpu_unlock_rank() on each error path. */
    if (run_context->nb_dpu_running != 0) {
        status = DPU_ERR_DPU_ALREADY_RUNNING;
        goto unlock;
    }

    if ((status = dpu_custom_for_rank(rank, DPU_COMMAND_ALL_PREEXECUTION, NULL)) != DPU_OK) {
        goto unlock;
    }

    status = dpu_launch_thread_on_rank(rank, DPU_BOOT_THREAD, false);

unlock:
    dpu_unlock_rank(rank);
    return status;
}

/* Reports the aggregate status of a rank from the cached run context.
 *
 * Does not poll the hardware; reflects the state recorded by the last poll.
 *
 * @param rank  target rank
 * @param done  out: true when no DPU of the rank is running
 * @param fault out: true when at least one DPU of the rank is in fault
 * @return DPU_OK (always)
 */
__API_SYMBOL__ dpu_error_t
dpu_status_rank(struct dpu_rank_t *rank, bool *done, bool *fault)
{
    dpu_lock_rank(rank);
    dpu_description_t description = dpu_get_description(rank);
    dpu_run_context_t run_context = dpu_get_run_context(rank);

    uint8_t nr_cis = description->hw.topology.nr_of_control_interfaces;
    dpu_bitfield_t no_dpu = dpu_mask_empty();

    bool all_done = true;
    bool any_fault = false;

    for (dpu_slice_id_t ci_id = 0; ci_id < nr_cis; ++ci_id) {
        if (run_context->dpu_running[ci_id] != no_dpu) {
            all_done = false;
        }
        if (run_context->dpu_in_fault[ci_id] != no_dpu) {
            any_fault = true;
        }
    }

    *done = all_done;
    *fault = any_fault;

    dpu_unlock_rank(rank);
    return DPU_OK;
}

/* Reports the status of a single DPU from the cached run context.
 *
 * Does not poll the hardware; reflects the state recorded by the last poll.
 *
 * @param dpu   target DPU
 * @param done  out: true when the DPU is not running
 * @param fault out: true when the DPU is in fault
 * @return DPU_OK, or DPU_ERR_DPU_DISABLED if the DPU is disabled
 */
__API_SYMBOL__ dpu_error_t
dpu_status_dpu(struct dpu_t *dpu, bool *done, bool *fault)
{
    struct dpu_rank_t *rank = dpu_get_rank(dpu);

    if (!dpu->enabled) {
        return DPU_ERR_DPU_DISABLED;
    }

    dpu_lock_rank(rank);

    dpu_run_context_t run_context = dpu_get_run_context(rank);
    dpu_slice_id_t ci_id = dpu_get_slice_id(dpu);
    dpu_member_id_t dpu_id = dpu_get_member_id(dpu);

    bool running = dpu_mask_is_selected(run_context->dpu_running[ci_id], dpu_id);
    bool in_fault = dpu_mask_is_selected(run_context->dpu_in_fault[ci_id], dpu_id);

    *done = !running;
    *fault = in_fault;

    dpu_unlock_rank(rank);

    return DPU_OK;
}
