
/*
 * GSource-driven main loop; see https://hev.cc/848.html for background.
 */

/*
 * Initialize the QEMU main loop: clocks, signal handling, the global
 * qemu_aio_context, and the glib poll-fd array; then attach the aio and
 * iohandler event sources to the default GMainContext.
 *
 * Returns 0 on success, a negative errno value on failure (setting *errp
 * where an Error is available).
 */
int qemu_init_main_loop(Error **errp)
{
    int ret;
    GSource *src;
    Error *local_error = NULL;

    init_clocks();

    /* Fix: the return value of qemu_signal_init() was silently discarded. */
    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Fix: aio_context_new() failure was ignored and errp was never used. */
    qemu_aio_context = aio_context_new(&local_error);
    if (!qemu_aio_context) {
        error_propagate(errp, local_error);
        return -EMFILE;
    }
    qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL);

    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));

    /* Obtain the aio event source and attach it; a NULL second argument
     * to g_source_attach() means the default GMainContext. */
    src = aio_get_g_source(qemu_aio_context);
    g_source_attach(src, NULL);
    g_source_unref(src);

    /* Likewise for the dedicated iohandler event source. */
    src = iohandler_get_g_source();
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}


/*
 * Return a new reference to the GSource embedded in the iohandler
 * AioContext, creating that context on first use.  Caller must drop the
 * reference with g_source_unref().
 */
GSource *iohandler_get_g_source(void)
{
    iohandler_init();
    GSource *src = aio_get_g_source(iohandler_ctx);
    return src;
}

static void iohandler_init(void)
{
    if (!iohandler_ctx) {
        iohandler_ctx = aio_context_new(&error_abort);
    }
}

/*
 * Hand out a new reference to the GSource embedded in @ctx.  The caller
 * owns the reference and releases it with g_source_unref().
 */
GSource *aio_get_g_source(AioContext *ctx)
{
    GSource *src = &ctx->source;

    g_source_ref(src);
    return src;
}



/* gsource的回调函数 */
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};


/*
 * Allocate and initialize a new AioContext.  The AioContext embeds a
 * GSource as its first member, which is why the g_source_new() result can
 * be cast directly.
 *
 * Returns the new context, or NULL with *errp set on failure.
 */
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    /* Create the aio event source. */
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));

    /* Fix: local_err from aio_context_setup() was never inspected. */
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Fix: the return value of event_notifier_init() was ignored. */
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    /* Drop the embedded GSource; this tears down the half-built context. */
    g_source_destroy(&ctx->source);
    return NULL;
}

/* The main loop runs the gsource prepare/query/check/dispatch phases here:
 * prepare+query happen in glib_pollfds_fill(), the poll itself via
 * qemu_poll_ns(), and check+dispatch in glib_pollfds_poll(). */
static int os_host_main_loop_wait(int64_t timeout)
{
    int ret;
    static int spin_counter;
    struct timespec ts1, ts2;
    int64_t timeout_delta;

    /* prepare + query: may shrink @timeout to glib's soonest deadline */
    glib_pollfds_fill(&timeout);

    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run.  To make sure we can detect the later case,
     * print a message to the screen.  If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
        static bool notified;

        if (!notified && !qtest_driver()) {
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

        /* force a non-zero timeout so the BQL gets released below */
        timeout = SCALE_MS;
    }

    /* Only drop the BQL when we are actually going to block in poll;
     * a zero timeout keeps the lock and counts as one more spin. */
    if (timeout) {
        spin_counter = 0;
        qemu_mutex_unlock_iothread();
    } else {
        spin_counter++;
    }

    inc_vm_count(MAIN_SELECT_ENTER);

    /* Time the poll with CLOCK_MONOTONIC_RAW so we can detect stalls
     * that greatly exceed the requested timeout. */
    clock_gettime(CLOCK_MONOTONIC_RAW, &ts1);
    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    inc_vm_count(MAIN_SELECT_EXIT);

    clock_gettime(CLOCK_MONOTONIC_RAW, &ts2);
    /* elapsed wall time minus the requested timeout, in whole seconds */
    timeout_delta = ((ts2.tv_sec - ts1.tv_sec) * NSEC_PER_SEC + ts2.tv_nsec -
                              ts1.tv_nsec - timeout) / NSEC_PER_SEC;
    if (unlikely(timeout_delta > 1))
        DPRINTF("os_host_main_loop_wait select use %d seconds when monotonic raw\n",
                              (int)timeout_delta);

    /* re-acquire the BQL if we released it above */
    if (timeout) {
        qemu_mutex_lock_iothread();
    }

    /* check + dispatch the glib sources that became ready */
    glib_pollfds_poll();
    return ret;
}


/*
 * Run the glib prepare and query phases: append the default GMainContext's
 * poll fds to the tail of @gpollfds (their start index is remembered in
 * glib_pollfds_idx) and lower *cur_timeout to glib's soonest deadline.
 */
static void glib_pollfds_fill(int64_t *cur_timeout)
{
    GMainContext *def_ctx = g_main_context_default();
    int gtimeout = 0;
    int64_t gtimeout_ns;
    int nfds;

    /* prepare */
    g_main_context_prepare(def_ctx, &max_priority);

    glib_pollfds_idx = gpollfds->len;
    nfds = glib_n_poll_fds;
    for (;;) {
        GPollFD *slots;

        /* Grow the array to the size glib last asked for, then query
         * again; repeat until the count stops changing. */
        glib_n_poll_fds = nfds;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        slots = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        /* query */
        nfds = g_main_context_query(def_ctx, max_priority, &gtimeout, slots,
                                    glib_n_poll_fds);
        if (nfds == glib_n_poll_fds) {
            break;
        }
    }

    /* glib reports milliseconds (-1 == block forever); convert to ns. */
    if (gtimeout < 0) {
        gtimeout_ns = -1;
    } else {
        gtimeout_ns = (int64_t)gtimeout * (int64_t)SCALE_MS;
    }

    *cur_timeout = qemu_soonest_timeout(gtimeout_ns, *cur_timeout);
}

/*
 * Run the glib check and dispatch phases against the poll fds that
 * glib_pollfds_fill() appended to @gpollfds.
 */
static void glib_pollfds_poll(void)
{
    GMainContext *def_ctx = g_main_context_default();
    GPollFD *slots = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);

    /* check: bail out unless some glib source became ready */
    if (!g_main_context_check(def_ctx, max_priority, slots, glib_n_poll_fds)) {
        return;
    }
    /* dispatch */
    g_main_context_dispatch(def_ctx);
}

/*
 * GSource prepare callback: publish the poll timeout for this AioContext.
 * Returning true tells glib it can skip polling and go straight to
 * check/dispatch.
 */
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;
    gint ms;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    ms = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
    if (aio_prepare(ctx)) {
        ms = 0;
    }
    *timeout = ms;

    return ms == 0;
}

/*
 * GSource check callback: report whether this AioContext has work ready —
 * a scheduled bottom half, pending fd handlers, or an expired timer.
 */
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *cur;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    /* Any scheduled, not-yet-deleted bottom half means work is ready. */
    for (cur = ctx->first_bh; cur != NULL; cur = cur->next) {
        if (cur->scheduled && !cur->deleted) {
            return true;
        }
    }

    if (aio_pending(ctx)) {
        return true;
    }
    return timerlistgroup_deadline_ns(&ctx->tlg) == 0;
}

/*
 * GSource dispatch callback: run the AioContext's ready handlers.
 * Always returns true so the source stays attached.
 */
static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    /* The GSource is embedded at the start of AioContext, so the cast
     * recovers the owning context; callback/user_data are unused. */
    aio_dispatch((AioContext *) source);
    return true;
}

