/*
 *  linux/init/main.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  GK 2/5/95  -  Changed to support mounting root fs via NFS
 *  Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
 *  Simplified starting of init:  Michael A. Griffith <grif@acm.org>
 */
#include <seminix/kernel.h>
#include <seminix/start_kernel.h>
#include <seminix/linkage.h>
#include <seminix/init.h>
#include <seminix/smp.h>
#include <seminix/param.h>
#include <seminix/irqflags.h>
#include <seminix/irq.h>
#include <seminix/slab.h>
#include <seminix/preempt.h>
#include <seminix/percpu.h>
#include <seminix/of.h>
#include <seminix/extable.h>
#include <seminix/sched/clock.h>
#include <seminix/sched/init.h>
#include <seminix/sched/task.h>
#include <seminix/sched/task_stack.h>
#include <seminix/mm.h>
#include <seminix/cpu.h>
#include <seminix/uts.h>
#include <seminix/stackprotector.h>
#include <seminix/ktime.h>
#include <seminix/hrtimer.h>
#include <seminix/sched.h>
#include <seminix/radix-tree.h>

phys_addr_t phys_initrd_start;
unsigned long phys_initrd_size;

enum system_states system_state;
/* Untouched command line saved by arch-specific code. */
char boot_command_line[COMMAND_LINE_SIZE];

/*
 * parse_args() callback for the early pass: run the setup hook of every
 * obs_kernel_param whose name matches @param.  Unrecognised options are
 * deliberately tolerated — the full parse happens later.
 */
static int __init do_early_param(char *param, char *val,
                    const char *unused, void *arg)
{
    const struct obs_kernel_param *entry;

    for (entry = __setup_start; entry < __setup_end; entry++) {
        if (!parameq(param, entry->str))
            continue;
        if (entry->setup_func(val) != 0)
            pr_warn("Malformed early option '%s'\n", param);
    }
    /* We accept everything at this stage. */
    return 0;
}

/* Run the "early" parameter pass over @cmdline via do_early_param().
 * NOTE: parse_args() modifies the string in place, so callers must pass
 * a scratch copy, not boot_command_line itself. */
static void __init parse_early_options(char *cmdline)
{
    parse_args("early options", cmdline, NULL, do_early_param);
}

/* Arch code calls this early on, or if not, just before other parsing.
 * Idempotent: only the first invocation does any work. */
void __init parse_early_param(void)
{
    static int parsed __initdata;
    static char scratch_cmdline[COMMAND_LINE_SIZE] __initdata;

    if (!parsed) {
        /*
         * Parse a scratch copy so boot_command_line itself stays
         * untouched (parse_early_options mutates its argument).
         */
        strlcpy(scratch_cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_early_options(scratch_cmdline);
        parsed = 1;
    }
}

/*
 * Invoke a single initcall and sanity-check its behavior: warn (and
 * repair) if it returned with a changed preempt count or with interrupts
 * disabled.  Returns the initcall's own return value.
 */
static int __init do_one_initcall(initcall_t fn)
{
    int saved_count = preempt_count();
    char msgbuf[64];
    int ret;

    ret = fn();
    msgbuf[0] = '\0';

    if (preempt_count() != saved_count) {
        /* Buffer is empty here, so this just copies the message. */
        strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
        preempt_count_set(saved_count);
    }
    if (irqs_disabled()) {
        strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
        local_irq_enable();
    }
    WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);

    return ret;
}

extern initcall_entry_t __initcall_start[];
extern initcall_entry_t __initcall0_start[];
extern initcall_entry_t __initcall1_start[];
extern initcall_entry_t __initcall2_start[];
extern initcall_entry_t __initcall3_start[];
extern initcall_entry_t __initcall4_start[];
extern initcall_entry_t __initcall5_start[];
extern initcall_entry_t __initcall6_start[];
extern initcall_entry_t __initcall7_start[];
extern initcall_entry_t __initcall_end[];

/*
 * Level boundaries: entry [i] is the start of level i, entry [i+1] its end.
 * Fix: __initcall0_start was declared above but missing from this table,
 * which silently folded all level-0 initcalls into the pre-SMP sweep below
 * instead of running them as the first do_initcalls() level.
 */
static initcall_entry_t *initcall_levels[] __initdata = {
    __initcall0_start,
    __initcall1_start,
    __initcall2_start,
    __initcall3_start,
    __initcall4_start,
    __initcall5_start,
    __initcall6_start,
    __initcall7_start,
    __initcall_end,
};

/* Run every initcall registered at @level, in link order. */
static void __init do_initcall_level(int level)
{
    initcall_entry_t *fn;

    for (fn = initcall_levels[level]; fn < initcall_levels[level + 1]; fn++)
        do_one_initcall(*fn);
}

/*
 * Run the early initcalls that must execute before secondary CPUs are
 * brought up: everything between __initcall_start and __initcall0_start.
 * Stopping at __initcall0_start (not __initcall1_start) keeps level-0
 * initcalls out of this pass so they are not executed twice now that
 * initcall_levels[] includes them.
 */
static void __init do_pre_smp_initcalls(void)
{
    initcall_entry_t *fn;

    for (fn = __initcall_start; fn < __initcall0_start; fn++)
        do_one_initcall(*fn);
}

/* Run all leveled initcalls, level 0 through 7, in ascending order. */
static void __init do_initcalls(void)
{
    int level;

    for (level = 0; level < (int)ARRAY_SIZE(initcall_levels) - 1; level++)
        do_initcall_level(level);
}

/*
 * Set up kernel memory management.  Order matters: the zone/node free
 * areas must exist before memory statistics are printed, and the slab
 * allocator (kmem_cache_init) comes last since it allocates from the
 * page allocator initialized above it.
 */
static __init void mm_init(void)
{
    free_area_init_nodes();
    mem_print_memory_info();
    kmem_cache_init();
}

/* Defined below; forward-declared so start_kernel() can tail into it. */
static noinline void rest_init(void);

/*
 * Architecture-independent kernel entry point, called from arch setup
 * code with the boot CPU running on the init task's stack.  The sequence
 * below is strictly ordered: each subsystem may only rely on those
 * initialized before it.  Never returns — ends in rest_init().
 */
asmlinkage __visible void __init start_kernel(void)
{
    system_state = SYSTEM_BOOTING;

    /* Arm the stack-overflow canary on init_task's stack. */
    set_task_stack_end_magic(&init_task);
    smp_setup_processor_id();

    local_irq_disable();

    /*
     * Interrupts are still disabled. Do necessary setups, then
     * enable them.
     */
    boot_cpu_init();
    pr_notice("%s", seminix_banner);
    setup_arch();

    boot_init_stack_canary();

    /* Per-CPU areas must exist before the boot CPU is prepared for SMP. */
    setup_nr_cpu_ids();
    setup_per_cpu_areas();
    smp_prepare_boot_cpu();

    /* boot_command_line was saved by arch code; parse the early options. */
    pr_notice("Kernel command line: %s\n", boot_command_line);
    parse_early_param();
    sort_main_extable();

    mm_init();

    /*
     * Set up the scheduler prior starting any interrupts (such as the
     * timer interrupt). Full topology setup happens at smp_init()
     * time - but meanwhile we still have a functioning scheduler.
     */
    sched_init();

    call_function_init();

    /*
     * Disable preemption - early bootup scheduling is extremely
     * fragile until we cpu_idle() for the first time.
     */
    preempt_disable();
    if (WARN(!irqs_disabled(),
         "Interrupts were enabled *very* early, fixing it\n"))
        local_irq_disable();
    radix_tree_init();

    /* IRQ and timekeeping infrastructure, still with interrupts off. */
    init_IRQ();
    time_init();
    hrtimers_init();

    WARN(!irqs_disabled(), "Interrupts were enabled early\n");
    local_irq_enable();

    sched_clock_init();
    system_tick_init();

    /* Bring up secondary CPUs, then run all remaining initcalls. */
    smp_prepare_cpus();
    do_pre_smp_initcalls();
    smp_init();
    sched_init_smp();

    do_initcalls();

    rest_init();
}

/*
 * Final stage of boot: reclaim __init memory, mark read-only data, flip
 * system_state to SYSTEM_RUNNING, and turn the boot thread into the idle
 * loop.  Called from start_kernel() with preemption disabled; never
 * returns.
 *
 * NOTE(review): the rootserver bring-up below is intentionally disabled —
 * once a userspace rootserver exists it should be created and woken here,
 * before the state transition.
 */
static noinline void rest_init(void)
{
    // struct task_struct *rootserver;

    // rootserver = rootserver_init();
    // if (!rootserver)
    //     panic("No working rootserver create.  Try passing rootserver= option to kernel.");

    /* __init text/data is no longer needed from this point on. */
    free_initmem();
    mark_rodata_ro();

 //   wake_up_new_task(rootserver);

    system_state = SYSTEM_RUNNING;

    schedule_preempt_disabled();
    /* Call into cpu_idle with preempt disabled */
    cpu_startup_entry();
}
