#include "threads/thread.h"
#include <debug.h>
#include <stddef.h>
#include <random.h>
#include <stdio.h>
#include <string.h>
#include "threads/flags.h"
#include "threads/interrupt.h"
#include "threads/intr-stubs.h"
#include "threads/palloc.h"
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"

#ifdef USERPROG
#include "userprog/process.h"
#endif


/* System load average for the 4.4BSD (MLFQS) scheduler, in fixed-point;
   initialized in thread_start() and recomputed in
   thread_mlfqs_update_load_avg_and_recent_cpu().
   fixed_t / FP_* presumably come from a fixed-point header pulled in
   via thread.h -- TODO confirm. */
fixed_t load_avg;
/* Random value for struct thread's `magic' member; used by is_thread()
   to detect stack overflow into the thread struct. */
#define THREAD_MAGIC 0xcd6abf4b

/* Threads in THREAD_READY state, kept sorted highest-priority-first
   via thread_cmp_priority (see thread_unblock/thread_yield). */
static struct list ready_list;

/* All threads; added in init_thread(), removed in thread_exit().
   Walked by thread_foreach() and the MLFQS update routines. */
static struct list all_list;

/* The idle thread; runs when no other thread is ready. */
static struct thread *idle_thread;

/* Initial thread: the thread running when thread_init() is called. */
static struct thread *initial_thread;

/* Lock protecting the tid counter in allocate_tid(). */
static struct lock tid_lock;

/* Stack frame for kernel_thread(). */
struct kernel_thread_frame {
    void *eip;              /* Return address (set to NULL). */
    thread_func *function;  /* Function for the thread to run. */
    void *aux;              /* Auxiliary data passed to the function. */
};

/* Statistics, incremented in thread_tick(). */
static long long idle_ticks;    /* Timer ticks spent in the idle thread. */
static long long kernel_ticks;  /* Timer ticks spent in kernel threads. */
static long long user_ticks;    /* Timer ticks spent in user programs. */

/* Scheduling. */
#define TIME_SLICE 4            /* Timer ticks to give each thread. */
static unsigned thread_ticks;   /* Ticks since the last yield. */


/* If true, use the multi-level feedback queue scheduler; otherwise
   the priority scheduler with donation is used. */
bool thread_mlfqs;

static void kernel_thread(thread_func *, void *aux);

static void idle(void *aux UNUSED);

static struct thread *running_thread(void);

static struct thread *next_thread_to_run(void);

static void init_thread(struct thread *, const char *name, int priority);

static bool is_thread(struct thread *);

static void *alloc_frame(struct thread *, size_t size);

static void schedule(void);

void thread_schedule_tail(struct thread *prev);

static tid_t allocate_tid(void);

/* Transforms the code that is currently running into a thread and
   initializes the run queue, the all-threads list, and the tid lock.
   Must be called with interrupts off; the caller should initialize
   the page allocator before creating any threads. */
void
thread_init(void) {
    ASSERT(intr_get_level() == INTR_OFF);

    lock_init(&tid_lock);
    list_init(&ready_list);
    list_init(&all_list);

    /* Set up a thread structure for the running thread, which lives
       at the base of the current stack page (see running_thread()). */
    initial_thread = running_thread();
    init_thread(initial_thread, "main", PRI_DEFAULT);
    initial_thread->status = THREAD_RUNNING;
    initial_thread->tid = allocate_tid();
}

/* Starts preemptive thread scheduling by enabling interrupts.
   Also creates the idle thread and zeroes the MLFQS load average. */
void
thread_start(void) {
    /* Create the idle thread; it ups idle_started once it has run
       and recorded itself in idle_thread. */
    struct semaphore idle_started;
    sema_init(&idle_started, 0);
    thread_create("idle", PRI_MIN, idle, &idle_started);
    load_avg = FP_CONST(0);  /* Load average starts at 0 at boot. */

    /* Start preemptive thread scheduling. */
    intr_enable();

    /* Wait until the idle thread has initialized idle_thread. */
    sema_down(&idle_started);
}


/* Called by the timer interrupt handler at each timer tick.
   Runs in interrupt context: accounts the tick to the proper
   statistics bucket and enforces the time slice. */
void
thread_tick(void) {
    struct thread *t = thread_current();

    /* Update statistics for whichever kind of thread was running. */
    if (t == idle_thread)
        idle_ticks++;
#ifdef USERPROG
        else if (t->pagedir != NULL)
          user_ticks++;
#endif
    else
        kernel_ticks++;

    /* Enforce preemption once the thread has used up its slice. */
    if (++thread_ticks >= TIME_SLICE)
        intr_yield_on_return();
}

/* Prints the tick statistics accumulated by thread_tick(). */
void
thread_print_stats(void) {
    long long idle = idle_ticks;
    long long kernel = kernel_ticks;
    long long user = user_ticks;

    printf("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
           idle, kernel, user);
}

/* Creates a new kernel thread named NAME with the given initial
   PRIORITY, which executes FUNCTION passing AUX as the argument,
   and adds it to the ready queue.  Returns the thread identifier
   for the new thread, or TID_ERROR if creation fails.

   If the new thread has higher priority than the caller, the
   caller yields so the new thread runs immediately (priority
   scheduling). */
tid_t
thread_create(const char *name, int priority,
              thread_func *function, void *aux) {
    struct thread *t;
    struct kernel_thread_frame *kf;
    struct switch_entry_frame *ef;
    struct switch_threads_frame *sf;
    tid_t tid;
    enum intr_level old_level;

    ASSERT(function != NULL);

    /* Allocate a zeroed page that holds both the thread struct
       (at the bottom) and its kernel stack (growing down from the top). */
    t = palloc_get_page(PAL_ZERO);
    if (t == NULL)
        return TID_ERROR;

    /* Initialize thread. */
    init_thread(t, name, priority);
    tid = t->tid = allocate_tid();

    /* Prepare the stack frames so switch_threads() can "return"
       into switch_entry() and from there into kernel_thread().
       Done with interrupts off so the partially-built stack is
       never observed. */
    old_level = intr_disable();

    /* Stack frame for kernel_thread(). */
    kf = alloc_frame(t, sizeof *kf);
    kf->eip = NULL;
    kf->function = function;
    kf->aux = aux;

    /* Stack frame for switch_entry(). */
    ef = alloc_frame(t, sizeof *ef);
    ef->eip = (void (*)(void)) kernel_thread;

    /* Stack frame for switch_threads(). */
    sf = alloc_frame(t, sizeof *sf);
    sf->eip = switch_entry;
    sf->ebp = 0;

    intr_set_level(old_level);

    /* Add to run queue. */
    thread_unblock(t);

    /* Preempt immediately if the new thread outranks us. */
    if (thread_current()->priority < priority) thread_yield();

    return tid;
}

/* Puts the current thread to sleep.  It will not run again until
   awoken by thread_unblock().  Must be called with interrupts off;
   usually it is wiser to use one of the synch.h primitives instead. */
void
thread_block(void) {
    ASSERT(!intr_context());
    ASSERT(intr_get_level() == INTR_OFF);

    thread_current()->status = THREAD_BLOCKED;
    schedule();
}

void
thread_unblock(struct thread *t) {
    enum intr_level old_level;

    ASSERT(is_thread(t));

    old_level = intr_disable();
    ASSERT(t->status == THREAD_BLOCKED);
    list_insert_ordered(&ready_list, &t->elem, (list_less_func * ) & thread_cmp_priority, NULL);
    t->status = THREAD_READY;
    intr_set_level(old_level);
}

const char *
thread_name(void) {
    return thread_current()->name;
}

/* Returns the running thread.  Sanity-checks that the struct at
   the base of the stack page really is a thread (magic intact)
   and that it is in the RUNNING state; either assertion firing
   usually indicates kernel stack overflow. */
struct thread *
thread_current(void) {
    struct thread *t = running_thread();

    ASSERT(is_thread(t));
    ASSERT(t->status == THREAD_RUNNING);

    return t;
}

tid_t
thread_tid(void) {
    return thread_current()->tid;
}

/* Deschedules the current thread and destroys it.  Never returns.
   The thread is removed from all_list here; its page is freed
   later, in thread_schedule_tail(), once another thread is running. */
void
thread_exit(void) {
    ASSERT(!intr_context());

#ifdef USERPROG
    process_exit ();
#endif

    /* Mark ourselves dying with interrupts off; schedule() will
       switch away and thread_schedule_tail() frees this thread. */
    intr_disable();
    list_remove(&thread_current()->allelem);
    thread_current()->status = THREAD_DYING;
    schedule();
    NOT_REACHED();
}

/* Yields the CPU.  The current thread is not put to sleep: it is
   re-inserted into the ready queue in priority order and may be
   rescheduled immediately.  The idle thread never goes on the
   ready queue. */
void
thread_yield(void) {
    struct thread *cur = thread_current();
    enum intr_level old_level;

    ASSERT(!intr_context());

    old_level = intr_disable();
    if (cur != idle_thread)
        list_insert_ordered(&ready_list, &cur->elem, (list_less_func * ) & thread_cmp_priority, NULL);
    cur->status = THREAD_READY;
    schedule();
    intr_set_level(old_level);
}

/* Invokes FUNC, passing AUX, on every thread in all_list.
   Must be called with interrupts off. */
void
thread_foreach(thread_action_func *func, void *aux) {
    ASSERT(intr_get_level() == INTR_OFF);

    struct list_elem *e;
    for (e = list_begin(&all_list); e != list_end(&all_list); e = list_next(e))
        func(list_entry(e, struct thread, allelem), aux);
}

/* Sets the current thread's base priority to NEW_PRIORITY.
   A no-op under MLFQS (priorities are computed automatically).

   The effective priority is only changed if the thread holds no
   locks (no donations can be active) or if the new base exceeds
   the current effective priority; otherwise a donated priority is
   preserved (it is recomputed when the lock is released, in
   thread_update_priority()).  Yields in case the change makes a
   ready thread the highest-priority one. */
void
thread_set_priority(int new_priority) {
    if (thread_mlfqs)
        return;

    enum intr_level old_level = intr_disable();

    struct thread *current_thread = thread_current();
    current_thread->base_priority = new_priority;

    if (list_empty(&current_thread->locks) || new_priority > current_thread->priority) {
        current_thread->priority = new_priority;
        thread_yield();
    }

    intr_set_level(old_level);
}

int
thread_get_priority(void) {
    return thread_current()->priority;
}

/* Sets the current thread's nice value, recomputes its MLFQS
   priority, and yields so a now-higher-priority thread can run.
   NOTE(review): runs with interrupts enabled, so a timer interrupt
   may recompute this thread's priority concurrently -- looks
   benign, but consider bracketing with intr_disable(); confirm. */
void
thread_set_nice(int nice) {
    thread_current()->nice = nice;
    thread_mlfqs_update_priority(thread_current());
    thread_yield();
}

int
thread_get_nice(void) {
    return thread_current()->nice;
}

int
thread_get_load_avg(void) {
    return FP_ROUND(FP_MULT_MIX(load_avg, 100));
}

int
thread_get_recent_cpu(void) {
    return FP_ROUND(FP_MULT_MIX(thread_current()->recent_cpu, 100));
}

/* Idle thread.  Runs when no other thread is ready.  Records
   itself in idle_thread, ups the semaphore passed by
   thread_start(), then loops blocking itself forever. */
static void
idle(void *idle_started_ UNUSED) {
    struct semaphore *idle_started = idle_started_;
    idle_thread = thread_current();
    sema_up(idle_started);

    for (;;) {
        /* Let someone else run. */
        intr_disable();
        thread_block();

        /* Re-enable interrupts and wait for the next one.
           "sti; hlt" executes atomically: the sti takes effect only
           after the following instruction, so an interrupt cannot
           slip in between enabling and halting (which would waste
           up to one full tick halted with work pending). */
        asm volatile ("sti; hlt" : : : "memory");
    }
}

/* Entry point for every kernel thread: re-enables interrupts
   (the scheduler runs with them off), runs FUNCTION on AUX, and
   exits the thread when FUNCTION returns. */
static void
kernel_thread(thread_func *function, void *aux) {
    ASSERT(function != NULL);

    intr_enable();       /* The scheduler runs with interrupts off. */
    function(aux);       /* Execute the thread function. */
    thread_exit();       /* If function() returns, kill the thread. */
}

/* Returns the running thread by rounding the stack pointer down
   to the start of its page: the struct thread always lives at the
   bottom of the page holding the thread's kernel stack. */
struct thread *
running_thread(void) {
    uint32_t *esp;

    /* Copy the CPU's stack pointer into esp. */
    asm ("mov %%esp, %0" : "=g" (esp));
    return pg_round_down(esp);
}

/* Returns true if T appears to point to a valid thread, i.e. it is
   non-null and its magic number has not been clobbered. */
static bool
is_thread(struct thread *t) {
    if (t == NULL)
        return false;
    return t->magic == THREAD_MAGIC;
}

static void
init_thread(struct thread *t, const char *name, int priority) {
    ASSERT(t != NULL);
    ASSERT(PRI_MIN <= priority && priority <= PRI_MAX);
    ASSERT(name != NULL);

    memset(t, 0, sizeof *t);
    t->status = THREAD_BLOCKED;
    strlcpy(t->name, name, sizeof t->name);
    t->stack = (uint8_t *) t + PGSIZE;
    t->priority = priority;
    t->magic = THREAD_MAGIC;
    t->ticks_blocked = 0;
    t->base_priority = priority;
    list_init(&t->locks);
    t->lock_waiting = NULL;
    t->nice = 0;
    t->recent_cpu = FP_CONST(0);

    list_insert_ordered(&all_list, &t->allelem, (list_less_func * ) & thread_cmp_priority, NULL);
}

/* Allocates a SIZE-byte frame at the top of thread T's stack and
   returns a pointer to it.  SIZE must be a multiple of the word size. */
static void *
alloc_frame(struct thread *t, size_t size) {
    ASSERT(is_thread(t));
    ASSERT(size % sizeof(uint32_t) == 0);

    uint8_t *frame = t->stack - size;  /* Stack grows downward. */
    t->stack = frame;
    return frame;
}


/* Chooses the next thread to run: the front of the ready queue
   (highest priority, since the queue is kept sorted), or the idle
   thread if the queue is empty.  Interrupts must be off. */
static struct thread *
next_thread_to_run(void) {
    if (!list_empty(&ready_list)) {
        struct list_elem *front = list_pop_front(&ready_list);
        return list_entry(front, struct thread, elem);
    }
    return idle_thread;
}


/* Completes a thread switch: marks the new thread running, resets
   the time-slice counter, activates the new address space (user
   programs), and frees the previous thread's page if it was dying.
   Called with interrupts off, by schedule() and (on first run of a
   thread) indirectly via switch_entry(). */
void
thread_schedule_tail(struct thread *prev) {
    struct thread *cur = running_thread();

    ASSERT(intr_get_level() == INTR_OFF);

    /* Mark us as running. */
    cur->status = THREAD_RUNNING;

    /* Start new time slice. */
    thread_ticks = 0;

#ifdef USERPROG
    process_activate ();
#endif

    /* If the thread we switched from is dying, destroy its page now
       that it is no longer running.  The initial thread's page was
       not palloc'd, so it is never freed here. */
    if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread) {
        ASSERT(prev != cur);
        palloc_free_page(prev);
    }
}


/* Schedules a new thread.  At entry, interrupts must be off and the
   running thread must already have been moved out of the RUNNING
   state (blocked, readied, or dying).  Switches to the next thread
   and finishes the transition in thread_schedule_tail(). */
static void
schedule(void) {
    struct thread *cur = running_thread();
    struct thread *next = next_thread_to_run();
    struct thread *prev = NULL;

    ASSERT(intr_get_level() == INTR_OFF);
    ASSERT(cur->status != THREAD_RUNNING);
    ASSERT(is_thread(next));

    /* switch_threads() returns the thread we switched from. */
    if (cur != next)
        prev = switch_threads(cur, next);
    thread_schedule_tail(prev);
}

/* Returns a fresh tid.  The counter is protected by tid_lock so
   concurrent creators never receive the same tid. */
static tid_t
allocate_tid(void) {
    static tid_t next_tid = 1;

    lock_acquire(&tid_lock);
    tid_t tid = next_tid;
    next_tid = tid + 1;
    lock_release(&tid_lock);

    return tid;
}

/* Offset of `stack' within `struct thread', exported for the
   assembly thread-switch code (switch.S), which cannot compute
   struct offsets itself. */
uint32_t thread_stack_ofs = offsetof(
struct thread, stack);

/* 1: Alarm clock -- per-tick bookkeeping for sleeping threads. */

/* Alarm-clock helper, run on every thread (via thread_foreach) each
   timer tick: counts down a sleeping thread's remaining ticks and
   wakes it when the countdown reaches zero. */
void
blocked_thread_check(struct thread *t, void *aux UNUSED) {
    /* Only sleeping (blocked, nonzero countdown) threads are affected. */
    if (t->status != THREAD_BLOCKED || t->ticks_blocked <= 0)
        return;

    t->ticks_blocked--;
    if (t->ticks_blocked == 0)
        thread_unblock(t);
}

/* 2: Priority scheduling and priority donation. */

/* list_less_func ordering threads by descending effective priority,
   so ordered insertion keeps the highest-priority thread in front. */
bool
thread_cmp_priority(const struct list_elem *a, const struct list_elem *b, void *aux UNUSED) {
    const struct thread *ta = list_entry(a, struct thread, elem);
    const struct thread *tb = list_entry(b, struct thread, elem);
    return ta->priority > tb->priority;
}

/* list_less_func ordering locks by descending max_priority, so a
   thread's `locks' list keeps its strongest donation in front. */
bool
lock_cmp_priority(const struct list_elem *a, const struct list_elem *b, void *aux UNUSED) {
    const struct lock *la = list_entry(a, struct lock, elem);
    const struct lock *lb = list_entry(b, struct lock, elem);
    return la->max_priority > lb->max_priority;
}

/* Records that the current thread now holds LOCK: inserts it into
   the thread's `locks' list (sorted by donated priority) and, if
   LOCK carries a higher donated priority than the thread's current
   effective priority, raises the thread's priority and yields so
   the scheduler can reconsider. */
void
thread_hold_the_lock(struct lock *lock) {
    enum intr_level old_level = intr_disable();
    list_insert_ordered(&thread_current()->locks, &lock->elem, lock_cmp_priority, NULL);

    if (lock->max_priority > thread_current()->priority) {
        thread_current()->priority = lock->max_priority;
        thread_yield();
    }

    intr_set_level(old_level);
}

/* Records that the current thread has released LOCK: removes it
   from the held-locks list and recomputes the thread's effective
   priority from its base priority and any remaining donations. */
void
thread_remove_lock(struct lock *lock) {
    enum intr_level old_level;

    old_level = intr_disable();
    list_remove(&lock->elem);
    /* Fall back to base priority or the next-strongest donation. */
    thread_update_priority(thread_current());
    intr_set_level(old_level);
}

/* Donates priority to thread T: recomputes T's effective priority
   (which picks up the strongest donation from locks T holds) and,
   if T is on the ready queue, re-inserts it so the queue stays
   sorted by the new priority. */
void
thread_donate_priority(struct thread *t) {
    enum intr_level old_level = intr_disable();
    thread_update_priority(t);

    if (t->status == THREAD_READY) {
        /* Reposition T in the sorted ready queue. */
        list_remove(&t->elem);
        list_insert_ordered(&ready_list, &t->elem, thread_cmp_priority, NULL);
    }
    intr_set_level(old_level);
}

/* Recomputes T's effective priority as the maximum of its base
   priority and the strongest donation among the locks it holds
   (each lock's max_priority).  The locks list is re-sorted first
   because waiters' donations may have changed since insertion. */
void
thread_update_priority(struct thread *t) {
    enum intr_level old_level = intr_disable();
    int max_priority = t->base_priority;
    int lock_priority;

    if (!list_empty(&t->locks)) {
        /* Re-sort so the strongest donation is at the front. */
        list_sort(&t->locks, lock_cmp_priority, NULL);
        lock_priority = list_entry(list_front(&t->locks),
        struct lock, elem)->max_priority;
        if (lock_priority > max_priority)
            max_priority = lock_priority;
    }

    t->priority = max_priority;
    intr_set_level(old_level);
}

/* 3: 4.4BSD-style multi-level feedback queue (MLFQS) scheduler. */

/* MLFQS: called from the timer interrupt every tick to charge one
   tick of CPU time to the running thread's recent_cpu.  The idle
   thread is never charged. */
void
thread_mlfqs_increase_recent_cpu_by_one(void) {
    ASSERT(thread_mlfqs);
    ASSERT(intr_context());

    struct thread *t = thread_current();
    if (t != idle_thread)
        t->recent_cpu = FP_ADD_MIX(t->recent_cpu, 1);
}

/* MLFQS: called from the timer interrupt once per second to update
   the system load average and every thread's recent_cpu, then
   recompute each thread's priority.

   load_avg   = (59/60)*load_avg + (1/60)*ready_threads
   recent_cpu = (2*load_avg)/(2*load_avg + 1) * recent_cpu + nice
   where ready_threads counts ready plus running threads, excluding
   the idle thread.  All arithmetic is in fixed point. */
void
thread_mlfqs_update_load_avg_and_recent_cpu(void) {
    ASSERT(thread_mlfqs);
    ASSERT(intr_context());

    /* ready_threads = size of ready queue, plus 1 if a non-idle
       thread is currently running. */
    size_t ready_threads = list_size(&ready_list);
    if (thread_current() != idle_thread)
        ready_threads++;
    load_avg = FP_ADD(FP_DIV_MIX(FP_MULT_MIX(load_avg, 59), 60), FP_DIV_MIX(FP_CONST(ready_threads), 60));

    /* Decay every thread's recent_cpu and refresh its priority. */
    struct thread *t;
    struct list_elem *e = list_begin(&all_list);
    for (; e != list_end(&all_list); e = list_next(e)) {
        t = list_entry(e,
        struct thread, allelem);
        if (t != idle_thread) {
            t->recent_cpu = FP_ADD_MIX(
                    FP_MULT(FP_DIV(FP_MULT_MIX(load_avg, 2), FP_ADD_MIX(FP_MULT_MIX(load_avg, 2), 1)), t->recent_cpu),
                    t->nice);
            thread_mlfqs_update_priority(t);
        }
    }
}

void
thread_mlfqs_update_priority(struct thread *t) {
    if (t == idle_thread)
        return;

    ASSERT(thread_mlfqs);
    ASSERT(t != idle_thread);

    t->priority = FP_INT_PART(FP_SUB_MIX(FP_SUB(FP_CONST(PRI_MAX), FP_DIV_MIX(t->recent_cpu, 4)), 2 * t->nice));
    t->priority = t->priority < PRI_MIN ? PRI_MIN : t->priority;
    t->priority = t->priority > PRI_MAX ? PRI_MAX : t->priority;
}