#ifndef SEMINIX_TCB_H
#define SEMINIX_TCB_H

#include <seminix/thread.h>
#include <seminix/sched.h>
#include <seminix/mm_types.h>
#include <seminix/restart_block.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <libseminix/types.h>

struct tcb;

/*
 * Argument bundle describing how a new thread is cloned from an
 * existing one -- presumably consumed by the arch copy_thread path
 * (NOTE(review): confirm the consumer).
 */
struct copy_thread_param {
    struct tcb *new_tsk, *copy_tsk;     /* destination / template task */
    unsigned long pc;                   /* entry point for the new thread -- TODO confirm */
    unsigned long reg0;                 /* initial first-argument register value -- TODO confirm */
    unsigned long stack;                /* initial stack pointer for the new thread */
    unsigned long flags;                /* clone-style flags; semantics defined by the caller */
    unsigned long __user *parent_tidptr;    /* user address for the parent's view of the tid */
    unsigned long __user *child_tidptr;     /* user address for the child's view of the tid */
};

/* Task command name length: */
#define TASK_COMM_LEN			16

/*
 * Per-task signal handling state, embedded directly in struct tcb
 * (not shared/refcounted as in Linux).
 */
struct sighand_struct {
    /* One action slot per signal number. */
    seminix_sigaction_t action[SEMINIX_SIGNAL_MAX];
    /* Protects this structure. */
    spinlock_t  siglock;
    /* NOTE(review): looks like a pending-signal number -- confirm
     * against the signal delivery code. */
    int signal;
};

/*
 * Thread control block: the per-thread kernel object, combining
 * identity, scheduling state, memory-management and capability state.
 */
struct tcb {
    /* NOTE(review): assumed arch code expects thread_info first
     * (current/stack lookup) -- keep as first member; confirm. */
    struct thread_info  thread_info;

    /* Human-readable task name. */
    char comm[TASK_COMM_LEN];
    int     tid;
    /* Per task flags (PF_*), defined further below: */
    unsigned int	flags;

    /* Reference count; see get_task_struct()/put_task_struct(). */
    atomic_t   usage;
    /* Protection of the PI data structures: */
    raw_spinlock_t pi_lock;

    int state;  /* -1 unrunnable, 0 runnable, >0 stopped */

    /* Scheduling priority/policy; mcprio semantics -- TODO confirm. */
    int prio, mcprio, policy;
#ifdef CONFIG_SMP
    int     cpu;        /* CPU this task last ran on */
    int     on_cpu;     /* non-zero while actually executing */
#endif
    int on_rq;          /* non-zero while enqueued on a runqueue */
    const struct sched_class *sched_class;
    struct sched_entity se;         /* CFS scheduling state */
    struct sched_rt_entity rt;      /* real-time scheduling state */
    struct sched_dl_entity dl;      /* deadline scheduling state */
    u64 nvcsw, nivcsw; /* context switch counts */

    struct mm_struct *mm;
    /* Per-thread vma caching: */
    struct vmacache	vmacache;

    /* Kernel stack allocation; see task_stack_page()/end_of_stack(). */
    void    *stack;
    /* A live task holds one reference: */
    atomic_t stack_refcount;

    /* Signal handling state (embedded, per task). */
    struct sighand_struct   sighand;

    /* User address updated on exit; see task_tid_nr()/tid handling. */
    unsigned long __user *tid_address;

    /* Capability-system state. */
    struct cap_cnode     *cap_cnode;
    struct cap_rlimit    *cap_rlimit;
    seminix_ipc_buffer_t *ipc_buffer;       /* kernel mapping of the IPC buffer */
    unsigned long         user_ipc_buffer;  /* user-space address of the same buffer */

    struct restart_block restart_block;

    /* tcb_node links into the global tcb_list (see do_each_thread). */
    struct list_head    thread_node, tcb_node;

    /* CPU-specific state of this task: */
    struct thread_struct		thread;
};

/*
 * Per process flags
 * (bit 0x00000001 is left unused -- presumably reserved; confirm)
 */
#define PF_IDLE			    0x00000002	/* I am an IDLE thread */

/* True when the PF_IDLE per-task flag marks @p as an idle thread. */
static inline bool is_idle_task(const struct tcb *p)
{
    return (p->flags & PF_IDLE) != 0;
}

/*
 * Values for tcb::state.  TASK_RUNNING is zero; the rest are single
 * bits so they can be OR-ed into the masks below.
 * NOTE(review): "TASK_STOPED" is a misspelling of "STOPPED", kept
 * because existing callers reference it by this name.
 */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE    0x0002
#define TASK_STOPED	    	    0x0004  /* suspend */
#define TASK_DEAD               0x0008  /* crash */
#define TASK_KILL               0x0010  /* kill/signal */
#define TASK_EXIT               0x0020  /* exit */
#define TASK_NEW                0x0040  /* new task */

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL        (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE | TASK_STOPED)
#define TASK_DIE           (TASK_DEAD | TASK_KILL | TASK_EXIT)

/*
 * CPU the task was last placed on.  Uniprocessor builds have no
 * per-task cpu field and always report CPU 0.
 */
static inline int task_cpu(const struct tcb *p)
{
#ifndef CONFIG_SMP
    (void)p;    /* single CPU: the field does not exist */
    return 0;
#else
    return p->cpu;
#endif
}

/* Sentinel written at the end of the kernel stack to detect overflow;
 * see set_task_stack_end_magic(). */
#define STACK_END_MAGIC		0x57AC6E9D

/* Base address of @tsk's kernel stack allocation. */
static inline void *task_stack_page(const struct tcb *tsk)
{
    void *page = tsk->stack;

    return page;
}

/*
 * Location of the STACK_END_MAGIC word: the lowest address of the
 * stack allocation (stacks grow downward toward it -- confirm per arch).
 */
static inline unsigned long *end_of_stack(const struct tcb *tsk)
{
    return (unsigned long *)tsk->stack;
}

/* Writes STACK_END_MAGIC at end_of_stack(@tsk). */
extern void set_task_stack_end_magic(struct tcb *tsk);

/*
 * Take a reference on @tsk's stack and return its base page, or NULL
 * when the stack refcount has already dropped to zero.
 */
static inline void *try_get_task_stack(struct tcb *tsk)
{
    if (!atomic_inc_not_zero(&tsk->stack_refcount))
        return NULL;

    return task_stack_page(tsk);
}

/* Counterpart to try_get_task_stack(): drops one stack reference. */
extern void put_task_stack(struct tcb *tsk);

/* Thread id of @tsk. */
static inline int task_tid_nr(struct tcb *tsk)
{
    int tid = tsk->tid;

    return tid;
}

extern struct tcb *tcb_struct_create(void);

#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct tcb *t);

/*
 * Drop one usage reference on @t; frees the tcb via
 * __put_task_struct() when the last reference goes away.
 */
static inline void put_task_struct(struct tcb *t)
{
    if (!atomic_dec_and_test(&t->usage))
        return;

    __put_task_struct(t);
}

/* The thread_info embedded at the start of @task. */
static inline struct thread_info *task_thread_info(struct tcb *task)
{
    struct thread_info *ti = &task->thread_info;

    return ti;
}

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct tcb *tsk, int flag)
{
    set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct tcb *tsk, int flag)
{
    clear_ti_thread_flag(task_thread_info(tsk), flag);
}

/* Set or clear TIF @flag in @tsk's thread_info according to @value. */
static inline void update_tsk_thread_flag(struct tcb *tsk, int flag,
                      bool value)
{
    update_ti_thread_flag(&tsk->thread_info, flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct tcb *tsk, int flag)
{
    return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct tcb *tsk, int flag)
{
    return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct tcb *tsk, int flag)
{
    return test_ti_thread_flag(task_thread_info(tsk), flag);
}

/* Ask @tsk to reschedule at the next opportunity. */
static inline void set_tsk_need_resched(struct tcb *tsk)
{
    set_ti_thread_flag(task_thread_info(tsk), TIF_NEED_RESCHED);
}

/* Acknowledge a pending reschedule request for @tsk. */
static inline void clear_tsk_need_resched(struct tcb *tsk)
{
    clear_ti_thread_flag(task_thread_info(tsk), TIF_NEED_RESCHED);
}

/* Non-zero when a reschedule of @tsk is pending (expected rare). */
static inline int test_tsk_need_resched(struct tcb *tsk)
{
    int pending = test_tsk_thread_flag(tsk, TIF_NEED_RESCHED);

    return unlikely(pending);
}

static inline int tsk_is_polling(struct tcb *tsk)
{
    return test_tsk_thread_flag(tsk, TIF_POLLING_NRFLAG);
}

/* True when the current CPU has a reschedule pending (expected rare). */
static __always_inline bool need_resched(void)
{
    bool pending = tif_need_resched();

    return unlikely(pending);
}

/*
 * Task state updates.  The __-prefixed forms are plain stores; the
 * unprefixed forms use smp_store_mb() so the state write is ordered
 * before subsequent loads (the classic sleep/wakeup protocol).
 */
#define __set_task_state(tsk, state_value)		\
    do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
    smp_store_mb((tsk)->state, (state_value))
#define __set_current_state(state_value)				\
    current->state = (state_value)

#define set_current_state(state_value)					\
    smp_store_mb(current->state, (state_value))

/* Default number of tcbs -- NOTE(review): confirm what sizes off this. */
#define NR_TCB_DEFAULT  1024

/* Global list of all tcbs, linked via tcb::tcb_node and protected by
 * tcb_list_lock; iterate with do_each_thread()/while_each_thread(). */
extern spinlock_t tcb_list_lock;
extern struct list_head tcb_list;

/*
 * Iterate over every tcb while holding tcb_list_lock:
 *
 *	do_each_thread(t) {
 *		...
 *	} while_each_thread(t);
 *
 * Wrapped in do { ... } while (0) so the pair expands to a single
 * statement: the previous form expanded to two top-level statements,
 * which would run spin_lock() unconditionally after an unbraced
 * `if (cond) do_each_thread(t) ...`.
 * Do not `return` or `goto` out of the body -- the lock stays held.
 */
#define do_each_thread(t)   \
    do { \
        spin_lock(&tcb_list_lock);  \
        list_for_each_entry(t, &tcb_list, tcb_node)

#define while_each_thread(t)    \
        spin_unlock(&tcb_list_lock); \
    } while (0)

#endif /* !SEMINIX_TCB_H */
