#ifndef __LINUX_SCHED_H
#define __LINUX_SCHED_H

#include "asm/ptrace.h"
#include "asm/thread_info.h"

#include "linux/wait.h"
#include "linux/fs.h"
#include "linux/list.h"
#include "linux/mm.h"
#include "linux/wait.h"
#include "linux/spinlock_types.h"
#include "linux/atomic.h"
#include "linux/slab.h"
#include "linux/percpu-defs.h"
#include "linux/thread_info.h"

/*
 * Coarse task scheduling state.
 * NOTE(review): the TASK_* defines further down in this header cover similar
 * ground — confirm which set task_struct.state actually uses.
 */
typedef enum 
{
	running,	/* currently executing on a CPU */
	ready,		/* runnable, waiting to be scheduled */
	wait,		/* blocked waiting on an event */
	sleeping,	/* sleeping — presumably for a timed interval; confirm */
}status;

/* Singly-linked list node describing one slab of heap memory. */
typedef struct _heap_slab
{
	unsigned int heap_slab_start;	/* start address of the slab */
	unsigned int heap_slab_size;	/* slab size — presumably bytes; confirm */
	struct _heap_slab *heap_slab_next;	/* next slab in the list, or NULL */
}heap_slab;

typedef unsigned long mm_segment_t;	/* legacy memory-segment descriptor type */
/*
 * Per-task control block.  One instance per task; linked into scheduler
 * runqueues, the IPC sender queues, the process tree and the sleep list.
 */
struct task_struct
{
	/* Embedded low-level thread state; read directly by task_cpu(). */
	struct thread_info thread_info;

	unsigned long sp;		/* saved stack pointer — TODO confirm when it is valid */
	unsigned long sp_size;		/* stack size */
	unsigned long sp_bottom;	/* lowest address of the stack */
	unsigned long current_max_sp;	/* stack usage high-water mark — TODO confirm */
 
	unsigned int flags;		/* PF_* per-process flags (defined below) */
	void *stack;			/* kernel stack page; see task_stack_page() */

	int did_exec;			/* nonzero once the task has performed exec */

	int pid;			/* process id */
	char proc_name[16];		/* task name — presumably NUL-terminated; confirm */
	unsigned int sleep_time;	/* remaining sleep — presumably in ticks; confirm */

	unsigned int state;		/* task state — see `status` enum / TASK_* defines */

	struct mm_struct *mm;		/* address space — presumably NULL for kernel threads */
	struct mm_struct *active_mm;	/* mm currently in use — confirm borrow semantics */

    volatile unsigned long need_resched;	/* set when a reschedule is pending */

    unsigned long counter;	/* remaining quantum — TODO confirm units */
    int lock_depth;		/* lock nesting depth */
    unsigned long start_time;	/* creation time — units unconfirmed */

    struct list_head run_list;	/* linkage on a runqueue list (see cfs_rq.tasks) */

	unsigned int prio;	/* scheduling priority */

	unsigned long process_mem_size;	/* size of the process image */
	unsigned long phyaddr;		/* physical load address */
	unsigned long viraddr;		/* virtual load address */

	unsigned int authority;	/* privilege level — presumably kernel task vs. user task; original comment was mojibake */

	int time_slice;		/* length of the time slice */
	int ticks;		/* tick counter for the slice — TODO confirm direction */

	wait_queue_t wq;		/* generic wait-queue entry for this task */
	wait_queue_t wait_childexit;	/* parent blocks here for child exit — presumably */
	spinlock_t alloc_lock;		/* protects allocation-related fields */
	raw_spinlock_t pi_lock;		/* wakeup / priority lock — confirm scope */

	/* Synchronous message-passing state (Minix-style IPC — TODO confirm). */
	int p_recvfrom;			/* pid we are blocked receiving from */
	int p_sendto;			/* pid we are blocked sending to */
	struct task_struct *q_sending;	/* head of queue of tasks sending to us */
	struct task_struct *next_sending;	/* next task in the sender queue we sit on */
	int has_int_msg;           /**
									* nonzero if an INTERRUPT occurred when
									* the task is not ready to deal with it.
									*/
	int nr_tty;			/* controlling tty index */
	
	/* Process tree (classic Linux naming): original parent, parent,
	 * youngest child, younger sibling, older sibling — TODO confirm. */
	struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;

	/* Page-fault / swap accounting for this task and its children. */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;

	struct task_struct 		*next;	/* global task-list linkage */
	struct task_struct 		*prev;
	struct file 	*filp[NR_OPEN];	/* open-file table; NR_OPEN presumably from linux/fs.h */
	struct inode	*pwd;		/* current working directory */
	struct inode 	*root;		/* root directory */

    struct task_struct      *next_sleep_proc;	/* sleeping-task list linkage */
    struct task_struct      *prev_sleep_proc;
};

/*
 * One THREAD_SIZE allocation serves as both the kernel stack and the
 * thread_info that lives at its base; the union overlays the two views.
 */
union thread_union 
{
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE / sizeof(long)];
};

#define NR_OPEN_DEFAULT 32

/*
 * Task state values for task_struct.state (distinct bits from ZOMBIE up,
 * so they can be mask-tested).  NOTE(review): overlaps the `status` enum
 * near the top of this header — confirm which set is authoritative.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_ZOMBIE		4
#define TASK_STOPPED		8

extern struct task_struct *current;
extern struct task_struct *old_task;

/* Maximum number of active map areas.. This is a random (large) number */
#define MAX_MAP_COUNT	(65536)

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_VFORK	0x00001000	/* Wake up parent in mm_release */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */

#define PF_USEDFPU	0x00100000	/* task used FPU this quantum (SMP) */

/*
 * Ptrace flags
 */

#define PT_PTRACED	0x00000001
#define PT_TRACESYS	0x00000002
#define PT_DTRACE	0x00000004	/* delayed trace (used on m68k, i386) */


/* Fair-class runqueue: runnable tasks kept on a plain list. */
struct cfs_rq
{
	unsigned int nr_running;	/* number of tasks on this runqueue */

	struct list_head tasks;		/* list of runnable tasks (via task_struct.run_list — presumably) */
};

/* Realtime-class runqueue (count only; no task list yet). */
struct rt_rq
{
	unsigned int rt_nr_running;	/* number of runnable RT tasks */
};

/* Deadline-class runqueue (count only; no task list yet). */
struct dl_rq
{
	unsigned int dl_nr_running;	/* number of runnable deadline tasks */
};

/* Per-CPU runqueue; one instance per CPU via DECLARE_PER_CPU below. */
struct rq
{
	raw_spinlock_t lock;		/* protects this runqueue */

	unsigned int nr_running;	/* total runnable tasks on this CPU */
	struct task_struct *curr;	/* task currently running on this CPU */
	struct task_struct *idle;	/* this CPU's idle task */

	struct cfs_rq cfs;		/* fair-class sub-runqueue */
	struct rt_rq rt;		/* realtime-class sub-runqueue */
	struct dl_rq dl;		/* deadline-class sub-runqueue */

	int cpu;			/* id of the CPU this runqueue belongs to */

};

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return p->thread_info.cpu;
}

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define task_rq(p)		cpu_rq(task_cpu(p))

static inline int is_idle_task(const struct task_struct *p)
{
	/*
	 * Stub: always reports "not idle" and ignores @p.  The commented-out
	 * check suggests the idle task is identified by pid == 0 —
	 * NOTE(review): confirm and restore once pid 0 is reserved for idle.
	 */
	//return p->pid == 0;
    return 0;
}


static inline int need_resched(void)
{
	/*
	 * Stub: never requests a reschedule.  NOTE(review): task_struct has a
	 * `need_resched` field that this presumably should consult — confirm.
	 */
    return 0;
}

static inline void *task_stack_page(const struct task_struct *task)
{
	/* Base of the task's kernel stack allocation. */
	void *page = task->stack;

	return page;
}

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	/*
	 * Returns the thread_info at the base of the task's stack page
	 * (thread_union layout).  NOTE(review): task_struct also embeds a
	 * thread_info as its first member, and task_cpu() reads that copy —
	 * confirm the two are kept in sync, or whether &task->thread_info is
	 * the intended source of truth.
	 */
	return (struct thread_info *)(task->stack);
}


int do_execve(char *, char **, char **);
void wake_up_new_task(struct task_struct *p);
void wake_up_process(struct task_struct * p);
void sched_init(void);
void sched_clock_init(void);
struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

/*
 * Context-switch from @prev to @next; @last receives the task that was
 * running before us when we resume (the return value of __switch_to()).
 * Passes the on-stack thread_info of both tasks to the arch switcher.
 */
#define switch_to(prev, next, last) \
do  \
{ \
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next));\
} while (0)


/*
 * `current`: the task running on this CPU, reached through its thread_info.
 * NOTE(review): this macro shadows the earlier
 * `extern struct task_struct *current;` declaration above — after this
 * point every use of `current` expands to get_current(); confirm the
 * extern variable is still wanted.
 */
#define get_current() (current_thread_info()->task)
#define current get_current()

extern union thread_union init_thread_union;
extern struct task_struct init_task;
extern struct   mm_struct init_mm;


#endif
