#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/blkdev.h>
#include <linux/kidled.h>
#include <net/sock.h>
#include <net/tcp_memcontrol.h>
#include <linux/spinlock_types.h>

struct mem_cgroup_lru_info;

/*
 * Buckets for the direct-reclaim latency histogram
 * (mem_cgroup->direct_reclaim_time_count[]).  Each constant names the
 * latency range it covers; the unit is presumably microseconds --
 * TODO(review): confirm against the code that fills the histogram.
 */
enum drt_count_t {
    DRT_0_50 = 0,       /* [0, 50)        */
    DRT_50_100,         /* [50, 100)      */
    DRT_100_200,        /* [100, 200)     */
    DRT_200_500,        /* [200, 500)     */
    DRT_500_1k,         /* [500, 1000)    */
    DRT_1k_5k,          /* [1k, 5k)       */
    DRT_5k_10k,         /* [5k, 10k)      */
    DRT_10k_100k,       /* [10k, 100k)    */
    DRT_100k_INF,       /* [100k, +inf)   */
    DRT_COUNT,          /* number of buckets, array-sizing sentinel */
};
/*
 * NOTE(review): this mirrors the eventfd context definition that is
 * normally private to fs/eventfd.c.  It is duplicated here, presumably
 * so memcg code can access the context directly; it must be kept in
 * sync with that file -- confirm before changing either copy.
 */
struct eventfd_ctx {
    struct kref kref;           /* reference count */
    wait_queue_head_t wqh;      /* wait queue woken on count changes */
    /*
     * Every time that a write(2) is performed on an eventfd, the
     * value of the __u64 being written is added to "count" and a
     * wakeup is performed on "wqh". A read(2) will return the "count"
     * value to userspace, and will reset "count" to zero. The kernel
     * side eventfd_signal() also, adds to the "count" counter and
     * issue a wakeup.
     */
    __u64 count;
    unsigned int flags;
};
/*
 * One usage threshold: when the tracked usage crosses "threshold",
 * the associated eventfd is signalled.  (Indentation normalized to
 * the file's 4-space convention; layout unchanged.)
 */
struct mem_cgroup_threshold {
    struct eventfd_ctx *eventfd;    /* notified when threshold is crossed */
    u64 threshold;                  /* usage level to compare against */
};

/* For threshold */
struct mem_cgroup_threshold_ary {
    /* An array index points to threshold just below or equal to usage. */
    int current_threshold;
    /* Size of entries[] */
    unsigned int size;
    /*
     * Array of thresholds, allocated inline with the struct.
     * C99 flexible array member replaces the deprecated GNU
     * zero-length array (entries[0]); this struct is only ever
     * referenced through pointers, so the change is layout-safe.
     */
    struct mem_cgroup_threshold entries[];
};

/* Pair of threshold arrays guarded by thresholds_lock / RCU. */
struct mem_cgroup_thresholds {
    /* Primary thresholds array */
    struct mem_cgroup_threshold_ary *primary;
    /*
     * Spare threshold array.
     * This is needed to make mem_cgroup_unregister_event() "never fail".
     * It must be able to store at least primary->size - 1 entries.
     */
    struct mem_cgroup_threshold_ary *spare;
};

/*
 * Iterator state for hierarchical reclaim, kept per zone and per
 * reclaim priority (see mem_cgroup_per_zone.reclaim_iter[]) so a
 * reclaim pass can resume where the previous one stopped.
 */
struct mem_cgroup_reclaim_iter {
    /*
     * last scanned hierarchy member. Valid only if last_dead_count
     * matches memcg->dead_count of the hierarchy root group.
     */
    struct mem_cgroup *last_visited;
    unsigned long last_dead_count;

    /* scan generation, increased every round-trip */
    unsigned int generation;
};
/*
 * Per-(memcg, zone) state: this cgroup's LRU lists in one zone,
 * reclaim iterator state, and soft-limit RB-tree linkage.
 */
struct mem_cgroup_per_zone {
    struct lruvec       lruvec;
    unsigned long       lru_size[NR_LRU_LISTS]; /* pages on each LRU list */

    /* one resumable iterator per reclaim priority level */
    struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

    struct rb_node      tree_node;  /* RB tree node */
    unsigned long long  usage_in_excess;/* Set to the value by which */
                        /* the soft limit is exceeded*/
    bool            on_tree;    /* linked on the soft-limit RB tree? */
    bool            writeback;  /* memcg kswapd reclaim writeback */
    bool            dirty;      /* memcg kswapd reclaim dirty */
    bool            congested;  /* memcg has many dirty pages */
                        /* backed by a congested BDI */
    struct mem_cgroup   *memcg;     /* Back pointer, we cannot */
                        /* use container_of    */

    unsigned long       pages_scanned;  /* since last reclaim */
    bool            all_unreclaimable;  /* All pages pinned */
};

/* Per-node bundle of per-zone state for one memcg. */
struct mem_cgroup_per_node {
    struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};
/*
 * Variable-length per-node pointer table, presumably sized by the
 * number of nodes at allocation time -- confirm against the memcg
 * allocator.  Deliberately a GNU zero-length array rather than a C99
 * flexible array member: this struct is embedded as the last field of
 * struct mem_cgroup, and ISO C forbids embedding a struct that ends
 * in a flexible array member.
 */
struct mem_cgroup_lru_info {
    struct mem_cgroup_per_node *nodeinfo[0];
};
/*
 * The memory controller data structure.  Tracks memory usage in "res"
 * (plus mem+swap in "memsw" and kernel memory in "kmem"), and carries
 * per-cgroup reclaim, OOM, threshold and writeback state.
 * NOTE: mis-encoded indent-guide characters in the original comments
 * have been repaired; all code is unchanged.
 */
struct mem_cgroup {
    struct cgroup_subsys_state css;
    /*
     * the counter to account for memory usage
     */
    struct res_counter res;

#ifdef CONFIG_MEM_DELAY
    /* Memory delay measurement domain */
    struct memdelay_domain *memdelay_domain;
#endif

    /* vmpressure notifications */
    struct vmpressure vmpressure;

    union {
        /*
         * the counter to account for mem+swap usage.
         */
        struct res_counter memsw;

        /*
         * rcu_freeing is used only when freeing struct mem_cgroup,
         * so put it into a union to avoid wasting more memory.
         * It must be disjoint from the css field.  It could be
         * in a union with the res field, but res plays a much
         * larger part in mem_cgroup life than memsw, and might
         * be of interest, even at time of free, when debugging.
         * So share rcu_head with the less interesting memsw.
         */
        struct rcu_head rcu_freeing;
        /*
         * We also need some space for a worker in deferred freeing.
         * By the time we call it, rcu_freeing is no longer in use.
         */
        struct work_struct work_freeing;
    };

    /*
     * the counter to account for kernel memory usage.
     */
    struct res_counter kmem;
    /*
     * Should the accounting and control be hierarchical, per subtree?
     */
    bool use_hierarchy;
    unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

    int     oom_kill;
    bool        oom_lock;
    atomic_t    under_oom;
    atomic_t    oom_wakeups;

    atomic_t    refcnt;

    int swappiness;

    int priority;

    bool    oom_kill_all;
    bool    use_priority_oom;
    /* OOM-Killer disable */
    int     oom_kill_disable;

    /* set when res.limit == memsw.limit */
    bool        memsw_is_minimum;

    /* protect arrays of thresholds */
    struct mutex thresholds_lock;

    /* thresholds for memory usage. RCU-protected */
    struct mem_cgroup_thresholds thresholds;

    /* thresholds for mem+swap usage. RCU-protected */
    struct mem_cgroup_thresholds memsw_thresholds;

    /* For oom notifier event fd */
    struct list_head oom_notify;

#ifdef CONFIG_CGROUP_WRITEBACK
    struct list_head cgwb_list;
    struct wb_domain cgwb_domain;
#endif

    /*
     * Should we move charges of a task when a task is moved into this
     * mem_cgroup ? And what type of charges should we move ?
     */
    unsigned long   move_charge_at_immigrate;
    /*
     * set > 0 if pages under this cgroup are moving to other cgroup.
     */
    atomic_t        moving_account;
    /* taken only while moving_account > 0 */
    spinlock_t      move_lock;
    struct task_struct  *move_lock_task;
    unsigned long       move_lock_flags;
    /*
     * percpu counter.
     */
    struct mem_cgroup_stat_cpu __percpu *stat;
    spinlock_t pcp_counter_lock;

#ifdef CONFIG_CGROUP_WRITEBACK
    int dirty_ratio;
    int dirty_bg_ratio;
#endif
    /* per-memcg watermark knobs -- semantics defined by their handlers */
    atomic_t    wmark_ratio;
    atomic64_t  wmark_extra;
    atomic_t    force_empty_ctl;

    bool        kswapd_stop; /* Protected by kswapds_spinlock */
    struct mutex    kswapd_mutex;
    wait_queue_head_t   *kswapd_wait;

    /* compared against reclaim_iter.last_dead_count to validate last_visited */
    atomic_t    dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
    struct tcp_memcontrol tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
    /* analogous to slab_common's slab_caches list. per-memcg */
    struct list_head memcg_slab_caches;
    /* Not a spinlock, we can take a lot of time walking the list */
    struct mutex slab_caches_mutex;
    /* Index in the kmem_cache->memcg_params->memcg_caches array */
    int kmemcg_id;
#endif

    int last_scanned_node;
#if MAX_NUMNODES > 1
    nodemask_t  scan_nodes;
    atomic_t    numainfo_events;
    atomic_t    numainfo_updating;
#endif
    /* direct-reclaim latency histogram, bucketed by enum drt_count_t */
    u64 direct_reclaim_time_count[DRT_COUNT];
    spinlock_t direct_reclaim_time_count_lock;

    u64 direct_reclaim_sched_time_histogram
        [DRSTH_COUNT][DRSTH_TYPE_COUNT];
    spinlock_t direct_reclaim_sched_time_histogram_lock;

#ifdef CONFIG_KIDLED
    struct rw_semaphore idle_stats_rwsem;
    unsigned long idle_scans;
    struct kidled_scan_period scan_period;
    int idle_stable_idx;
    struct idle_page_stats idle_stats[KIDLED_STATS_NR_TYPE];
#endif

    /*
     * Per cgroup active and inactive list, similar to the
     * per zone LRU lists.
     *
     * WARNING: This has to be the last element of the struct. Don't
     * add new fields after this point.
     */
    struct mem_cgroup_lru_info info;
};
