/*
 * Copyright (C) 2014
 *
 * Brick Yang <printfxxx@163.com>
 *
 * This program is free software. You can redistribute it and/or
 * modify it as you like.
 */

/**
 * @file	compat.h
 * @brief	Header file for compatible kernel APIs implementation
 */

#ifndef _COMPAT_H_
#define _COMPAT_H_

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/kconfig.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 9)
#include <linux/mm.h>
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
#include <linux/bitfield.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
#include <linux/atomic.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
#include <linux/skbuff.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
#include <linux/fwnode.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
#include <linux/of.h>
#include <linux/device.h>
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
#include <linux/of_mdio.h>
#endif

/*
 * BUG FIX: the original guard was
 *   LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) ||
 *   LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0)
 * which is a tautology — every version satisfies at least one side — so
 * the header was always included anyway.  It is genuinely needed on both
 * sides (the of_phy_attach() declarations for >= 5.12 and dev_get_flags()
 * for the < 6.17 netif_get_flags() shim), so include it unconditionally.
 */
#include <linux/netdevice.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
#include <linux/phy.h>
#include <linux/acpi.h>
#endif

/*
 * Helpers for calling kernel symbols resolved at runtime:
 * DEFINE_KSYM_PTR(x) declares a function pointer named ksym_<x> with the
 * same type as symbol x; CALL_KSYM_PTR(x, ...) invokes it.
 * NOTE(review): the pointers are presumably filled in by compat_init()
 * (e.g. via kallsyms lookup) — verify against compat.c.
 */
#define DEFINE_KSYM_PTR(x)	typeof(x) *ksym_##x
#define CALL_KSYM_PTR(x, ...)	ksym_##x(__VA_ARGS__)

/* One-time initialization of this compat layer (implemented elsewhere). */
int __init compat_init(void);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
/* The pointer-taking for_each_cpu() appeared in 2.6.28; map it onto the
 * old by-value cpumask iterator. */
#define for_each_cpu(cpu, cpumask)	for_each_cpu_mask(cpu, (*cpumask))
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
/* Map the modern ndo_* member names back to the pre-2.6.29 field names. */
#define ndo_start_xmit	hard_start_xmit
#define ndo_get_stats	get_stats

/*
 * dev_get_stats() - compat for kernels that predate the 2.6.29 helper.
 *
 * Returns the device's stats via its get-stats hook when one is set,
 * falling back to the embedded dev->stats.
 *
 * BUG FIX: must be 'static inline'.  A plain external definition in a
 * header is emitted once per including translation unit and fails to
 * link ("multiple definition of dev_get_stats") as soon as two .c files
 * include compat.h.
 *
 * NOTE(review): struct net_device_ops itself only landed in 2.6.29 —
 * this path assumes a compat definition is provided elsewhere; verify.
 */
static inline
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	else
		return &dev->stats;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
/* netdev_tx_t (the NETDEV_TX_* return type) was only typedef'ed in 2.6.32. */
typedef int netdev_tx_t;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
/* Feature flags that may not exist on old kernels: define to 0 so a
 * "wanted features" mask can be built unconditionally. */
#ifndef NETIF_F_GRO
#define NETIF_F_GRO		0
#endif

#ifndef NETIF_F_RXCSUM
#define NETIF_F_RXCSUM		0
#endif

#ifndef NETIF_F_IPV6_CSUM
#define NETIF_F_IPV6_CSUM	0
#endif

/* Pre-2.6.39 kernels have no wanted_features/netdev_update_features()
 * machinery: write the active feature mask directly ... */
#define SET_NETDEV_FEATURES(ndev, wants)	((ndev)->features = (wants))

/* ... and emulate netdev_update_features() by just firing the
 * features-changed notification. */
static inline
void netdev_update_features(struct net_device *dev)
{
	netdev_features_change(dev);
}
#else
/* Modern kernels: record the request; netdev_update_features() applies it. */
#define SET_NETDEV_FEATURES(ndev, wants)	((ndev)->wanted_features = (wants))
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
/* netdev_features_t became a dedicated type in 3.3; u32 matches the old
 * width of net_device::features. */
typedef u32 netdev_features_t;

/* 3.3 merged the stopped and frozen checks into one helper. */
static inline
bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return (netif_tx_queue_stopped(dev_queue) || netif_tx_queue_frozen(dev_queue));
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
struct netdev_notifier_info;
/*
 * Before 3.11, netdevice notifiers received the struct net_device
 * pointer itself as the notifier payload, so the "info" pointer IS the
 * device — the cast simply recovers it.
 */
static inline
struct net_device *netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return (struct net_device *)info;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
/* The raw_ (lockdep-free) seqcount variants appeared in 3.13; on older
 * kernels the plain versions are equivalent. */
#define raw_read_seqcount_begin(s)	read_seqcount_begin(s)
#define raw_write_seqcount_begin(s)	write_seqcount_begin(s)
#define raw_write_seqcount_end(s)	write_seqcount_end(s)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
/* U64_MAX/S64_MAX limits were added to kernel.h in 3.14. */
#ifndef U64_MAX
#define U64_MAX		((u64)(~0ULL))
#endif

#ifndef S64_MAX
#define S64_MAX		((s64)(U64_MAX >> 1))
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
#ifdef CONFIG_ACPI
/* Pre-3.19 fwnode_handle carried an explicit type discriminator. */
static inline
bool is_acpi_node(struct fwnode_handle *fwnode)
{
	return fwnode && fwnode->type == FWNODE_ACPI;
}
#else
/* Without ACPI support no fwnode can be an ACPI node. */
static inline
bool is_acpi_node(struct fwnode_handle *fwnode)
{
	return false;
}
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 9)
/* page_is_pfmemalloc() replaced direct access to page->pfmemalloc. */
static inline
bool page_is_pfmemalloc(struct page *page)
{
	return page->pfmemalloc;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
/*
 * 4.2 added the net-namespace argument to sock_create_kern(); drop it.
 * A function-like macro that references its own name is not re-expanded,
 * so this resolves to the real (old) function.
 */
#define sock_create_kern(n, f, t, p, r)	\
	sock_create_kern(f, t, p, r)

/* to_of_node() is the 4.2+ spelling.
 * NOTE(review): presumably of_node() exists on these kernels — verify. */
#define to_of_node(n)	of_node(n)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
/* Relaxed/acquire/release atomic variants landed in 4.3; fall back to
 * load-acquire/store-release, or to the fully-ordered cmpxchg (stronger
 * ordering than requested, which is always safe). */
#define atomic_read_acquire(v)			smp_load_acquire(&(v)->counter)
#define atomic_set_release(v, i)		smp_store_release(&(v)->counter, (i))
#define atomic_cmpxchg_relaxed(v, o, n)		atomic_cmpxchg(v, o, n)
#define atomic_cmpxchg_acquire(v, o, n)		atomic_cmpxchg(v, o, n)
#define atomic_cmpxchg_release(v, o, n)		atomic_cmpxchg(v, o, n)
#define atomic_long_cmpxchg_relaxed(v, o, n)	atomic_long_cmpxchg(v, o, n)
#define atomic_long_cmpxchg_acquire(v, o, n)	atomic_long_cmpxchg(v, o, n)
#define atomic_long_cmpxchg_release(v, o, n)	atomic_long_cmpxchg(v, o, n)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
/* 4.4 renamed the ACPI fwnode helpers; 4.2 had renamed them once before,
 * hence the nested version check. */
#define is_acpi_device_node(n)	is_acpi_node(n)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
#define to_acpi_device_node(n)	acpi_node(n)
#else
#define to_acpi_device_node(n)	to_acpi_node(n)
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
/* phydev_name() helper (4.5+): pre-4.5 phy_device embeds its struct
 * device directly, so dev_name() applies to &phydev->dev. */
static inline
const char *phydev_name(const struct phy_device *phydev)
{
	return dev_name(&phydev->dev);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
/*
 * Minimal backport of <linux/bitfield.h> (4.9+).  _mask must be a
 * constant, contiguous bitmask; it is evaluated more than once, so it
 * must be free of side effects.  __bf_shf() yields the shift of the
 * mask's lowest set bit.
 */
#define __bf_shf(x)	(__builtin_ffsll(x) - 1)

#define FIELD_PREP(_mask, _val)						\
	({								\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})

#define FIELD_GET(_mask, _reg)						\
	({								\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
	})
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
/*
 * of_fwnode_handle() backport (helper added in 4.12).
 *
 * BUG FIX: this was guarded by CONFIG_ACPI, but it is an OF (device
 * tree) helper — upstream include/linux/of.h guards it with CONFIG_OF,
 * and struct device_node's fwnode member only exists in OF builds.
 * With CONFIG_OF=y and CONFIG_ACPI=n the old guard made every OF node's
 * fwnode come back NULL.
 * NOTE(review): device_node::fwnode only exists once fwnode support was
 * merged (~3.19); confirm the oldest kernels built against this header.
 */
#ifdef CONFIG_OF
#define of_fwnode_handle(node)	(&(node)->fwnode)
#else
#define of_fwnode_handle(node)	NULL
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
/*
 * Backport of 4.13's skb_unref(): drop one reference and return true
 * when the caller held the last one and must free the skb.  The
 * smp_rmb() on the refcount==1 fast path mirrors upstream (it pairs
 * with the ordering implied by the atomic decrement on the slow path);
 * do not reorder these statements.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return false;

	return true;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
/* Pretty-printers added in 4.14; provided by this compat layer. */
const char *phy_speed_to_str(int speed);
const char *phy_duplex_to_str(unsigned int duplex);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
#ifdef CONFIG_ACPI
/* ACPI_HANDLE_FWNODE() (4.16+): fwnode -> acpi_device -> acpi_handle. */
#define ACPI_HANDLE_FWNODE(fwnode)	\
	acpi_device_handle(to_acpi_device_node(fwnode))
#else
#define ACPI_HANDLE_FWNODE(fwnode)	NULL
#endif

/* Backport provided by this compat layer. */
int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
/* linkmode_*() bit helpers were added in 4.20; these mirror upstream
 * and operate on ethtool link-mode bitmaps. */
static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
{
	__set_bit(nr, addr);
}

static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
{
	__clear_bit(nr, addr);
}

static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
{
	return test_bit(nr, addr);
}

/* The bitmap-wide helpers need __ETHTOOL_LINK_MODE_MASK_NBITS, which
 * only exists once the ethtool link-mode masks landed in 4.6. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
static inline void linkmode_zero(unsigned long *dst)
{
	bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
{
	bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
				const unsigned long *b)
{
	bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
				const unsigned long *b)
{
	bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static inline bool linkmode_empty(const unsigned long *src)
{
	return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* Returns non-zero when the result bitmap is non-empty (bitmap_andnot
 * semantics). */
static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
				  const unsigned long *src2)
{
	return bitmap_andnot(dst, src1, src2,  __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/*
 * linkmode_adv_to_mii_adv_t() - translate an ethtool link-mode bitmap
 * into an MII advertisement register value (compat for < 4.20).
 *
 * Each supported link-mode bit contributes its corresponding
 * ADVERTISE_* flag to the returned register value.
 */
static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
{
	static const struct {
		int bit;	/* ETHTOOL_LINK_MODE_*_BIT */
		u32 adv;	/* matching ADVERTISE_* flag */
	} mii_map[] = {
		{ ETHTOOL_LINK_MODE_10baseT_Half_BIT,	ADVERTISE_10HALF },
		{ ETHTOOL_LINK_MODE_10baseT_Full_BIT,	ADVERTISE_10FULL },
		{ ETHTOOL_LINK_MODE_100baseT_Half_BIT,	ADVERTISE_100HALF },
		{ ETHTOOL_LINK_MODE_100baseT_Full_BIT,	ADVERTISE_100FULL },
		{ ETHTOOL_LINK_MODE_Pause_BIT,		ADVERTISE_PAUSE_CAP },
		{ ETHTOOL_LINK_MODE_Asym_Pause_BIT,	ADVERTISE_PAUSE_ASYM },
	};
	unsigned int i;
	u32 adv = 0;

	for (i = 0; i < ARRAY_SIZE(mii_map); i++) {
		if (linkmode_test_bit(mii_map[i].bit, advertising))
			adv |= mii_map[i].adv;
	}

	return adv;
}
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
/* 5.0 added the extack argument to dev_change_flags(); drop it.  The
 * self-referencing macro is not re-expanded, so this calls the real
 * (old) function. */
#define dev_change_flags(dev, flags, extack)	\
	dev_change_flags(dev, flags)

/*
 * linkmode_mod_bit() - set bit @nr in @addr when @set is non-zero,
 * otherwise clear it.  Compat for kernels before 5.0 added the helper.
 */
static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
				    int set)
{
	if (!set) {
		linkmode_clear_bit(nr, addr);
		return;
	}

	linkmode_set_bit(nr, addr);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
/* atomic_long acquire/release accessors appeared in 5.1. */
#define atomic_long_read_acquire(v)	smp_load_acquire(&(v)->counter)
#define atomic_long_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
/* Backport of build_skb_around() (5.2+); provided by this compat layer. */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

/* 5.2 renamed the raw counter accessor. */
#define __arch_counter_get_cntvct()	arch_counter_get_cntvct()
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
/* Backport of fwnode_find_reference() (5.3+); provided by this compat
 * layer. */
struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
		      const char *name,
		      unsigned int index);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
__weak __alias(_dev_fwnode)
struct fwnode_handle *dev_fwnode(struct device *dev);
__weak __alias(_device_match_name)
int device_match_name(struct device *dev, const void *name);
__weak __alias(_device_match_fwnode)
int device_match_fwnode(struct device *dev, const void *fwnode);

static inline
struct fwnode_handle *_dev_fwnode(struct device *dev)
{
	return IS_ENABLED(CONFIG_OF) && dev->of_node ?
		&dev->of_node->fwnode : dev->fwnode;
}

static inline
int _device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}

static inline
int _device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}

static inline
struct device *bus_find_device_by_fwnode(struct bus_type *bus,
					 const struct fwnode_handle *fwnode)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
	return bus_find_device(bus, NULL, (void *)fwnode,
			(int (*)(struct device *, void *))device_match_fwnode);
#else
	return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
#endif
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
/*
 * NOTE(review): of_phy_attach() is (re)declared here only for >= 5.12,
 * presumably because this module carries its own implementation for
 * those kernels — confirm the reason against the module's sources.
 */
#if IS_ENABLED(CONFIG_OF_MDIO)
struct phy_device *of_phy_attach(struct net_device *dev,
				 struct device_node *phy_np,
				 u32 flags, phy_interface_t iface);
#else
/* Without OF_MDIO there is no OF-described PHY to attach. */
static inline
struct phy_device *of_phy_attach(struct net_device *dev,
				 struct device_node *phy_np,
				 u32 flags, phy_interface_t iface)
{
	return NULL;
}
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
/*
 * Backports of 5.14 PHY/ACPI helpers, using the same weak-alias pattern
 * as the 5.4 compat helpers: the public name resolves to the local
 * '_'-prefixed fallback unless a strong definition exists.
 */
__weak __alias(_fwnode_phy_find_device)
struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
__weak __alias(_fwnode_get_phy_node)
struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode);
__weak __alias(_acpi_get_local_address)
int acpi_get_local_address(acpi_handle handle, u32 *addr);

#if IS_ENABLED(CONFIG_PHYLIB)
/* Real implementations, provided by this compat layer. */
struct phy_device *__fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
struct fwnode_handle *__fwnode_get_phy_node(struct fwnode_handle *fwnode);

static inline
struct phy_device *_fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
{
	return __fwnode_phy_find_device(phy_fwnode);
}

static inline
struct fwnode_handle *_fwnode_get_phy_node(struct fwnode_handle *fwnode)
{
	return __fwnode_get_phy_node(fwnode);
}
#else
/* Without PHYLIB there are no PHY devices/nodes to find. */
static inline
struct phy_device *_fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
{
	return NULL;
}

static inline
struct fwnode_handle *_fwnode_get_phy_node(struct fwnode_handle *fwnode)
{
	return NULL;
}
#endif

#ifdef CONFIG_ACPI
/* Real implementation, provided by this compat layer. */
int __acpi_get_local_address(acpi_handle handle, u32 *addr);

static inline
int _acpi_get_local_address(acpi_handle handle, u32 *addr)
{
	return __acpi_get_local_address(handle, addr);
}
#else
/* No ACPI: a local-address (_ADR) lookup cannot succeed. */
static inline
int _acpi_get_local_address(acpi_handle handle, u32 *addr)
{
	 return -ENODEV;
}
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 14, 0)
/* 6.14 added the dev argument to txq_trans_update(); drop it.  A
 * function-like macro referencing its own name is not re-expanded, so
 * this calls the real (old) function. */
#define txq_trans_update(dev, txq)	txq_trans_update(txq)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0)
/* netif_get_flags() is the 6.17+ name for dev_get_flags(). */
#define netif_get_flags(dev)		dev_get_flags(dev)
#endif

#endif	/* _COMPAT_H_ */
