/*
 * =====================================================================================
 *       Filename:  _kvl_atomic.h
 *
 *    Description:  Atomic operations (原子操作)
 *
 *         Author:  Kevin Liang , ( Liang Bing ) (http://changed-wind.blogbus.com), kevinliang@tencent.com
 *        Company:  Tencent.com Inc.
 *
 * =====================================================================================
 */
 
 /**********************************************************************************************/
 //changed by kevenzhu  2010-06-18
 //make it a common infrastructure component

#ifndef  _QQ_ATOMIC_H
#define  _QQ_ATOMIC_H


#include	"linux/types.h"

/* Prepend the x86 "lock" prefix only on SMP builds; a uniprocessor build
 * skips the (costly) bus lock since no other CPU can race the instruction. */
#ifdef _SMP_
#define LOCK_PREFIX "lock "
#else
#define LOCK_PREFIX ""
#endif

/* NOTE(review): this clashes with <stdbool.h> and the C23 `bool` keyword if
 * either is ever enabled in the same translation unit -- confirm no user of
 * this header also includes <stdbool.h>. */
typedef int bool;

/* define the atomic counter: machine-word sized so the inline asm below can
 * operate on it with a single instruction */
typedef struct{
#if defined  __X86_32__
    volatile uint32_t counter;
#else        //__X86_64__
    volatile uint64_t counter;
#endif
}atomic_st;

/* bare counter type used by the _atomic_* helpers: a plain volatile integer
 * (32-bit on __X86_32__, 64-bit otherwise), NOT a struct and NOT a pointer */
#if defined  __X86_32__
typedef     volatile uint32_t atomic_t;
#else       //__X86_64__
typedef     volatile uint64_t atomic_t;
#endif

/* Plain (non-locked) read/write of the counter VARIABLE itself -- note these
 * take the atomic_t object directly, unlike the _atomic_* helpers below,
 * which take a pointer to it.  volatile forces the access to memory, but
 * these are not ordering barriers. */
#define atomic_read(v)  ((v))
#define atomic_set(v,i) ((v) = (i))



/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  atomic_add
 *  Description:  Atomically add the delta i to the counter *v.
 *                FIX: atomic_t is 64 bits wide when __X86_32__ is not
 *                defined, but the old code unconditionally used the 32-bit
 *                "addl", which only updated the low dword of the counter and
 *                silently dropped carries past bit 31.  The operand size now
 *                matches the width of atomic_t on each architecture.
 * =====================================================================================
 */
static inline void _atomic_add(atomic_t *v, int i )
{
#if defined __X86_32__
        __asm__ __volatile__ (
        LOCK_PREFIX "addl %1,%0"
        : "+m" (*v)
        : "ir" (i));
#else   /* atomic_t is uint64_t here: use the 64-bit add */
        __asm__ __volatile__ (
        LOCK_PREFIX "addq %1,%0"
        : "+m" (*v)
        : "er" ((long)(i)));    /* "e": 32-bit sign-extended immediate */
#endif
}


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  atomic_sub
 *  Description:  Atomically subtract the delta i from the counter *v.
 *                FIX: atomic_t is 64 bits wide when __X86_32__ is not
 *                defined, but the old code unconditionally used the 32-bit
 *                "subl", which only updated the low dword of the counter and
 *                corrupted it whenever a borrow crossed bit 31.  The operand
 *                size now matches the width of atomic_t on each architecture.
 * =====================================================================================
 */
static inline void _atomic_sub(atomic_t *v, int i )
{
#if defined __X86_32__
    __asm__ __volatile__ (
        LOCK_PREFIX "subl %1,%0"
        : "+m" (*v)
        : "ir" (i));
#else   /* atomic_t is uint64_t here: use the 64-bit subtract */
    __asm__ __volatile__ (
        LOCK_PREFIX "subq %1,%0"
        : "+m" (*v)
        : "er" ((long)(i)));    /* "e": 32-bit sign-extended immediate */
#endif
}

/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  atomic_inc
 *  Description:  Atomically increment the counter *inc by one.
 *                FIX: atomic_t is 64 bits wide when __X86_32__ is not
 *                defined, but the old code unconditionally used the 32-bit
 *                "incl", which only incremented the low dword -- the counter
 *                wrapped to 0 at 2^32 instead of carrying into the high
 *                dword.  The operand size now matches atomic_t.
 * =====================================================================================
 */
static __inline__ void _atomic_inc(atomic_t *inc )
{
#if defined __X86_32__
    __asm__ __volatile__(
        LOCK_PREFIX "incl %0"
        :"+m"(*inc)
    );
#else   /* atomic_t is uint64_t here: use the 64-bit increment */
    __asm__ __volatile__(
        LOCK_PREFIX "incq %0"
        :"+m"(*inc)
    );
#endif
}

/*
 * ===  FUNCTION  ======================================================================
 *         Name:  atomic_dec
 *  Description:  Atomically decrement the counter *dec by one.
 *                FIX: atomic_t is 64 bits wide when __X86_32__ is not
 *                defined, but the old code unconditionally used the 32-bit
 *                "decl", which only decremented the low dword -- a decrement
 *                through zero never borrowed from the high dword.  The
 *                operand size now matches atomic_t.
 * =====================================================================================
 */
static __inline__ void _atomic_dec(atomic_t *dec)
{
#if defined __X86_32__
    __asm__ __volatile__(
        LOCK_PREFIX "decl %0"
        :"+m"(*dec)
    );
#else   /* atomic_t is uint64_t here: use the 64-bit decrement */
    __asm__ __volatile__(
        LOCK_PREFIX "decq %0"
        :"+m"(*dec)
    );
#endif
}

/*for the 32bits platform*/
#if defined __X86_32__


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  __add_and_return
 *  Description:  32-bit platform, 486 and above: atomically add i to *v and
 *                return the NEW (post-add) value.  xadd leaves the old value
 *                of *v in i, so i + __i reconstructs the result.  The
 *                "memory" clobber additionally makes this a full compiler
 *                barrier.
 * =====================================================================================
 */
static inline int _atomic_add_return(int i, atomic_t *v ){
    
    int __i = i;                        /* remember the delta */
    __asm__ __volatile__(
        LOCK_PREFIX "xaddl %0, %1"      /* i <- old *v; *v <- old *v + i */
        : "+r" (i), "+m" (*v)
        : : "memory");
    return i + __i;                     /* old value + delta = new value */
}

/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  __set_64bit
 *  Description:  Core of the atomic 64-bit store on a 32-bit system: reload
 *                the current value into EDX:EAX and retry cmpxchg8b until it
 *                succeeds, at which point ECX:EBX (high:low of the new value)
 *                is written to *ptr in a single atomic 8-byte operation.
 *                NOTE(review): the hard "b" constraint on `low` conflicts
 *                with EBX's role as the GOT register in 32-bit PIC builds --
 *                confirm this path is never compiled with -fPIC.
 * =====================================================================================
 */
static inline void _set_64bit(unsigned long long *ptr,
                                unsigned int low,
                                unsigned int high){
    __asm__ __volatile__(
        "\n1:\t"
        "movl (%0), %%eax\n\t"              /* EAX = current low dword  */
        "movl 4(%0), %%edx\n\t"             /* EDX = current high dword */
        LOCK_PREFIX "cmpxchg8b (%0)\n\t"    /* still current? store ECX:EBX */
        "jnz 1b"                            /* lost a race: reload and retry */
        : /*  no outputs */
        : "D"(ptr),
        "b"(low),
        "c"(high)
        : "eax", "edx", "memory");
}

/* Atomically store a 64-bit value on a 32-bit system by splitting it into
 * its low and high dwords and delegating to _set_64bit(). */
static inline void _set_64bit_constant(unsigned long long *ptr,
                                        unsigned long long value){

    unsigned int lo = (unsigned int)(value & 0xffffffffULL);
    unsigned int hi = (unsigned int)(value >> 32);
    _set_64bit(ptr, lo, hi);
}

/* In-place access to the low/high dword of a 64-bit lvalue.
 * NOTE(review): this type-puns through unsigned int * and technically
 * violates strict aliasing -- confirm the build uses -fno-strict-aliasing
 * or an equivalent guarantee. */
#define ll_low(x)   *(((unsigned int *)&(x)) + 0)
#define ll_high(x)  *(((unsigned int *)&(x)) + 1)

/* Atomic 64-bit store of a variable (as opposed to a constant): split the
 * value into its two dwords and delegate to _set_64bit(). */
static inline void _set_64bit_var( unsigned long long *ptr,
                                    unsigned long long value){

    _set_64bit(ptr, ll_low(value), ll_high(value));
}



/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  CAS_32bit
 *  Description:  Compare-and-swap: if *ptr equals old, store new_one into
 *                *ptr.  Returns non-zero on success, 0 on failure.  size
 *                selects the operand width (1, 2 or 4 bytes); any other size
 *                returns 0 without touching memory.
 *                FIX: "sete %%al" writes only the low byte of EAX, while
 *                cmpxchg leaves the value it loaded from memory in the rest
 *                of the register.  On a failed swap against a value > 255 the
 *                old code could therefore return a non-zero "success".  A
 *                "movzbl" now zero-extends AL into the whole return register.
 * =====================================================================================
 */
static __inline__ uint32_t _CAS(volatile void *ptr, 
                                unsigned long old, 
                                unsigned long new_one, 
                                int size ){

    bool ret = 0;
    switch (size) {
    
    case 1 : {  /* 1-byte CAS */
        __asm__ __volatile__( 
        
            LOCK_PREFIX "cmpxchgb %b1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "q"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");
            
        return ret;
    }
    
    case 2 : {  /* 2-byte CAS */
        __asm__ __volatile__( 
        
            LOCK_PREFIX "cmpxchgw %w1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "r"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");
            
        return ret;
    }
    
    case 4 : {  /* 4-byte CAS: the native word on this branch */
            
        __asm__ __volatile__( 
        
            LOCK_PREFIX "cmpxchgl %1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "r"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");
            
        return ret;
    }
    }
    return ret;     /* unsupported size: report failure */
}


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  CAS2_32bit
 *  Description:  Double-width CAS on a 32-bit system: if the two consecutive
 *                dwords at ptr equal old1 (low) and old2 (high), atomically
 *                replace them with new1 and new2.  Returns non-zero on
 *                success, 0 on failure.
 *                FIX: "sete %%al" writes only the low byte of EAX, while a
 *                failed cmpxchg8b leaves the low dword of memory in EAX.  The
 *                old code could therefore return a non-zero "success" after a
 *                failed swap.  A "movzbl" now zero-extends AL into EAX.
 * =====================================================================================
 */
static __inline__ bool _CAS2 (volatile void * ptr, 
                                unsigned long old1, 
                                unsigned long old2, 
                                unsigned long new1, 
                                unsigned long new2){
    bool ret = 0;
    
    __asm__ __volatile__ ( 
        LOCK_PREFIX "cmpxchg8b (%1) \n\t sete %%al \n\t movzbl %%al,%%eax"
        :   "=a"(ret)
        :   "D"(ptr), "a"(old1), "d"(old2), "b"(new1), "c"(new2)
        :   "memory"
    );
    
    return ret;
}


#else //_X86_64_


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  __add_and_return
 *  Description:  64-bit system: atomically add a 32-BIT delta and return the
 *                new 32-bit value; the "memory" clobber also acts as a
 *                compiler barrier.
 *                NOTE(review): "xaddl" only updates the low dword of the
 *                64-bit atomic_t -- carries past bit 31 are lost and the high
 *                dword is never touched.  Use _atomic64_add_return() below
 *                for counters that may exceed 32 bits.
 * =====================================================================================
 */
static inline int _atomic_add_return(int i, atomic_t *v)
{
    int __i = i;                        /* remember the delta */
    __asm__ __volatile__(
        LOCK_PREFIX "xaddl %0, %1"      /* i <- old *v; *v <- old *v + i */
        : "+r" (i), "+m" (*v)
        : : "memory"
    );
    return i + __i;                     /* old value + delta = new value */
}


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  __add_and_return
 *  Description:  64-bit system: atomically add a full 64-bit delta to *v and
 *                return the NEW (post-add) value.  xaddq leaves the old value
 *                of *v in i, so i + __i reconstructs the result.  The
 *                "memory" clobber also acts as a compiler barrier.
 * =====================================================================================
 */
static inline long _atomic64_add_return(long i, atomic_t *v)
{
    long __i = i;                       /* remember the delta */
    __asm__ __volatile__(
        LOCK_PREFIX "xaddq %0, %1;"     /* i <- old *v; *v <- old *v + i */
        : "+r" (i), "+m" (*v)
        : : "memory"
    );
    return i + __i;                     /* old value + delta = new value */
}


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  _set_64bit
 *  Description:  64-bit store on a 64-bit system: a single volatile write of
 *                a native machine word, so no lock prefix or retry loop is
 *                needed here.
 * =====================================================================================
 */
static inline void _set_64bit(volatile unsigned long *ptr,
                              unsigned long val)
{
    ptr[0] = val;
}


/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  CAS_64bit
 *  Description:  Compare-and-swap: if *ptr equals old, store new_one into
 *                *ptr.  Returns non-zero on success, 0 on failure.  size
 *                selects the operand width (1, 2, 4 or 8 bytes); any other
 *                size returns 0 without touching memory.
 *                FIX: "sete %%al" writes only the low byte of the accumulator,
 *                while cmpxchg leaves the value it loaded from memory in the
 *                rest of the register.  On a failed swap against a value
 *                > 255 the old code could therefore return a non-zero
 *                "success".  A "movzbl" now zero-extends AL into the full
 *                return register (writing EAX also clears the upper half of
 *                RAX).
 * =====================================================================================
 */
static __inline__ uint32_t _CAS( volatile void *ptr, 
                                unsigned long old, 
                                unsigned long new_one, 
                                int size ){
        
    bool ret = 0;
    switch (size) {
    case 1 : {  /* 1-byte CAS */
        __asm__ __volatile__(

            LOCK_PREFIX "cmpxchgb %b1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "q"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");

        return ret;
    }

    case 2 : {  /* 2-byte CAS */
        __asm__ __volatile__( 
        
            LOCK_PREFIX "cmpxchgw %w1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "r"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");
            
        return ret;
    }
    
    case 4 : {  /* 4-byte CAS */
            
        __asm__ __volatile__( 
        
            LOCK_PREFIX "cmpxchgl %k1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "r"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");
            
        return ret;
    }
    
    case 8 : {  /* 8-byte CAS: the native word on this branch */
            
        __asm__ __volatile__( 
        
            LOCK_PREFIX "cmpxchgq %1,%2 \n\t sete %%al \n\t movzbl %%al,%%eax"
            : "=a"(ret)
            : "r"(new_one), "m"(*(volatile unsigned long*)ptr), "0"(old)
            : "memory");
            
        return ret;
    }
    }
    //should NEVER happen: unsupported size, report failure
    return ret;
}

/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  CAS2_64bit
 *  Description:  Double-width CAS on x86_64 via cmpxchg16b: if the two
 *                consecutive qwords at ptr equal old1 (low) and old2 (high),
 *                atomically replace them with new1 and new2.  Returns
 *                non-zero on success, 0 on failure.
 *                Requires CPU support for the cx16 flag and -mcx16 at compile
 *                time (noted by kevinliang 2009.6.15; newer AMD Opterons also
 *                support cx16, 2009.7.10).  ptr MUST be 16-byte aligned
 *                (added by kevenzhu).
 *                FIX: "sete %%al" writes only the low byte of RAX, while a
 *                failed cmpxchg16b leaves the low qword of memory in RAX.
 *                The old code could therefore return a non-zero "success"
 *                after a failed swap.  "movzbl %%al,%%eax" now zero-extends
 *                AL through the whole register.
 * =====================================================================================
 */
static __inline__ bool _CAS2 (   volatile void * ptr, 
                                unsigned long old1, 
                                unsigned long old2, 
                                unsigned long new1, 
                                unsigned long new2)
{
    bool ret = 0;
    
    __asm__ __volatile__ ( 
        LOCK_PREFIX "cmpxchg16b (%1) \n\t sete %%al \n\t movzbl %%al,%%eax"
        :   "=a"(ret)
        :   "D"(ptr), "a"(old1), "d"(old2), "b"(new1), "c"(new2)
        :   "memory"
    );
    
    return ret;
}

#endif

/* Public macro front-ends for the static inline helpers above. */
#if 1
#define atomic_add(v,i)     _atomic_add(v,i)
#define atomic_sub(v,i)     _atomic_sub(v,i)
#define atomic_inc(v)       _atomic_inc(v)
#define atomic_dec(v)       _atomic_dec(v)
/* NOTE(review): the 3-argument _set_64bit and _set_64bit_constant only exist
 * on the __X86_32__ branch; these two macros do not expand to anything
 * usable on __X86_64__ builds -- confirm callers are 32-bit only. */
#define set_64bit(p,l,h)    _set_64bit(p,l,h)
#define set_64bit_ex(p,v)   _set_64bit_constant(p,v)
#define atomic_add_return(i,v)   _atomic_add_return(i,v)

#if defined  __X86_64__
#define atomic64_add_return(i,v) _atomic64_add_return(i,v)
#endif 

/* native-word CAS: 8-byte compare on 64-bit builds, 4-byte otherwise */
#if defined __X86_64__
#define CAS(p,old,newone)   _CAS(p,old,newone,8)
#else
#define CAS(p,old,newone)   _CAS(p,old,newone,4)
#endif //endif __X86_64__

#define CAS2(p,old1,old2,new1,new2) _CAS2(p,old1,old2,new1,new2)
#endif //endif 1

/* Test-and-test-and-set style trylock: the cheap plain read short-circuits
 * before attempting the bus-locking CAS when the lock is visibly held.
 * Unlock is a plain store of 0 (no lock prefix). */
#define hplogd_trylock(lock)   (*(lock) == 0 && CAS(lock,0,1))
#define hplogd_unlock(lock)    (*(lock) = 0)



/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  mem_barrier
 *  Description:  Memory barriers.
 *                NOTE(review): every macro below expands to a COMPILER-only
 *                barrier (empty asm with a "memory" clobber); none emits a
 *                CPU fence instruction (mfence/lfence/sfence).  mb()/smp_mb()
 *                as defined here therefore do not provide a hardware
 *                store-load fence -- confirm no caller relies on full-fence
 *                semantics.
 * =====================================================================================
 */
#define barrier() __asm__ __volatile__("": : :"memory")

#define mb() \
    __asm__ __volatile__("": : :"memory")

#define rmb() \
    __asm__ __volatile__("": : :"memory")

#define wmb() \
    __asm__ __volatile__("": : :"memory")

#define read_barrier_depends() \
    __asm__ __volatile__("": : :"memory")

/* On SMP builds the smp_* variants alias the barriers above; on UP builds
 * a compiler barrier (or nothing, for the data-dependency case) suffices. */
#ifdef _SMP_
#define smp_mb()    mb()
#define smp_rmb()   rmb()
#define smp_wmb()   wmb()
#define smp_read_barrier_depends()  read_barrier_depends()
#else
#define smp_mb()    barrier()
#define smp_rmb()   barrier()
#define smp_wmb()   barrier()
#define smp_read_barrier_depends()  do { } while (0)
#endif

/* project-facing alias */
#define kvl_mb()    smp_mb()

#endif
