/*
    libnbds
    Non-blocking Data Structures Library

    Copyright (C) 2011 Paweł Dziepak

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "atomic.h"

namespace nbds {
	namespace arch {
		/* Compare and swap (cmpxchg) */
		/*
		 * Atomically compare *dst with cmp and, if equal, store src into
		 * *dst.  Returns the value *dst held before the operation (equal
		 * to cmp on success).  The "memory" clobber also makes this a
		 * compiler barrier.
		 */
		u32 cmp_and_swp_32(u32 *dst, u32 cmp, u32 src) {
			/* res must match the operand width: a u64 output tied to
			   "a" would claim the %edx:%eax pair on i386, colliding
			   with the "d" (dst) input; cmpxchg only writes %eax. */
			u32 res;
			asm volatile("lock ; cmpxchg %%ecx, (%%edx)" : 
					"=a" (res) :
					"a" (cmp), "c" (src), "d" (dst) :
					"memory");
			return res;
		}

		/*
		 * 16-bit variant of cmp_and_swp_32: atomically compare *dst with
		 * cmp and, if equal, store src.  Returns the previous *dst value.
		 */
		u16 cmp_and_swp_16(u16 *dst, u16 cmp, u16 src) {
			/* res must be u16: the 16-bit cmpxchg only writes %ax, and a
			   u64 output tied to "a" would collide with the "d" input. */
			u16 res;
			asm volatile("lock ; cmpxchg %%cx, (%%edx)" : 
					"=a" (res) :
					"a" (cmp), "c" (src), "d" (dst) :
					"memory");
			return res;
		}

		/*
		 * 8-bit variant of cmp_and_swp_32: atomically compare *dst with
		 * cmp and, if equal, store src.  Returns the previous *dst value.
		 */
		u8 cmp_and_swp_8(u8 *dst, u8 cmp, u8 src) {
			/* res must be u8: the 8-bit cmpxchg only writes %al, and a
			   u64 output tied to "a" would collide with the "d" input. */
			u8 res;
			asm volatile("lock ; cmpxchg %%cl, (%%edx)" : 
					"=a" (res) :
					"a" (cmp), "c" (src), "d" (dst) :
					"memory");
			return res;
		}

		/* Fetch and add (xadd) */
		/*
		 * Atomically add val to *dst and return the value *dst held
		 * before the addition.  lock xadd exchanges the register with
		 * *dst and stores the sum back, so the old contents end up in
		 * the register operand.
		 */
		u32 ftch_and_add_32(u32 *dst, u32 val) {
			u32 old = val;
			asm volatile("lock ; xadd %0, %1" :
					"+a" (old), "+m" (*dst) : :
					"memory");
			return old;
		}

		/*
		 * 16-bit fetch-and-add: atomically add val to *dst and return
		 * the pre-addition value of *dst.
		 */
		u16 ftch_and_add_16(u16 *dst, u16 val) {
			u16 old = val;
			asm volatile("lock ; xadd %0, %1" :
					"+a" (old), "+m" (*dst) : :
					"memory");
			return old;
		}

		/*
		 * 8-bit fetch-and-add: atomically add val to *dst and return
		 * the pre-addition value of *dst.
		 */
		u8 ftch_and_add_8(u8 *dst, u8 val) {
			u8 old = val;
			asm volatile("lock ; xadd %0, %1" :
					"+a" (old), "+m" (*dst) : :
					"memory");
			return old;
		}

		/* Exchange (xchg) */
		/*
		 * Atomically store val into *dst and return the value *dst held
		 * before the store.  xchg with a memory operand is implicitly
		 * locked; the explicit prefix matches the rest of this file.
		 */
		u32 xchg_32(u32 *dst, u32 val) {
			u32 prev = val;
			asm volatile("lock ; xchg %0, %1" :
					"+a" (prev), "+m" (*dst) : :
					"memory");
			return prev;
		}

		/*
		 * 16-bit atomic exchange: store val into *dst and return the
		 * previous contents of *dst.
		 */
		u16 xchg_16(u16 *dst, u16 val) {
			u16 prev = val;
			asm volatile("lock ; xchg %0, %1" :
					"+a" (prev), "+m" (*dst) : :
					"memory");
			return prev;
		}

		/*
		 * 8-bit atomic exchange: store val into *dst and return the
		 * previous contents of *dst.
		 */
		u8 xchg_8(u8 *dst, u8 val) {
			u8 prev = val;
			asm volatile("lock ; xchg %0, %1" :
					"+a" (prev), "+m" (*dst) : :
					"memory");
			return prev;
		}

		/* Compare and swap 2 (cmpxchg, cmpxchg8b) */
		/*
		 * Atomic 64-bit (2 x 32-bit) compare-and-swap via cmpxchg8b.
		 * cmp[0]/cmp[1] are the expected low/high words (bound to
		 * %eax/%edx), src[0]/src[1] the replacement (delivered in
		 * %ebx/%ecx), and the previous contents of *dst come back in
		 * res[0]/res[1] regardless of success.
		 *
		 * src[0] travels in %esi and is moved into %ebx by hand around
		 * the cmpxchg8b — presumably because %ebx cannot appear in the
		 * constraint/clobber lists under i386 PIC (it holds the GOT
		 * pointer); confirm against the build flags.
		 */
		void cmp_and_swp2_32(u32 *dst, const u32 *cmp, const u32 *src, 
					u32 *res) {
			asm volatile("pushl %%ebx\n\t"
				     "movl %%esi, %%ebx\n\t"
				     "lock ; cmpxchg8b (%%edi)\n\t"
				     "popl %%ebx" : 
					"=a" (res[0]), "=d" (res[1]) :
					"a" (cmp[0]), "d" (cmp[1]),
					"S" (src[0]), "c" (src[1]),
					"D" (dst) :
					"memory");
		}

		/*
		 * Atomic compare-and-swap of TWO adjacent u16s at once: the
		 * pair is treated as a single u32 and handled with one 32-bit
		 * cmpxchg.  The previous contents of *dst are written through
		 * res (as one u32 store) regardless of success.
		 *
		 * NOTE(review): the u16*->u32* casts type-pun the buffers and
		 * assume dst/cmp/src/res each hold two contiguous u16s; this
		 * sidesteps strict aliasing and presumes suitable alignment —
		 * confirm callers always pass 2-element arrays.
		 */
		void cmp_and_swp2_16(u16 *dst, const u16 *cmp, const u16 *src,
					u16 *res) {
			asm volatile("lock ; cmpxchg %%ecx, (%%edx)" : 
					"=a" (*(u32*)res) :
					"a" (*(const u32*)cmp),
					"c" (*(const u32*)src),
					"d" (dst) :
					"memory");
		}

		/*
		 * Atomic compare-and-swap of TWO adjacent u8s at once: the pair
		 * is treated as a single u16 and handled with one 16-bit
		 * cmpxchg.  The previous contents of *dst are written through
		 * res (as one u16 store) regardless of success.
		 *
		 * NOTE(review): the u8*->u16* casts type-pun the buffers and
		 * assume dst/cmp/src/res each hold two contiguous u8s; like the
		 * 16-bit variant above, this relies on type punning working
		 * through the asm/"memory" clobber — confirm callers always
		 * pass 2-element arrays.
		 */
		void cmp_and_swp2_8(u8 *dst, const u8 *cmp, const u8 *src,
					u8 *res) {
			asm volatile("lock ; cmpxchg %%cx, (%%edx)" : 
					"=a" (*(u16*)res) :
					"a" (*(const u16*)cmp),
					"c" (*(const u16*)src),
					"d" (dst) :
					"memory");
		}
	}
}

