/*
* ========== Copyright Header Begin ==========================================
* 
* OpenSPARC T1 Processor File: v9_inst_c.c
* Copyright (c) 2006 Sun Microsystems, Inc.  All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* 
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* 
* The above named program is distributed in the hope that it will be 
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* General Public License for more details.
* 
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* 
* ========== Copyright Header End ============================================
*/
/*
 * C version of v9_inst_asm.s  file
 *
 */

/*------------------------------------------------------------------------*/
/* Include files */
/*------------------------------------------------------------------------*/

#include <sys/types.h>
#include <stdio.h>

/*------------------------------------------------------------------------*/
/* Local variables and defines */
/*------------------------------------------------------------------------*/

#define MOVRZ	 1
#define MOVRLEZ	 2
#define MOVRLZ	 3
#define MOVRNZ	 5
#define MOVRGZ	 6
#define MOVRGEZ	 7

#define V9_icc_n(_n)	((_n)<<3)
#define V9_icc_z(_z)	((_z)<<2)
#define V9_icc_v(_v)	((_v)<<1)
#define V9_icc_c(_c)	(_c)

#define V9_xcc_n(_n)	((_n)<<7)
#define V9_xcc_z(_z)	((_z)<<6)
#define V9_xcc_v(_v)	((_v)<<5)
#define V9_xcc_c(_c)	((_c)<<4)
#define	MASK64( _high, _low )	( ( (uint64_t)((~(uint64_t)0)>>(63-(_high))) ) & ( (uint64_t)( (~(uint64_t)0)<<(_low)) ) )

/*
 * Set the N and Z bits of both icc and xcc from a 64-bit logical result.
 * Logical instructions never produce carry or overflow, so V and C stay 0.
 * The expansion deliberately ends in ';' because call sites are written
 * "SET_LOGIC_CCR(c,r);".
 * BUG FIX: the final line used to end with a '\' continuation, which
 * spliced the NEXT source line into every expansion (only a blank line
 * today, but a latent hazard if code is ever added after the macro).
 */
#define SET_LOGIC_CCR(ccr,result) \
	ccr = V9_xcc_n((result>>63)&1)		\
		| V9_xcc_z(result==0LL)			\
		| V9_icc_n((result>>31)&1)		\
		| V9_icc_z((result&MASK64(31,0))==0LL);

/*------------------------------------------------------------------------*/
/*
 * IFLUSH model: intentionally empty.  This C translation has no
 * instruction cache to invalidate; the stub exists so callers of the
 * assembly version still have a symbol to call.
 */
void	iflush(caddr_t adr)
{
}
/*------------------------------------------------------------------------*/
#ifndef ICACHE_MDF
/*
 * ADD: 64-bit sum of two register values, returned directly.
 */
uint64_t	add_instr(uint64_t r1, uint64_t r2)
{
	return (r1 + r2);
}
/*------------------------------------------------------------------------*/
#else
/*
 * ICACHE_MDF build: same ADD, but operands and destination are passed
 * by pointer and the sum is stored through 'wr'.
 */
void	add_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr)
{
	*wr = (*r1) + (*r2);
}
/*------------------------------------------------------------------------*/
#endif

/*
 * ADDcc: 64-bit add, then recompute the full CCR (icc + xcc).
 */
void	addcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, carry;

	*wr = (*r1) + (*r2);

	/* Overflow: operand signs agree but differ from the result sign.
	 * Carry:    classic full-adder carry-out identity.              */
	ovf   = ((*r1) & (*r2) & ~(*wr)) | (~(*r1) & ~(*r2) & (*wr));
	carry = ((*r1) & (*r2)) | (~(*wr) & ((*r1) | (*r2)));

	/* xcc fields come from bit 63, icc fields from bit 31. */
	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((carry >> 63) & 1)
	       | V9_icc_c((carry >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * ADDX: add with explicit carry-in (the caller passes icc.C as 0/1).
 */
uint64_t	addx_instr(uint64_t r1, uint64_t r2, uint32_t p_icc_c)
{
	uint64_t sum = r1 + r2;
	sum += p_icc_c;
	return sum;
}
/*------------------------------------------------------------------------*/
/*
 * ADDXcc: add with carry-in taken from the current icc.C (CCR bit 0),
 * then recompute the full CCR.
 */
void	addxcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, carry;

	*wr = (*r1) + (*r2) + ((*ccr_b) & 1);

	/* Same bitwise V/C derivation as addcc_instr. */
	ovf   = ((*r1) & (*r2) & ~(*wr)) | (~(*r1) & ~(*r2) & (*wr));
	carry = ((*r1) & (*r2)) | (~(*wr) & ((*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((carry >> 63) & 1)
	       | V9_icc_c((carry >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * TADDcc: tagged add.  Identical flag generation to ADDcc, except that
 * icc.V is also forced when either operand carries nonzero tag bits
 * (low two bits).
 */
void	taddcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, carry;

	*wr = (*r1) + (*r2);

	ovf   = ((*r1) & (*r2) & ~(*wr)) | (~(*r1) & ~(*r2) & (*wr));
	carry = ((*r1) & (*r2)) | (~(*wr) & ((*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((carry >> 63) & 1)
	       | V9_icc_c((carry >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);

	/* Tag check: any of the four low operand bits set => icc.V. */
	if ((((*r1) | (*r2)) & ((uint64_t) 0x03)) != 0ULL)
	    *ccr_b |= V9_icc_v(1);
}
/*------------------------------------------------------------------------*/
/*
 * SUB: 64-bit difference, no condition codes.
 */
uint64_t	sub_instr(uint64_t r1, uint64_t r2)
{
	uint64_t diff = r1 - r2;
	return diff;
}
/*------------------------------------------------------------------------*/
/*
 * SUBcc: 64-bit subtract, then recompute the full CCR.
 */
void	subcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, borrow;

	*wr = (*r1) - (*r2);

	/* Overflow: operand signs differ and the result sign follows rs2.
	 * Borrow:   standard subtractor borrow-out identity.            */
	ovf    = ((*r1) & ~(*r2) & ~(*wr)) | (~(*r1) & (*r2) & (*wr));
	borrow = (~(*r1) & (*r2)) | ((*wr) & (~(*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((borrow >> 63) & 1)
	       | V9_icc_c((borrow >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * SUBcc with %g0 destination: compute the flags only, discard the
 * difference (this is how CMP is modeled).
 */
void	subcc_instr_rd0(uint64_t* r1, uint64_t* r2, uint8_t* ccr_b)
{
	uint64_t ovf, borrow;
	uint64_t diff;

	diff = (*r1) - (*r2);

	ovf    = ((*r1) & ~(*r2) & ~diff) | (~(*r1) & (*r2) & diff);
	borrow = (~(*r1) & (*r2)) | (diff & (~(*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((borrow >> 63) & 1)
	       | V9_icc_c((borrow >> 31) & 1)
	       | V9_xcc_n((diff >> 63) & 1)
	       | V9_icc_n((diff >> 31) & 1)
	       | V9_xcc_z(diff == 0)
	       | V9_icc_z((diff & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * SUBcc with an immediate operand and %g0 destination: flags only.
 * The immediate arrives as a uint32_t and is used zero-extended, matching
 * the subtraction on the original line "result = (*r1) - (uint64_t)(r2)".
 */
void	subcc_instr_simm_rd0(uint64_t* r1, uint32_t r2, uint8_t* ccr_b)
{
	uint64_t v, c;
	uint64_t result;
	uint64_t b = (uint64_t)(r2);	/* 64-bit view of the immediate */

	result = (*r1) - b;

	/*
	 * BUG FIX: "~(r2)" was previously evaluated at 32 bits and then
	 * zero-extended, clearing bits 63:32 of the complement and making
	 * xcc.V read 0 for every genuine 64-bit overflow; complement the
	 * 64-bit value instead.  (icc.V, which only looks at bit 31, was
	 * unaffected and keeps its old value.)
	 */
	v = ((*r1) & ~b & ~(result)) | (~(*r1) & b & (result));
	c = (~(*r1) & b) | ((result) & (~(*r1) | b));

	*ccr_b  = V9_xcc_v((v >> 63) & 1);
	*ccr_b |= V9_icc_v((v >> 31) & 1);
	*ccr_b |= V9_xcc_c((c >> 63) & 1);
	*ccr_b |= V9_icc_c((c >> 31) & 1);
	*ccr_b |= V9_xcc_n((result >> 63) & 1);
	*ccr_b |= V9_icc_n((result >> 31) & 1);
	*ccr_b |= V9_xcc_z(result ? 0 : 1);
	/* BUG FIX: icc.Z was never set here (cf. subcc_instr_rd0). */
	*ccr_b |= V9_icc_z((result & MASK64(31,0)) ? 0 : 1);
}
/*------------------------------------------------------------------------*/
/*
 * SUBcc with an immediate operand: store the difference and recompute
 * the CCR.  The immediate is used zero-extended, as before.
 */
void	subcc_instr_simm(uint64_t* r1, uint32_t r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t v, c;
	uint64_t b = (uint64_t)(r2);	/* 64-bit view of the immediate */

	*wr = (*r1) - b;

	/*
	 * BUG FIX: "~(r2)" was previously evaluated at 32 bits and then
	 * zero-extended, clearing bits 63:32 of the complement and making
	 * xcc.V read 0 for every genuine 64-bit overflow; complement the
	 * 64-bit value instead.
	 */
	v = ((*r1) & ~b & ~(*wr)) | (~(*r1) & b & (*wr));
	c = (~(*r1) & b) | ((*wr) & (~(*r1) | b));

	*ccr_b  = V9_xcc_v((v >> 63) & 1);
	*ccr_b |= V9_icc_v((v >> 31) & 1);
	*ccr_b |= V9_xcc_c((c >> 63) & 1);
	*ccr_b |= V9_icc_c((c >> 31) & 1);
	*ccr_b |= V9_xcc_n((*wr >> 63) & 1);
	*ccr_b |= V9_icc_n((*wr >> 31) & 1);
	*ccr_b |= V9_xcc_z(*wr ? 0 : 1);
	/* BUG FIX: icc.Z was never set here (cf. subcc_instr). */
	*ccr_b |= V9_icc_z((*wr & MASK64(31,0)) ? 0 : 1);
}
/*------------------------------------------------------------------------*/
/*
 * ANDcc with %g0 destination: set N/Z flags from the AND, discard it.
 */
void	andcc_instr_rd0(uint64_t* r1, uint64_t* r2, uint8_t* ccr)
{
	uint64_t res = (*r1) & (*r2);

	SET_LOGIC_CCR(*ccr, res);
}
/*------------------------------------------------------------------------*/
/*
 * ANDcc, immediate operand, %g0 destination: flags only.
 * The 32-bit immediate is zero-extended before the AND.
 */
void	andcc_instr_simm_rd0(uint64_t* r1, uint32_t r2, uint8_t* ccr)
{
	uint64_t res = (*r1) & (uint64_t) r2;

	SET_LOGIC_CCR(*ccr, res);
}
/*------------------------------------------------------------------------*/
/*
 * ANDcc with an immediate operand: store the AND and set N/Z flags.
 */
void	andcc_instr_simm(uint64_t* r1, uint32_t r2, uint64_t* wr, uint8_t* ccr)
{
	uint64_t res = (*r1) & (uint64_t) r2;

	*wr = res;
	SET_LOGIC_CCR(*ccr, res);
}
/*------------------------------------------------------------------------*/
/*
 * SUBX: subtract with explicit borrow-in (caller passes icc.C as 0/1).
 */
uint64_t	subx_instr(uint64_t r1, uint64_t r2, uint32_t p_icc_c)
{
	uint64_t diff = r1 - r2;
	diff -= (uint64_t) p_icc_c;
	return diff;
}
/*------------------------------------------------------------------------*/
/*
 * SUBXcc: subtract with borrow-in from the current icc.C (CCR bit 0),
 * then recompute the full CCR.
 */
void	subxcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, borrow;

	*wr = (*r1) - (*r2) - (uint64_t)((*ccr_b) & 1);

	/* Same bitwise V/C derivation as subcc_instr. */
	ovf    = ((*r1) & ~(*r2) & ~(*wr)) | (~(*r1) & (*r2) & (*wr));
	borrow = (~(*r1) & (*r2)) | ((*wr) & (~(*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((borrow >> 63) & 1)
	       | V9_icc_c((borrow >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * TSUBcc: tagged subtract.  Identical flag generation to SUBcc, except
 * that icc.V is also forced when either operand carries nonzero tag
 * bits (low two bits).
 */
void	tsubcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, borrow;

	*wr = (*r1) - (*r2);

	ovf    = ((*r1) & ~(*r2) & ~(*wr)) | (~(*r1) & (*r2) & (*wr));
	borrow = (~(*r1) & (*r2)) | ((*wr) & (~(*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((borrow >> 63) & 1)
	       | V9_icc_c((borrow >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);

	/* Tag check: nonzero low bits on either operand => icc.V. */
	if ((((*r1) | (*r2)) & ((uint64_t) 0x03)) != 0ULL)
	    *ccr_b |= V9_icc_v(1);
}
/*------------------------------------------------------------------------*/
/*
 * AND: bitwise conjunction, no condition codes.
 */
uint64_t	and_instr(uint64_t r1, uint64_t r2)
{
	uint64_t res = r1 & r2;
	return res;
}
/*------------------------------------------------------------------------*/
/*
 * ANDcc: store the AND and set N/Z flags for both icc and xcc.
 */
void	andcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t res = (*r1) & (*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr_b, res);
}
/*------------------------------------------------------------------------*/
/*
 * ANDN: AND with the complement of the second operand.
 */
uint64_t	andn_instr(uint64_t r1, uint64_t r2)
{
	uint64_t res = r1 & ~r2;
	return res;
}
/*------------------------------------------------------------------------*/
/*
 * ANDNcc: store r1 & ~r2 and set N/Z flags for both icc and xcc.
 */
void	andncc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t res = (*r1) & ~(*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr_b, res);
}
/*------------------------------------------------------------------------*/
/*
 * OR: bitwise disjunction, no condition codes.
 */
uint64_t	or_instr(uint64_t r1, uint64_t r2)
{
	uint64_t res = r1 | r2;
	return res;
}
/*------------------------------------------------------------------------*/
/*
 * ORcc: store the OR and set N/Z flags for both icc and xcc.
 */
void	orcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t res = (*r1) | (*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr_b, res);
}
/*------------------------------------------------------------------------*/
/*
 * ORN: OR with the complement of the second operand.
 */
uint64_t	orn_instr(uint64_t r1, uint64_t r2)
{
	uint64_t res = r1 | ~r2;
	return res;
}
/*------------------------------------------------------------------------*/
/*
 * ORNcc: store r1 | ~r2 and set N/Z flags for both icc and xcc.
 */
void	orncc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t res = (*r1) | ~(*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr_b, res);
}
/*------------------------------------------------------------------------*/
/*
 * XOR: bitwise exclusive-or, no condition codes.
 */
uint64_t	xor_instr(uint64_t r1, uint64_t r2)
{
	uint64_t res = r1 ^ r2;
	return res;
}
/*------------------------------------------------------------------------*/
/*
 * XORcc: store the XOR and set N/Z flags for both icc and xcc.
 */
void	xorcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t res = (*r1) ^ (*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr_b, res);
}
/*------------------------------------------------------------------------*/
/*
 * XNOR: complemented exclusive-or.
 */
uint64_t	xnor_instr(uint64_t r1, uint64_t r2)
{
	uint64_t res = ~(r1 ^ r2);
	return res;
}
/*------------------------------------------------------------------------*/
/*
 * XNORcc: store ~(r1 ^ r2) and set N/Z flags for both icc and xcc.
 */
void	xnorcc_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t res = ~((*r1) ^ (*r2));

	*wr = res;
	SET_LOGIC_CCR(*ccr_b, res);
}
/*------------------------------------------------------------------------*/
/*
 * SLL model: 64-bit left shift of *r1 by the (pre-masked) shift count.
 * NOTE(review): wtmp >= 64 would be undefined; assumed masked by caller.
 */
void	sll_instr(uint64_t* r1, uint32_t wtmp, uint64_t* wr)
{
	uint64_t val = *r1;
	*wr = val << wtmp;
}
/*------------------------------------------------------------------------*/
/*
 * SRL: logical right shift of the low 32 bits of r1 (upper word cleared
 * first, exactly as the 32-bit instruction behaves).
 */
uint64_t	srl_instr(uint64_t r1, uint32_t r2)
{
	uint64_t low = r1 & 0xffffffffUL;
	return low >> r2;
}
/*------------------------------------------------------------------------*/
/*
 * SRLX model: full 64-bit logical right shift.
 */
void	srlx_instr(uint64_t* r1, uint32_t wtmp, uint64_t* wr)
{
	uint64_t val = *r1;
	*wr = val >> wtmp;
}
/*------------------------------------------------------------------------*/
/*
 * SRA: arithmetic right shift of the low 32 bits of r1, result
 * sign-extended to 64 bits.
 * NOTE(review): relies on ">>" of a negative int32_t being arithmetic,
 * which is implementation-defined in C (true for the usual compilers).
 */
uint64_t	sra_instr(uint64_t r1, uint32_t wtmp)
{
	int32_t low = (int32_t) r1;
	int64_t wide = (int64_t)(low >> wtmp);

	return (uint64_t) wide;
}
/*------------------------------------------------------------------------*/
/*
 * SRAX model: 64-bit arithmetic right shift.
 * NOTE(review): ">>" of a negative int64_t is implementation-defined;
 * assumed arithmetic, as elsewhere in this file.
 */
void	srax_instr(uint64_t* r1, uint32_t wtmp, uint64_t* wr)
{
	int64_t sval = (int64_t) *r1;
	*wr = (uint64_t)(sval >> wtmp);
}
/*------------------------------------------------------------------------*/
/*
 * MULX model: 64-bit multiply (low 64 bits of the product).
 */
void	mulx_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr)
{
	uint64_t prod = (*r1) * (*r2);
	*wr = prod;
}
/*------------------------------------------------------------------------*/
/*
 * UDIVX model: 64-bit unsigned divide.
 * NOTE(review): *r2 == 0 divides by zero, mirroring the hardware trap;
 * callers are assumed to avoid it.
 */
void	udivx_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr)
{
	uint64_t q = (*r1) / (*r2);
	*wr = q;
}
/*------------------------------------------------------------------------*/
/*
 * SDIVX model: 64-bit signed divide.
 * NOTE(review): *r2 == 0 and INT64_MIN / -1 are undefined here, as in
 * the original; callers are assumed to avoid them.
 */
void	sdivx_instr(uint64_t* r1, uint64_t* r2, uint64_t* wr)
{
	int64_t a = (int64_t)(*r1);
	int64_t b = (int64_t)(*r2);

	*wr = (uint64_t)(a / b);
}
/*------------------------------------------------------------------------*/
/*
 * UDIV model: divide the 64-bit concatenation Y:rs1 by the 32-bit
 * divisor; quotients wider than 32 bits saturate to all-ones.
 * NOTE(review): r2 == 0 divides by zero, mirroring the hardware trap.
 */
void	udiv_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint32_t y)
{
	uint64_t dividend = ((uint64_t) y << 32) + (uint64_t) r1;
	uint64_t q = dividend / r2;

	*wr = (q > 0xffffffffULL) ? 0xffffffffULL : q;
}
/*------------------------------------------------------------------------*/
/*
 * SDIV model: signed divide of the 64-bit concatenation Y:rs1 by the
 * 32-bit divisor, with saturation to the 32-bit signed range.
 */
void	sdiv_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint32_t y)
{
	int64_t div;
	int64_t result;

	/* 64-bit signed dividend: Y register high word, rs1 low word. */
	div =  ((uint64_t)y << 32) | (uint64_t)r1;

	/*
	 * BUG FIX: INT64_MIN / -1 overflows int64_t division (undefined
	 * behavior; traps with SIGFPE on common hardware).  The original
	 * code checked for this case only AFTER performing the division,
	 * so the check could never run.  Saturate before dividing.
	 */
	if ((div == (int64_t)0x8000000000000000ULL) && (r2 == 0xffffffffUL)) {
	    *wr = 0x7fffffffLL;
	    return;
	}

	/* NOTE(review): r2 == 0 still divides by zero, mirroring the
	 * hardware divide-by-zero trap; callers must avoid it. */
	result = div / (int32_t) r2;
	if ((result > 0LL) && (result > 0x7fffffffLL)) {
	    result = 0x7fffffffLL;			/* saturate positive */
	} else if ((result < 0LL) && (result < 0xffffffff80000000LL)) {
	    result = 0xffffffff80000000LL;		/* saturate negative */
	}
	*wr = (uint64_t) result;
}
/*------------------------------------------------------------------------*/
/*
 * UMULcc model: 32x32 -> 64 unsigned multiply; Y receives the high
 * word and the N/Z bits of icc and xcc are set from the product.
 */
void	umulcc_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint8_t* ccr_b, 
	uint32_t* y)
{
	uint64_t prod = (uint64_t) r1 * (uint64_t) r2;

	*wr = prod;
	*y = (uint32_t)(prod >> 32);

	*ccr_b = V9_xcc_n((prod >> 63) & 1)
	       | V9_icc_n((prod >> 31) & 1)
	       | V9_xcc_z(prod == 0)
	       | V9_icc_z((prod & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * UDIVcc model: divide the 64-bit concatenation Y:rs1 by the 32-bit
 * divisor.  Quotients wider than 32 bits saturate to all-ones and set
 * icc.V; N/Z reflect the stored result.
 */
void	udivcc_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint8_t* ccr_b,
	uint32_t y)
{
	uint64_t dividend, q;
	uint64_t ovf;

	dividend = ((uint64_t) y << 32) + (uint64_t) r1;
	q = dividend / (uint64_t) r2;

	if (q > 0xffffffffULL) {
	    ovf = 1;
	    *wr = 0xffffffffULL;
	} else {
	    ovf = 0;
	    *wr = q;
	}

	*ccr_b = V9_icc_v(ovf)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * SMULcc model: 32x32 -> 64 signed multiply; Y receives the high word
 * and the N/Z bits of icc and xcc are set from the product.
 */
void	smulcc_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint8_t* ccr_b,
	uint32_t* y)
{
	int64_t sprod = (int64_t)(int32_t) r1 * (int64_t)(int32_t) r2;
	uint64_t prod = (uint64_t) sprod;

	*wr = prod;
	*y = (uint32_t)(prod >> 32);

	*ccr_b = V9_xcc_n((prod >> 63) & 1)
	       | V9_icc_n((prod >> 31) & 1)
	       | V9_xcc_z(prod == 0)
	       | V9_icc_z((prod & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
/*
 * SDIVcc model: signed divide of the 64-bit concatenation Y:rs1 by the
 * 32-bit divisor, with saturation to the 32-bit signed range.  icc.V
 * flags the saturation; N/Z reflect the stored result.
 */
void	sdivcc_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint8_t* ccr_b,
	uint32_t y)
{
	int64_t div, result;
	uint64_t v;

	div = y;
	div <<= 32 ;
	div += r1;

	/*
	 * BUG FIX: the INT64_MIN / -1 special case was checked only AFTER
	 * the division had already executed, but that division overflows
	 * int64_t (undefined behavior; traps with SIGFPE on common
	 * hardware).  Saturate before dividing.
	 */
	if ((div == (int64_t)0x8000000000000000ULL) && (r2 == 0xffffffffUL)) {
	    v = 1;
	    *wr = 0x7fffffffLL;
	} else {
	    /* NOTE(review): r2 == 0 still divides by zero, mirroring the
	     * hardware divide-by-zero trap; callers must avoid it. */
	    result = div / (int32_t) r2;
	    if ((result > 0LL) && (result > 0x7fffffffLL)) {
		v = 1;
		*wr = 0x7fffffffLL;			/* saturate positive */
	    } else if ((result < 0LL) && (result < 0xffffffff80000000LL)) {
		v = 1;
		*wr = 0xffffffff80000000LL;		/* saturate negative */
	    } else {
		v = 0;
		*wr = (uint64_t) result;
	    }
	}

	*ccr_b = V9_icc_v(v);
	*ccr_b |= V9_xcc_n(((*wr) >> 63) & 1);
	*ccr_b |= V9_icc_n(((*wr) >> 31) & 1);
	*ccr_b |= V9_xcc_z((*wr != 0LL) ? 0 : 1);
	*ccr_b |= V9_icc_z(((*wr) & MASK64(31,0)) ? 0 : 1);
}
/*------------------------------------------------------------------------*/
/*
 * MOVr model: conditionally move *r2 into *wr based on the sign/zero
 * test of *r1 selected by the rcond field (bits 12:10 of the third
 * instruction word).  When the condition fails, *rd is cleared instead,
 * matching the original behavior.
 */
void	movr_instr(uint32_t* movr_asm, uint64_t* r1, uint64_t* r2,
	uint64_t* wr, uint32_t* rd)
{
	int32_t cond = (movr_asm[2] >> 10) & 7;
	int64_t sval = (int64_t) *r1;
	int32_t take;

	switch (cond) {
	case MOVRZ:	take = (sval == 0);	break;
	case MOVRLEZ:	take = (sval <= 0);	break;
	case MOVRLZ:	take = (sval < 0);	break;
	case MOVRNZ:	take = (sval != 0);	break;
	case MOVRGZ:	take = (sval > 0);	break;
	case MOVRGEZ:	take = (sval >= 0);	break;
	default:	return;		/* encodings 0 and 4: no effect */
	}

	if (take)
		*wr = *r2;
	else
		*rd = 0;
}
/*------------------------------------------------------------------------*/
/*
 * UMUL model: 32x32 -> 64 unsigned multiply; Y receives the high word.
 */
void	umul_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint32_t* y)
{
	uint64_t prod = (uint64_t) r1 * (uint64_t) r2;

	*wr = prod;
	*y = (uint32_t)(prod >> 32);
}
/*------------------------------------------------------------------------*/
/*
 * SMUL model: 32x32 -> 64 signed multiply; Y receives the high word.
 */
void	smul_asm(uint32_t r1, uint32_t r2, uint64_t* wr, uint32_t* y)
{
	int64_t sprod = (int64_t)(int32_t) r1 * (int64_t)(int32_t) r2;

	*wr = (uint64_t) sprod;
	*y = (uint32_t)((uint64_t) sprod >> 32);
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant of SUBcc: identical semantics to
 * subcc_instr (64-bit subtract plus full CCR recompute).
 */
void	subcc_instr_il(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr_b)
{
	uint64_t ovf, borrow;

	*wr = (*r1) - (*r2);

	ovf    = ((*r1) & ~(*r2) & ~(*wr)) | (~(*r1) & (*r2) & (*wr));
	borrow = (~(*r1) & (*r2)) | ((*wr) & (~(*r1) | (*r2)));

	*ccr_b = V9_xcc_v((ovf >> 63) & 1)
	       | V9_icc_v((ovf >> 31) & 1)
	       | V9_xcc_c((borrow >> 63) & 1)
	       | V9_icc_c((borrow >> 31) & 1)
	       | V9_xcc_n(((*wr) >> 63) & 1)
	       | V9_icc_n(((*wr) >> 31) & 1)
	       | V9_xcc_z((*wr) == 0)
	       | V9_icc_z(((*wr) & MASK64(31,0)) == 0);
}
/*------------------------------------------------------------------------*/
//void	subcc_instr_rd0_il      (uint64_t*, uint64_t*, uint8_t*);
/*------------------------------------------------------------------------*/
//void	subcc_instr_simm_rd0_il (uint64_t*, uint64_t*, uint8_t*);
/*------------------------------------------------------------------------*/
//void	subcc_instr_simm_il     (uint64_t*, uint64_t*, uint64_t*, uint8_t*);
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: 64-bit left shift of *r1.
 */
void	sll_instr_il(uint64_t* r1, uint32_t wtmp, uint64_t* wr)
{
	uint64_t val = *r1;
	*wr = val << wtmp;
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: full 64-bit logical right shift.
 */
void	srlx_instr_il_32(uint64_t* r1, uint32_t r2, uint64_t* wr)
{
	uint64_t val = *r1;
	*wr = val >> r2;
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: logical right shift of the low 32 bits.
 */
uint64_t	srl_instr_il_32(uint64_t r1, uint32_t r2)
{
	uint64_t low = r1 & 0xffffffffUL;
	return low >> r2;
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: full 64-bit logical right shift.
 */
void	srlx_instr_il_64(uint64_t* r1, uint32_t r2, uint64_t* wr)
{
	uint64_t val = *r1;
	*wr = val >> r2;
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: logical right shift of the low 32 bits.
 */
uint64_t	srl_instr_il_64(uint64_t r1, uint32_t r2)
{
	uint64_t low = r1 & 0xffffffffUL;
	return low >> r2;
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant of ANDcc: store the AND and set N/Z flags.
 */
void	andcc_instr_il(uint64_t* r1, uint64_t* r2, uint64_t* wr, uint8_t* ccr)
{
	uint64_t res = (*r1) & (*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr, res);
}
/*------------------------------------------------------------------------*/
//void	andcc_instr_rd0_il      (uint64_t*, uint64_t*, uint8_t*)
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: ANDcc with a 64-bit immediate pointer and
 * %g0 destination — flags only.
 */
void	andcc_instr_simm_rd0_il(uint64_t* r1, int64_t* r2, uint8_t* ccr)
{
	uint64_t res = (*r1) & (uint64_t)(*r2);

	SET_LOGIC_CCR(*ccr, res);
}
/*------------------------------------------------------------------------*/
/*
 * Inline-expansion variant: ANDcc with a 64-bit immediate pointer —
 * store the AND and set N/Z flags.
 */
void	andcc_instr_simm_il(uint64_t* r1, int64_t* r2, uint64_t* wr, uint8_t* ccr)
{
	uint64_t res = (*r1) & (uint64_t)(*r2);

	*wr = res;
	SET_LOGIC_CCR(*ccr, res);
}
/*------------------------------------------------------------------------*/

