/*
 * Copyright (c) 2005, 2014 Qualcomm Technologies, Inc.  All Rights Reserved.
 * Qualcomm Technologies Proprietary and Confidential.
 */

/***********************************************************************
*
*   FILE : kalimba_basic_op.h
*
*   PURPOSE:
*      Kalimba implementation of QC basic ops
*    
*
***********************************************************************/
#ifndef KALIMBA_BASIC_OP_H
#define KALIMBA_BASIC_OP_H

// Kalimba intrinsics
#include "sizes_and_limits.h"
#ifdef USE_KAL_INTRINSICS

// 16-bit add with saturation (basic-op add()): returns var1 + var2
// confined to the signed 16-bit range (result sign-extended in an int32).
asm int32 kal_s16_add_s16_s16_sat(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1<R0-R5>, var2:bank1<R0-R5>
        .target var1
    ]
    // Add written as "subtract the negated operand" (var2 * -1.0).
    // NOTE(review): presumably this form is used so the operation
    // saturates instead of wrapping -- confirm against a plain '+'.
    @{var1} = @{var1} - @{var2}*-1.0;
    // Shift up then down to reduce the 32-bit sum to a sign-extended
    // 16-bit value (saturating at 16 bits -- TODO confirm ASHIFT
    // saturation semantics on this core).
    @{var1} = @{var1} ASHIFT 16;
    @{var1} = @{var1} ASHIFT -16;
}

// 32-bit add with saturation (basic-op L_add()): returns var1 + var2
// clamped to the signed 32-bit range.
asm int32 kal_s32_add_s32_s32_sat(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1<R0-R5>, var2:bank1<R0-R5>
        .target var1
    ]
    // Add via "subtract the negated operand" (var2 * -1.0).
    // NOTE(review): presumably chosen so the result saturates rather
    // than wraps -- confirm against a plain '+'.
    @{var1} = @{var1} - @{var2}*-1.0;
}

// 64-bit add (no saturation): returns var1 + var2, computed as a
// two-word add-with-carry through bank1 scratch registers.
asm int64 kal_s64_add_s64_s64(int64 var1, int64 var2)
{
    @[
       .restrict var1:large_rmac<rMAC>, var2:large_rmac<rMACB>
       .target var1
       .scratch a1, b1, a2, b2
       .restrict a1: bank1, a2:bank1, b1:bank1, b2:bank1
    ]
// this inline function is very inefficient if the int64 isn't
// already represented by one of the rMAC registers, therefore,
// the inline "C" version below may turn out to be faster
// for a given application (or operation)
    // Unpack both operands: low word (rMAC0/rMACB0) and next word
    // (rMAC1/rMACB1) into ordinary bank1 registers.
    @{a1} = rMAC0;
    @{a2} = rMAC1;
    @{b1} = rMACB0;
    @{b2} = rMACB1;
    // Low-word add, then high-word add picking up the carry flag.
    @{a1} = @{a1} + @{b1};
    @{a2} = @{a2} + @{b2} + CARRY;
    // Write the sum back into rMAC; (SE) sign-extends the upper part
    // into the accumulator's guard bits.
    rMAC0 = @{a1};
    rMAC12 = @{a2}(SE);
}

/* Plain (non-saturating, wrapping) 64-bit addition: var1 + var2.
 * C fallback for kal_s64_add_s64_s64 above. */
static inline int64 inline_s64_add_s64_s64(int64 var1, int64 var2)
{
	return var1 + var2;
}

// 32-bit add, no saturation (wraps on overflow): returns var1 + var2.
asm int32 kal_s32_add_s32_s32(int32 var1, int32 var2)
{
    @[
        .restrict  :bank1
        .restrict var1:bank1, var2:bank1
    ]
    // Plain ALU add into the (anonymous) result register.
    @{} = @{var1} + @{var2};
}

// 32-bit subtract, no saturation (wraps on overflow): returns var1 - var2.
asm int32 kal_s32_sub_s32_s32(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1, var2:bank1
        .restrict  :bank1
    ]
    // Plain ALU subtract into the (anonymous) result register.
    @{} = @{var1} - @{var2};
}

// 32-bit subtract with saturation (basic-op L_sub()): returns
// var1 - var2 clamped to the signed 32-bit range.
asm int32 kal_s32_sub_s32_s32_sat(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1<R0-R5>, var2:bank1<R0-R5>
        .target var1
    ]
    // Subtract via "add the negated operand" (var2 * -1.0).
    // NOTE(review): presumably chosen so the result saturates rather
    // than wraps -- confirm against a plain '-'.
    @{var1} = @{var1} + @{var2}*-1.0;
}

// Full 64-bit integer product of two 32-bit values: var1 * var2.
asm int64 kal_s64_mult_s32_s32(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1, var2:bank1
        .restrict :large_rmac<rMAC>
    ]
    // rMAC multiplies are fractional, i.e. they produce (var1*var2)<<1;
    // the ASHIFT -1 below undoes that extra bit to give the integer
    // product in the wide accumulator.
    @{} = @{var1} * @{var2};
    @{} = @{} ASHIFT -1(56bit);
}

// 64-bit integer product of a 32-bit and a 16-bit value (var2 is
// passed widened to int32): var1 * var2.
asm int64 kal_s64_mult_s32_s16(int32 var1, int32 var2)
{
    @[
      .restrict var1:bank1, var2:bank1
      .restrict :large_rmac<rMAC>
       .clobber var2
    ]
    // Fractional rMAC multiply yields (var1*var2)<<1; the ASHIFT -1
    // corrects it to the integer product.
    // NOTE(review): var2 is declared .clobber but is not visibly
    // modified here -- verify the constraint is still required.
    @{} = @{var1} * @{var2};
    @{} = @{} ASHIFT -1(56bit); // {TBD: consolidate with shifts above}
}

// This basic_op does a 64-bit multiplication and then shifts right by 32 bits.
// Because rMAC multiplications are fractional there is an additional 1 bit
// shift required to produce an integer shift in keeping with the original
// specification.
asm int64 kal_s64_mult_s32_s32_shift(int32 var1, int32 var2, int16 shift)
{
    @[
        .restrict var1:bank1
        .restrict var2:bank1
        .restrict shift:bank1
        .clobber shift
        .restrict :large_rmac<rMAC>
    ]
    // Net shift = shift - 32 (the spec's right-shift by 32) - 1 (to
    // undo the fractional multiply's implicit <<1, per header comment).
    @{shift} = @{shift} - (32 + 1);
    @{} = @{var1} * @{var2};
    // Single arithmetic shift in the 56-bit accumulator applies the
    // combined correction.
    @{} = @{} ASHIFT @{shift}(56bit);
}

// 32x16 fractional multiply with round/saturate (basic-op style
// Mpy_32_16): returns the high part of var1 * (var2 << 16).
asm int32 kal_s32_mult_s32_s16_rnd_sat(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1
        .restrict var2:bank1
        .restrict :bank1
        .clobber var2
    ]
    // Left-align the 16-bit operand so the fractional multiply treats
    // it as a Q31-style value.
    @{var2} = @{var2} LSHIFT 16;
    // (frac) multiply produces the product's high word.
    // NOTE(review): the "rnd"/"sat" in the name must come from the
    // (frac) multiply's own rounding/saturation -- confirm on target.
    @{} = @{var1} * @{var2}(frac);
}

// 16x16 -> 32-bit integer multiply: returns var1 * var2
// (operands passed widened to int32).
asm int32 kal_s32_mult_s16_s16(int32 var1, int32 var2)
{
    @[
       .restrict var1:bank1
       .restrict var2:bank1
       .restrict :bank1
    ]
    // (int) selects the integer (non-fractional) multiply, so no
    // implicit <<1 is applied.
    @{} = @{var1} * @{var2}(int);            
}

// *** must specify either rMAC or rMACB in ".restrict" statement below
//  else large errors appear in output
asm int64 kal_s64_shl_s64(int64 var1, int32 shift)
{
    @[
       .restrict var1:large_rmac<rMAC> /* *** */
       .restrict shift:bank1
       .target var1
    ]
    // 64-bit shift-left: negative shift counts shift right
    // (arithmetic), non-negative counts shift left (logical).
    // Both compares are needed because each conditional op consumes
    // the flags; shift == 0 takes the POS branch (LSHIFT 0, no-op).
    Null = @{shift};
    if NEG @{var1} = @{var1} ASHIFT @{shift}(56bit);
    Null = @{shift};
    if POS @{var1} = @{var1} LSHIFT @{shift}(56bit);
}

/* 64-bit shift-left where a negative count shifts right:
 * shift > 0 -> var1 << shift; shift <= 0 -> var1 >> -shift.
 * C fallback for kal_s64_shl_s64 above.
 * NOTE(review): relies on implementation-defined '>>' of negative
 * values and is UB for |shift| >= 64, as in the original. */
static inline int64 inline_s64_shl_s64(int64 var1, int16 shift)
{
	return (shift > 0) ? (var1 << shift) : (var1 >> (-shift));
}

// 32-bit arithmetic shift (basic-op L_shl()): positive shift counts
// shift left, negative shift right; name indicates the result
// saturates on left-shift overflow.
asm int32 kal_s32_shl_s32_sat(int32 var1, int32 shift)
{
    @[
        .restrict var1:bank1
        .restrict shift:bank1
        .restrict :bank1
    ]
    // NOTE(review): saturation is assumed to come from ASHIFT itself
    // -- confirm ASHIFT overflow semantics on this core.
    @{} = @{var1} ASHIFT @{shift};
}

// 32-bit arithmetic shift-right (basic-op L_shr()): returns
// var1 shifted right by 'shift' (negative counts shift left).
asm int32 kal_s32_shr_s32_sat(int32 var1, int32 shift)
{
    @[
        .restrict var1:bank1
        .restrict shift:bank1
        .clobber shift
        .restrict :bank1
    ]
    // Negate the count, then reuse the left-shift form:
    // shr(x, s) == ashift(x, -s).
    @{shift} = @{shift}*-1(int);
    @{} = @{var1} ASHIFT @{shift};
}

// 32-bit shift-left without saturation: negative counts shift right
// (arithmetic), non-negative counts shift left (logical).
asm int32 kal_s32_shl_s32(int32 var1, int32 shift)
{
    @[
        .restrict var1:bank1
        .restrict shift:bank1
        .restrict :bank1
    ]
    // Flags are re-tested before each conditional op; shift == 0
    // takes the POS branch (LSHIFT 0, a no-op).
    Null = @{shift};
    if NEG @{} = @{var1} ASHIFT @{shift};
    Null = @{shift};
    if POS @{} = @{var1} LSHIFT @{shift};
}

// Saturate a 64-bit value (held in the wide rMAC accumulator) to the
// signed 32-bit range (basic-op saturate/L_sat style).
asm int32 kal_s32_saturate_s64(int64 var1)
{
    @[
        .restrict var1:large_rmac<rMAC>
        .clobber var1
        .restrict :bank1
    ]
    @{var1} = @{var1} LSHIFT 8(56bit); // sign extend @bit63
    @{} = @{var1} ASHIFT 24;           // r72->r32 transfer, with saturation 
}


/* Clamp a 64-bit value to the signed 32-bit range [-2^31, 2^31 - 1].
 * C fallback for kal_s32_saturate_s64 above. */
static inline int32 inline_s32_saturate_s64(int64 var1)
{
	if (var1 > (int64) 2147483647LL) {
		return (int32) 2147483647LL;
	}
	if (var1 < (int64) -2147483648LL) {
		return (int32) -2147483648LL;
	}
	return (int32) var1;
}

// Normalization count of a 32-bit value (basic-op norm_l style):
// number of redundant sign bits, via the SIGNDET instruction.
// A zero input is left untouched (the NZ guard), so norm(0) == 0.
asm int32 kal_s16_norm_s32(int32 var1)
{
    @[
        .restrict var1:bank1
        .restrict :bank1
        .target var1
    ]
    Null = @{var1};
    if NZ @{var1} = signdet @{var1};
}

// 16-bit arithmetic shift with saturation (basic-op shl()): shifts
// var1 by 'shift' (negative = right), then confines the result to a
// sign-extended 16-bit value.
asm int32 kal_s16_shl_s16_sat(int32 var1, int32 shift)
{
    @[
        .restrict var1:bank1<R0-R5>, shift:bank1<R0-R5>
        .restrict :bank1
    ]
    @{} = @{var1} ASHIFT @{shift};
    // Shift up/down to saturate into 16 bits -- NOTE(review): confirm
    // ASHIFT saturates on the up-shift on this core.
    @{} = @{} ASHIFT 16;
    @{} = @{} ASHIFT -16;	
}

// Saturate a 32-bit value to the signed 16-bit range (basic-op
// saturate()), returned sign-extended in an int32.
asm int32 kal_s16_saturate_s32(int32 var1)
{
    @[
        .restrict var1:bank1<R0-R5>
        .restrict :bank1
    ]
    // Shift up/down to confine to 16 bits -- NOTE(review): confirm
    // the up-shift saturates rather than truncates on this core.
    @{} = @{var1} ASHIFT 16;
    @{} = @{} ASHIFT -16;
}

// Wrap var1 into the range [0, var2): adds var2 once if var1 is
// negative, subtracts var2 once if var1 >= var2. Assumes var1 is at
// most one period outside the range (single correction each way).
asm int32 kal_s32_modwrap_s32_u32(int32 var1, uint32 var2)
{
    @[
        .restrict var1:bank1<R0-R5>, var2:bank1<R0-R5>
        .target var1
    ]
    // Below range: bring up by one period.
    Null = @{var1};
    if NEG @{var1} = @{var1} + @{var2};
    // At/above range: bring down by one period.
    Null = @{var1} - @{var2};
    if GE @{var1} = @{var1} - @{var2};
}

// Signed 32-bit maximum: returns the larger of var1 and var2.
asm int32 kal_s32_max_s32_s32(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1, var2:bank1
        .target var1
    ]
    // Single MAX instruction; result lands in var1 (.target).
    @{var1} = MAX @{var2};
}

// Signed 32-bit minimum: returns the smaller of var1 and var2.
asm int32 kal_s32_min_s32_s32(int32 var1, int32 var2)
{
    @[
        .restrict var1:bank1, var2:bank1
        .target var1
    ]
    // Single MIN instruction; result lands in var1 (.target).
    @{var1} = MIN @{var2};
}

// Extract the high 16 bits of a 32-bit value (basic-op extract_h()),
// sign-extended into the int32 result.
asm int32 kal_s16_extract_s32_h(int32 var1)
{
    @[
        .restrict var1:bank1
        .restrict :bank1
    ]
    // Arithmetic right shift keeps the sign of the upper half.
    @{} = @{var1} ASHIFT -16;
}

// Extract the low 16 bits of a 32-bit value (basic-op extract_l()).
// Note the mask zero-extends: bit 15 is NOT sign-extended into the
// upper half of the result.
asm int32 kal_s16_extract_s32_l(int32 var1)
{
    @[
        .restrict var1:bank1
        .restrict :bank1
    ]
    @{} = @{var1} AND 0x0000FFFF;
}

// Normalized divide: normalizes var1 and var2, performs the hardware
// divide, stores the net normalization shift through var3 and returns
// the quotient's high 16 bits.
asm int32 kal_s32_div_s32_s32_normalized(int32 var1, int32 var2, int16* var3)
{
    @[
        .restrict var1:bank1, var2:bank1, var3:bank1
        .scratch s1, s2
        .restrict s1:bank1, s2:bank1
        .scratch a1
        .restrict a1:large_rmac<rMAC>
        .restrict :bank1
    ]
    // Normalize the numerator, leaving one headroom bit (s1 - 1).
    @{s1} = SIGNDET @{var1};
    @{s1} = @{s1} - 1;
    @{var1} = @{var1} ASHIFT @{s1};
    // Fully normalize the denominator.
    @{s2} = SIGNDET @{var2};
    @{var2} = @{var2} ASHIFT @{s2};
    // Net shift applied to the quotient's scaling, reported to caller.
    @{s2} = @{s2} - @{s1};
    // Pre-shift numerator into the accumulator and start the divide.
    @{a1} = @{var1} ASHIFT -1;
    Div = @{a1} / @{var2};
    // Store the normalization count while the divider runs.
    // NOTE(review): MH[] is a 16-bit memory store through var3 --
    // confirm addressing mode against the ABI for int16*.
    MH[@{var3}] = @{s2};
    // Collect the quotient and keep its high 16 bits.
    @{} = divResult;
    @{} = @{} ASHIFT -16;
}

/* 64-bit multiply-accumulate: mac64 + var1 * var2, with the product
 * computed at 64-bit width (var1 widened before the multiply). */
static inline int64 inline_s64_mac_s32_s32(int64 mac64, int32 var1, int32 var2)
{
   int64 product = (int64) var1 * var2;
   return mac64 + product;
}

/* Plain (non-saturating, wrapping) 64-bit subtraction: var1 - var2. */
static inline int64 inline_s64_sub_s64_s64(int64 var1, int64 var2)
{
	int64 difference = var1 - var2;
	return difference;
}

// Saturating 32-bit negate (basic-op L_negate()): returns -var1.
// NOTE(review): the fractional multiply by -1.0 is presumably what
// makes negate(INT32_MIN) saturate to INT32_MAX -- confirm on target.
asm int32 kal_s32_neg_s32_sat(int32 var1)
{		
    @[
        .restrict var1:bank1<R0-R5>
        .restrict :bank1
    ]
	@{} = @{var1} * -1.0;
}

// Absolute value of a 32-bit value (basic-op L_abs()), returned as
// unsigned via the single ABS instruction.
asm uint32 kal_u32_abs_s32_sat(int32 var1)
{
    @[
        .restrict var1:bank1<R0-R5>
        .restrict :bank1
    ]
	@{} = ABS @{var1};	
}

// Pack two 16-bit values into one 32-bit word: var1 in the low 16
// bits (masked, not sign-extended), var2 in the high 16 bits.
asm int32 kal_c32_complex_s16_s16(int16 var1, int16 var2)
{
    @[
        .restrict var1:bank1, var2:bank1 
        .restrict :bank1
    ]
	@{var1} = @{var1} AND 0x0000FFFF;
	@{var2} = @{var2} LSHIFT 16;
	// Combine halves; the mask above guarantees no carry between them.
	@{} = @{var1} + @{var2};	
}

/* Pack two 32-bit values into one 64-bit word: var1 in the low 32
 * bits (masked, not sign-extended), var2 in the high 32 bits.
 * NOTE(review): left-shifting a negative int64 by 32 is undefined
 * behaviour in standard C (kept from the original) -- consider an
 * unsigned intermediate if the Kalimba compiler does not define it. */
static inline int64 inline_c64_complex_s32_s32(int32 var1, int32 var2)
{
	int64 low  = ((int64) var1) & 0xFFFFFFFF;
	int64 high = ((int64) var2) << 32;
	return low | high;
}

/* Add two 32-bit values at 64-bit width (cannot overflow). */
static inline int64 inline_s64_add_s32_s32(int32 var1, int32 var2)
{
	int64 wide1 = (int64) var1;
	int64 wide2 = (int64) var2;
	return wide1 + wide2;
}

/* Add a 32-bit value to a 64-bit accumulator: var1 + var2
 * (var2 is widened by the usual arithmetic conversions). */
static inline int64 inline_s64_add_s64_s32(int64 var1, int32 var2)
{
	return var1 + (int64) var2;
}

// Normalization count of a 64-bit value held in rMAC, rebased from the
// accumulator's SIGNDET result to a 64-bit-word convention.
asm int16 kal_s16_norm_s64(int64 var1)
{
    @[
        .restrict var1:large_rmac<rMAC>
		.change R5,R6,R7
        .restrict :bank1
    ]
	// Constants for rebasing the sign-bit count.
	R5 = 31;
	R6 = 32;
	// Redundant-sign-bit count of the wide accumulator.
	R7 = signdet rMAC;
	// NOTE(review): the NEG/POS adjustments (+31 / -32) rebase the
	// accumulator-relative count; the exact mapping depends on the
	// core's rMAC width -- confirm against the Kalimba architecture
	// manual before relying on corner cases.
	Null = R7;
	if NEG @{} = R7 + R5;
	Null = R7;
	if POS @{} = R7 - R6;
	// A fully-redundant value (count 63, i.e. var1 == 0 or -1 range)
	// maps to a result of 0.
	Null = R7 - 63;
	if Z @{} = Null;
}

// Fractional-style 16x16 multiply (basic-op L_mult()): integer
// product shifted left by 1, name indicates saturation on the shift.
asm int32 kal_s32_mult_s16_s16_shift_sat(int16 var1, int16 var2)
{
    @[
        .restrict var1:bank1, var2:bank1 
        .restrict :bank1
    ]
	// Integer multiply, then the explicit <<1 of L_mult.
	// NOTE(review): saturation is assumed to come from ASHIFT --
	// confirm overflow semantics on this core.
	@{} = @{var1} * @{var2}(int);
	@{} = @{} ASHIFT 1;	
}

// Count leading zeros of a 32-bit value: signdet + 1 for non-negative
// inputs (redundant sign bits plus the sign bit itself), 0 for
// negative inputs (no leading zeros).
asm int32 kal_s32_cl0_s32(int32 var1)
{
    @[
        .restrict var1:bank1<R2>
        .restrict :bank1
    ]
	Null = @{var1};
	if POS @{} = signdet @{var1};
	@{} = @{} + 1;
	// Negative input: force the count to zero.
	Null = @{var1};
    if NEG @{} = Null;
}

// Saturating 16-bit negate (basic-op negate()): returns -var1
// confined to the signed 16-bit range.
asm int16 kal_s16_neg_s16_sat(int16 var1)
{
    @[
        .restrict var1:bank1<R0-R5>
        .restrict :bank1
    ]
	// Negate via fractional multiply by -1.0 (same trick as
	// kal_s32_neg_s32_sat), then shift up/down to confine the result
	// to 16 bits.
	@{} = @{var1} * -1.0;
    @{} = @{} ASHIFT 16;
    @{} = @{} ASHIFT -16;
}

// Signed-16 x unsigned-16 -> 32-bit integer multiply: var1 * var2.
// NOTE(review): uses the same signed (int) multiply as the s16*s16
// variant -- correct only if var2 arrives zero-extended; confirm
// against the calling convention.
asm int32 kal_s32_mult_s16_u16(int16 var1, uint16 var2)
{
    @[
        .restrict var1:bank1, var2:bank1
        .restrict :bank1
    ]
    @{} = @{var1} * @{var2}(int);	
}

// Signed 16-bit maximum: returns the larger of var1 and var2.
asm int16 kal_s16_max_s16_s16(int16 var1, int16 var2)
{
     @[
        .restrict var1:bank1, var2:bank1
        .target var1
    ]
    // Single MAX instruction; result lands in var1 (.target).
    @{var1} = MAX @{var2};
}  

// Signed 16-bit minimum: returns the smaller of var1 and var2.
asm int16 kal_s16_min_s16_s16(int16 var1, int16 var2)
{
     @[
        .restrict var1:bank1, var2:bank1
        .target var1
    ]
    // Single MIN instruction; result lands in var1 (.target).
    @{var1} = MIN @{var2};
}  

// Unsigned 16x16 -> 32-bit multiply: var1 * var2.
// NOTE(review): uses the signed (int) multiply form -- correct only
// while both operands fit in 15 magnitude bits or arrive
// zero-extended; confirm against the calling convention.
asm uint32 kal_u32_mult_u16_u16(uint16 var1, uint16 var2)
{
    @[
        .restrict var1:bank1, var2:bank1
        .restrict :bank1
    ]
    @{} = @{var1} * @{var2}(int);	
}

/* Extract the low 32 bits of a 64-bit value (mask with UMAX_32 from
 * sizes_and_limits.h, then narrow). */
static inline int32 inline_s32_extract_s64_l(int64 var1)
{
   int64 low_word = var1 & UMAX_32;
   return (int32) low_word;
}

// 32-bit multiply-accumulate with saturation (basic-op L_mac style
// without the <<1): returns var1 + var2 * var3.
asm int32 kal_s32_mac_s32_s16_s16_sat(int32 var1, int16 var2, int16 var3)
{
    @[
        .restrict var1:bank1, var2:bank1, var3:bank1
        .scratch a1
        .restrict a1:large_rmac<rMAC>
        .target var1
    ]
    // Integer 16x16 product into the wide accumulator.
    @{a1} = @{var2} * @{var3}(int);
    // Saturating add via "subtract the negated product" (see
    // kal_s32_add_s32_s32_sat).
    @{var1} = @{var1} - @{a1}*-1.0;
}

// Fractional multiply-accumulate: mac32 + var1 * var2 where the
// multiply is the fractional form (implicit <<1 -- the "s1" in the
// name). NOTE(review): rounding/saturation implied by the name must
// come from the fractional MAC itself -- confirm on target. The
// scratch rMAC register a1 is declared but never referenced; verify
// whether the constraint is still needed.
asm int32 kal_s32_mac_s32_s32_s1_rnd_sat(int32 mac32, int32 var1, int32 var2)
{
    @[
        .scratch a1
        .restrict a1:large_rmac<rMAC>
        .restrict mac32:bank1, var1:bank1, var2:bank1
        .target mac32    
    ]
    @{mac32} = @{mac32} + @{var1} * @{var2};
}
#endif // USE_KAL_INTRINSICS

#endif /* KALIMBA_BASIC_OP_H */