/************************************************************************
 *
 * builtins.h
 *
 * (c) Copyright 1996-2004 Analog Devices, Inc.  All rights reserved.
 *
 ************************************************************************/

/*

  This header prototypes various functions built into the compiler.

  When used with compilers other than the one provided with
  VisualDSP++ for TigerSHARC DSP, this header provides C reference
  implementations of the built-in functions.  The following are
  notes on using this header on different compiler and/or machine
  combinations.

  When using a compiler other than the one provided with VisualDSP++
  for TigerSHARC DSP, please make sure your compiler supports inline
  functions as the C reference versions of the intrinsics use the
  'inline' keyword.  Also ensure that the compiler supports 16-,
  32- and 64-bit integers.

  When using this file on a machine that does not have 8-bit
  byte addressing and 16-bit short addressing, define
  __NO_BYTE_ADDRESSING__ before including this file.  If the machine
  has 8-bit char types and 16-bit short types, do not define this macro.

  To use the reference implementation of XCORRS on TS101 define the
  macro __USE_RAW_XCORRS__ before including this file.

  When using the Microsoft C compiler, use the /TP command-line option
  to select C++ compilation: "inline" is not recognised as a keyword in
  C compilation mode.

  To allow your compilation to ignore the __builtin_sysreg_read* and
  __builtin_sysreg_write* builtins, define __IGNORE_SYSREG_BUILTINS__
  before including this file which will allow the code to compile.
  To allow your compilation to ignore the __builtin_idle* builtins,
  define __IGNORE_IDLE_BUILTINS__ before including this file.

  To use the raw versions of the builtins even when using the
  compiler provided with VisualDSP++ for TigerSHARC DSP, define
  __USE_RAW_BUILTINS__ before including this file.

  If using a compiler for which the type definitions are not
  appropriate (for example int is not 32-bit) then define the types
  listed below in a header named "definerawtypes.h" and define
  __OVERRIDE_RAW_TYPES__ before including this file.

    __int8   __int16   __int32   __int64
    __uint8  __uint16  __uint32  __uint64

  The 'u' types should be unsigned and the sizes should match the
  bit-size in the type names.

*/

#if defined (__ADSPTS__)
#pragma once
#ifndef __NO_BUILTIN
#pragma system_header /* builtins.h */
#endif
#endif /* __ADSPTS */

#ifndef __BUILTINS_DEFINED
#define __BUILTINS_DEFINED

#ifdef __cplusplus
extern "C" {
#endif

/* A macro to enclose 64-bit constants */

#if !defined (_MSC_VER)
#define __ULLCONST(X) X##ULL
#else
#define __ULLCONST(X) X
#endif

/* Internal types */

#if defined (__OVERRIDE_RAW_TYPES__)

#include "definerawtypes.h"

#else

#if defined (__ADSPTS__)
#if !defined (__TS_BYTE_ADDRESS) && !defined (__NO_BYTE_ADDRESSING__)
#define __NO_BYTE_ADDRESSING__
#endif
#endif

/* Where we have byte addressing, the following types need to be defined */

#if !defined (__NO_BYTE_ADDRESSING__)
typedef unsigned char __uint8;
typedef unsigned short int __uint16;
#endif

/* Define the following type on all platforms */

typedef unsigned int __uint32;

#if defined (_MSC_VER)

/* Visual C++ requires the following type to be defined */

typedef unsigned __int64 __uint64;

#else

/* Other compilers require the following type definitions */

#if !defined (__NO_BYTE_ADDRESSING__)
typedef char __int8;
typedef short int __int16;
#endif

typedef int __int32;
typedef long long int __int64;
typedef unsigned long long int __uint64;

#endif /* _MSC_VER */

#endif /* __OVERRIDE_RAW_TYPES__ */

#ifndef _RAW_TYPES
#define _RAW_TYPES
typedef __int32 _raw32;
typedef __int64 _raw64;
#endif

#if defined (__ADSPTS__) && !defined (__USE_RAW_BUILTINS__)

float __builtin_recip(float);
float __builtin_rsqrt(float);
int __builtin_fix(float);
int __builtin_clip(int, int);
long __builtin_lclip(long, long);
float __builtin_fclipf(float, float);
int __builtin_avg(int, int);
long __builtin_lavg(long, long);
int __builtin_avgt(int, int);
long __builtin_lavgt(long, long);
float __builtin_favgf(float, float);
float __builtin_copysignf(float, float);
int __builtin_conv_RtoLR(long);
long __builtin_conv_LRtoR(int);
float __builtin_conv_RtoF(int);
int __builtin_conv_FtoR(float);
long long __builtin_mult_i1x32_wide(int, int);
unsigned long long __builtin_mult_u1x32_wide(unsigned, unsigned);
int __builtin_multr_fr1x32(int, int);
int __builtin_mult_fr1x32(int, int);
int __builtin_mult_sat(int, int);
int __builtin_multr_fr1x32_sat(int, int);
int __builtin_mult_fr1x32_sat(int, int);
int __builtin_add_sat(int, int);
unsigned __builtin_uadd_sat(unsigned, unsigned);
int __builtin_sub_sat(int, int);
unsigned __builtin_usub_sat(unsigned, unsigned);
int __builtin_neg_sat(int);
int __builtin_count_ones(int);
int __builtin_lcount_ones(long);
int __builtin_addbitrev(int, int);
long long __builtin_llabs(long long);
long long __builtin_llmin(long long, long long);
long long __builtin_llmax(long long, long long);
long long __builtin_llclip(long long, long long);
long long __builtin_llavg(long long, long long);
int __builtin_llcount_ones(long long);
int __builtin_add_2x16(int, int);
unsigned __builtin_add_u2x16(unsigned, unsigned);
int __builtin_sub_2x16(int, int);
unsigned __builtin_sub_u2x16(unsigned, unsigned);
int __builtin_mult_i2x16(int, int);
unsigned __builtin_mult_u2x16(unsigned, unsigned);
int __builtin_multr_fr2x16(int, int);
int __builtin_mult_fr2x16(int, int);
int __builtin_cmult_i2x16(int, int);
int __builtin_cmult_fr2x16(int, int);
int __builtin_cmultr_fr2x16(int, int);
int __builtin_cmult_conj_i2x16(int, int);
int __builtin_cmult_conj_fr2x16(int, int);
int __builtin_cmultr_conj_fr2x16(int, int);
int __builtin_cmult_i2x16_sat(int, int);
int __builtin_cmult_fr2x16_sat(int, int);
int __builtin_cmultr_fr2x16_sat(int, int);
int __builtin_cmult_conj_i2x16_sat(int, int);
int __builtin_cmult_conj_fr2x16_sat(int, int);
int __builtin_cmultr_conj_fr2x16_sat(int, int);
long long __builtin_cmult_i2x16_wide(int, int);
long long __builtin_cmult_conj_i2x16_wide(int, int);
int __builtin_add_2x16_sat(int, int);
unsigned __builtin_add_u2x16_sat(unsigned, unsigned);
int __builtin_sub_2x16_sat(int, int);
unsigned __builtin_sub_u2x16_sat(unsigned, unsigned);
int __builtin_neg_2x16_sat(int);
int __builtin_neg_4x8_sat(int);
long long __builtin_neg_8x8_sat(long long);
int __builtin_multr_fr2x16_sat(int, int);
int __builtin_mult_fr2x16_sat(int, int);
long long __builtin_mult_i2x16_wide(int, int);
unsigned long long __builtin_mult_u2x16_wide(unsigned, unsigned);
int __builtin_mult_i2x16_sat(int, int);
long long __builtin_mult_i2x16_wide_sat(int, int);
int __builtin_sum_2x16(int);
int __builtin_sum_2x32(long long);
int __builtin_abs_2x16(int);
int __builtin_max_2x16(int, int);
unsigned __builtin_max_u2x16(unsigned, unsigned);
int __builtin_min_2x16(int, int);
unsigned __builtin_min_u2x16(unsigned, unsigned);
int __builtin_clip_2x16(int, int);
long long __builtin_merge_2x16(int, int);
long long __builtin_add_4x16(long long, long long);
long long __builtin_sub_4x16(long long, long long);
long long __builtin_mult_i4x16(long long, long long);
unsigned long long __builtin_mult_u4x16(unsigned long long, unsigned long long);
__builtin_quad __builtin_mult_i4x16_wide(long long, long long);
__builtin_quad __builtin_mult_u4x16_wide(unsigned long long, unsigned long long);
long long __builtin_multr_fr4x16(long long, long long);
long long __builtin_mult_fr4x16(long long, long long);
long long __builtin_add_4x16_sat(long long, long long);
unsigned long long __builtin_add_u4x16_sat(unsigned long long, unsigned long long);
long long __builtin_sub_4x16_sat(long long, long long);
unsigned long long __builtin_sub_u4x16_sat(unsigned long long, unsigned long long);
long long __builtin_neg_4x16_sat(long long);
long long __builtin_mult_i4x16_sat(long long, long long);
__builtin_quad __builtin_mult_i4x16_wide_sat(long long, long long);
long long __builtin_multr_fr4x16_sat(long long, long long);
long long __builtin_mult_fr4x16_sat(long long, long long);
int __builtin_sum_4x16(long long);
long long __builtin_abs_4x16(long long);
long long __builtin_max_4x16(long long, long long);
unsigned long long __builtin_max_u4x16(unsigned long long, unsigned long long);
long long __builtin_min_4x16(long long, long long);
unsigned long long __builtin_min_u4x16(unsigned long long, unsigned long long);
long long __builtin_clip_4x16(long long, long long);
long long __builtin_add_2x32(long long, long long);
unsigned long long __builtin_add_2x32u(unsigned long long, unsigned long long);
long long __builtin_sub_2x32(long long, long long);
unsigned long long __builtin_sub_2x32u(unsigned long long, unsigned long long);
__builtin_quad __builtin_add_4x32(__builtin_quad, __builtin_quad);
__builtin_quad __builtin_sub_4x32(__builtin_quad, __builtin_quad);
long long __builtin_add_2x32_sat(long long, long long);
unsigned long long __builtin_add_u2x32_sat(unsigned long long, unsigned long long);
long long __builtin_sub_2x32_sat(long long, long long);
unsigned long long __builtin_sub_u2x32_sat(unsigned long long, unsigned long long);
__builtin_quad __builtin_add_4x32_sat(__builtin_quad, __builtin_quad);
__builtin_quad __builtin_add_u4x32_sat(__builtin_quad, __builtin_quad);
__builtin_quad __builtin_sub_4x32_sat(__builtin_quad, __builtin_quad);
__builtin_quad __builtin_sub_u4x32_sat(__builtin_quad, __builtin_quad);
int __builtin_ashift_4x8(int, int);
int __builtin_ashift_2x16(int, int);
long long __builtin_ashift_8x8(long long, int);
long long __builtin_ashift_4x16(long long, int);
long long __builtin_ashift_2x32(long long, int);
int __builtin_lshift_4x8(int, int);
int __builtin_lshift_2x16(int, int);
long long __builtin_lshift_8x8(long long, int);
long long __builtin_lshift_4x16(long long, int);
long long __builtin_lshift_2x32(long long, int);
int __builtin_rotate_1x32(int, int);
long long __builtin_rotate_2x32(long long, int);
long long __builtin_rotate_1x64(long long, int);
int __builtin_compact_to_i2x16(long long);
int __builtin_compact_to_i2x16_sat(long long);
long long __builtin_expand_i2x16(int);
long long __builtin_expand_i2x16_ze(int);
int __builtin_compact_to_fr2x16(long long);
int __builtin_compact_to_fr2x16_trunc(long long);
int __builtin_compact_to_fr4x8(long long);
int __builtin_compact_to_fr4x8_trunc(long long);
long long __builtin_expand_fr2x16(int);
__builtin_quad __builtin_compose_128(long long, long long);
long long __builtin_high_64(__builtin_quad);
long long __builtin_low_64(__builtin_quad);
unsigned long long __builtin_compose_64u(unsigned int, unsigned int);
long long __builtin_compose_64(int, int);
unsigned int __builtin_high_32u(unsigned long long);
int __builtin_high_32(long long);
unsigned int __builtin_low_32u(unsigned long long);
int __builtin_low_32(long long);
long double __builtin_f_compose_64(float, float);
float __builtin_f_high_32(long double);
float __builtin_f_low_32(long double);
void *__builtin_alloca_aligned(int, int);
long long __builtin_tmax_add(long long, long long, __builtin_quad);
long long __builtin_tmax_add_8s(long long, long long, __builtin_quad);
long long __builtin_tmax_sub(long long, long long, __builtin_quad);
long long __builtin_tmax_sub_8s(long long, long long, __builtin_quad);
int __builtin_tmax(int, int);
int __builtin_tmax_4s(int, int);
long long __builtin_max_add(long long, long long, __builtin_quad);
long long __builtin_max_add_8s(long long, long long, __builtin_quad);
long long __builtin_max_sub(long long, long long, __builtin_quad);
long long __builtin_max_sub_8s(long long, long long, __builtin_quad);
__builtin_quad __builtin_acs_tmax(long long, long long, int, long long);
__builtin_quad __builtin_acs_tmax_8s(long long, long long, int, long long);
__builtin_quad __builtin_acs_max(long long, long long, int, long long);
__builtin_quad __builtin_acs_max_8s(long long, long long, int, long long);
long long __builtin_acs_res2(__builtin_quad);
int __builtin_despread(__builtin_quad, long long, int);
int __builtin_despread_i(__builtin_quad, long long, int);
long long __builtin_despread_res2(int);
__builtin_quad __builtin_xcorrs(__builtin_quad, long long, long long, int, __builtin_quad, __builtin_quad, __builtin_quad, __builtin_quad);
__builtin_quad __builtin_xcorrs_clr(__builtin_quad, long long, long long, int);
__builtin_quad __builtin_xcorrs_ext(__builtin_quad, long long, long long, int, __builtin_quad, __builtin_quad, __builtin_quad, __builtin_quad);
__builtin_quad __builtin_xcorrs_clr_ext(__builtin_quad, long long, long long, int);
__builtin_quad __builtin_xcorrs_i(__builtin_quad, long long, long long, int, __builtin_quad, __builtin_quad, __builtin_quad, __builtin_quad);
__builtin_quad __builtin_xcorrs_i_clr(__builtin_quad, long long, long long, int);
__builtin_quad __builtin_xcorrs_i_ext(__builtin_quad, long long, long long, int, __builtin_quad, __builtin_quad, __builtin_quad, __builtin_quad);
__builtin_quad __builtin_xcorrs_i_clr_ext(__builtin_quad, long long, long long, int);
__builtin_quad __builtin_xcorrs_res2(__builtin_quad);
__builtin_quad __builtin_xcorrs_res3(__builtin_quad);
__builtin_quad __builtin_xcorrs_res4(__builtin_quad);
long long __builtin_xcorrs_res5(__builtin_quad);
long long __builtin_xcorrs_res6(__builtin_quad);
long long __builtin_permute_8b(long long, int);
__builtin_quad __builtin_permute_8s(long long, int);
int __builtin_add_4x8(int, int);
int __builtin_sub_4x8(int, int);
int __builtin_add_4x8_sat(int, int);
unsigned __builtin_add_u4x8_sat(unsigned, unsigned);
int __builtin_sub_4x8_sat(int, int);
unsigned __builtin_sub_u4x8_sat(unsigned, unsigned);
int __builtin_sum_4x8(int);
int __builtin_abs_4x8(int);
int __builtin_max_4x8(int, int);
int __builtin_min_4x8(int, int);
int __builtin_clip_4x8(int, int);
long long __builtin_merge_4x8(int, int);
long long __builtin_add_8x8(long long, long long);
long long __builtin_sub_8x8(long long, long long);
long long __builtin_add_8x8_sat(long long, long long);
unsigned long long __builtin_add_u8x8_sat(unsigned long long, unsigned long long);
long long __builtin_sub_8x8_sat(long long, long long);
unsigned long long __builtin_sub_u8x8_sat(unsigned long long, unsigned long long);
int __builtin_sum_8x8(long long);
long long __builtin_abs_8x8(long long);
long long __builtin_max_8x8(long long, long long);
long long __builtin_min_8x8(long long, long long);
long long __builtin_clip_8x8(long long, long long);
int __builtin_fext(int, int);
int __builtin_fext_se(int, int);
int __builtin_fext_ze(int, int);
int __builtin_fdep_se(int, int, int);
int __builtin_fdep(int, int, int);
int __builtin_fdep_zf(int, int, int);
long long __builtin_fext2(long long, int);
long long __builtin_fext2_se(long long, int);
long long __builtin_fext2_ze(long long, int);
long long __builtin_fdep2_se(long long, long long, int);
long long __builtin_fdep2(long long, long long, int);
long long __builtin_fdep2_zf(long long, long long, int);
int __builtin_fext_long_control(int, long long);
int __builtin_fext_long_control_se(int, long long);
int __builtin_fext_long_control_ze(int, long long);
int __builtin_fdep_long_control_se(int, int, long long);
int __builtin_fdep_long_control(int, int, long long);
int __builtin_fdep_long_control_zf(int, int, long long);
long long __builtin_fext2_long_control(long long, long long);
long long __builtin_fext2_long_control_se(long long, long long);
long long __builtin_fext2_long_control_ze(long long, long long);
long long __builtin_fdep2_long_control_se(long long, long long, long long);
long long __builtin_fdep2_long_control(long long, long long, long long);
long long __builtin_fdep2_long_control_zf(long long, long long, long long);
int __builtin_compact_to_i4x8(long long);
int __builtin_compact_to_i4x8_sat(long long);
long long __builtin_expand_i4x8(int);
long long __builtin_expand_i4x8_ze(int);
int __builtin_sysreg_read(int);
long long __builtin_sysreg_read2(int);
__builtin_quad __builtin_sysreg_read4(int);
void __builtin_sysreg_write(int, unsigned int);
void __builtin_sysreg_write2(int, unsigned long long);
void __builtin_sysreg_write4(int, __builtin_quad);
void __builtin_idle(void);
void __builtin_idle_lp(void);
void __builtin_btbinv(void);
void __builtin_btben(void);
int __builtin_exp(int);
int __builtin_exp2(long long);
int __builtin_lead_zero(int);
int __builtin_lllead_zero(long long);
int __builtin_lead_ones(int);
int __builtin_lllead_ones(long long);
#ifdef __TS_BYTE_ADDRESS
int *__builtin_dab_2x32_res1(int *, int *, int);
int *__builtin_dab_4x32_res1(int *, int *, int);
long long __builtin_dab_2x32_res2(int *);
__builtin_quad __builtin_dab_4x32_res2(int *);
short *__builtin_dab_2x16_res1(short *, short *, int);
short *__builtin_dab_4x16_res1(short *, short *, int);
short *__builtin_dab_8x16_res1(short *, short *, int);
int __builtin_dab_2x16_res2(short *);
long long __builtin_dab_4x16_res2(short *);
__builtin_quad __builtin_dab_8x16_res2(short *);
#else
void *__builtin_dab_2x32_res1(void *, void *, int);
void *__builtin_dab_4x32_res1(void *, void *, int);
long long __builtin_dab_2x32_res2(void *);
__builtin_quad __builtin_dab_4x32_res2(void *);
void *__builtin_dab_2x16_res1(void *, int, void *, int);
void *__builtin_dab_4x16_res1(void *, int, void *, int);
void *__builtin_dab_8x16_res1(void *, int, void *, int);
int __builtin_dab_2x16_res2(void *);
long long __builtin_dab_4x16_res2(void *);
__builtin_quad __builtin_dab_8x16_res2(void *);
#endif
/* prototypes for circular buffer builtins */
long __builtin_circindex (long, long, unsigned long);
void *__builtin_circptr (const void *, long, const void *, unsigned long);

#if defined(__WORKAROUND_ANOMALY_0133)

/* Due to anomaly 03-00-0133 on TS101, interrupts must be disabled two
   cycles before executing an 'idle'.  These macros replace the builtins. */

#define __builtin_idle()                                         \
  asm volatile ("xr0 = imaskh;;\n\t"                             \
                "xr1 = 0xefffffff;;\n\t"                         \
                "xr1 = r0 and r1;;\n\t"                          \
                "imaskh = xr1;;\n\t"                             \
                "nop;;\n\t"                                      \
                "idle; imaskh = xr0;;" : : : "xr0", "xr1", "XSTAT");

#define __builtin_idle_lp()                                      \
  asm volatile ("xr0 = imaskh;;\n\t"                             \
                "xr1 = 0xefffffff;;\n\t"                         \
                "xr1 = r0 and r1;;\n\t"                          \
                "imaskh = xr1;;\n\t"                             \
                "nop;;\n\t"                                      \
                "idle (lp); imaskh = xr0;;" : : : "xr0", "xr1", "XSTAT");

#elif defined(__WORKAROUND_ANOMALY_0306)

/* Due to anomaly 03-00-0306 on TS20x, there must be three lines of
   nops before issuing an idle instruction. */

#define __builtin_idle()                                         \
  asm volatile ("nop;;\n\t"                                      \
                "nop;;\n\t"                                      \
                "nop;;\n\t"                                      \
                "idle;;");

#define __builtin_idle_lp()                                      \
  asm volatile ("nop;;\n\t"                                      \
                "nop;;\n\t"                                      \
                "nop;;\n\t"                                      \
                "idle (lp);;");

#endif

#define __acs_tmax(I1,I2,I3,I4,O1,O2) {       \
  __builtin_quad res1;                        \
  res1 = __builtin_acs_tmax(I1,I2,I3,I4);     \
  O2 = __builtin_acs_res2(res1);              \
  O1 = res1;                                  \
}

#define __acs_tmax_8s(I1,I2,I3,I4,O1,O2) {    \
  __builtin_quad res1;                        \
  res1 = __builtin_acs_tmax_8s(I1,I2,I3,I4);  \
  O2 = __builtin_acs_res2(res1);              \
  O1 = res1;                                  \
}

#define __acs_max(I1,I2,I3,I4,O1,O2) {        \
  __builtin_quad res1;                        \
  res1 = __builtin_acs_max(I1,I2,I3,I4);      \
  O2 = __builtin_acs_res2(res1);              \
  O1 = res1;                                  \
}

#define __acs_max_8s(I1,I2,I3,I4,O1,O2) {     \
  __builtin_quad res1;                        \
  res1 = __builtin_acs_max_8s(I1,I2,I3,I4);   \
  O2 = __builtin_acs_res2(res1);              \
  O1 = res1;                                  \
}

#define __despread(I1,I2,I3,O1,O2) {          \
  int res1;                                   \
  res1 = __builtin_despread(I1,I2,I3);        \
  O2 = __builtin_despread_res2(res1);         \
  O1 = res1;                                  \
}

#define __despread_i(I1,I2,I3,O1,O2) {        \
  int res1;                                   \
  res1 = __builtin_despread_i(I1,I2,I3);      \
  O2 = __builtin_despread_res2(res1);         \
  O1 = res1;                                  \
}

#if !defined (__ADSPTS101__)

#define __META_XCORRS(PRIMARY,SIGNAL,CODESLO,CODESHI,CUT,ACCUM) {      \
  __builtin_quad res1 =                                                \
    PRIMARY((SIGNAL), *((long long int *) (CODESLO)),                  \
                      *((long long int *) (CODESHI)),                  \
                      (CUT),                                           \
                      *((__builtin_quad *) (ACCUM)),                   \
                      *((__builtin_quad *) (ACCUM) + 1),               \
                      *((__builtin_quad *) (ACCUM) + 2),               \
                      *((__builtin_quad *) (ACCUM) + 3));              \
  *((__builtin_quad *) (ACCUM)) = res1;                                \
  *((__builtin_quad *) (ACCUM) + 1) = __builtin_xcorrs_res2(res1);     \
  *((__builtin_quad *) (ACCUM) + 2) = __builtin_xcorrs_res3(res1);     \
  *((__builtin_quad *) (ACCUM) + 3) = __builtin_xcorrs_res4(res1);     \
  *((long long int *) (CODESLO)) = __builtin_xcorrs_res5(res1);        \
  *((long long int *) (CODESHI)) = __builtin_xcorrs_res6(res1);        \
}

#define __META_XCORRS_CLR(PRIMARY,SIGNAL,CODESLO,CODESHI,CUT,ACCUM) {  \
  __builtin_quad res1 =                                                \
    PRIMARY((SIGNAL), *((long long int *) (CODESLO)),                  \
                      *((long long int *) (CODESHI)),                  \
                      (CUT));                                          \
  *((__builtin_quad *) (ACCUM)) = res1;                                \
  *((__builtin_quad *) (ACCUM) + 1) = __builtin_xcorrs_res2(res1);     \
  *((__builtin_quad *) (ACCUM) + 2) = __builtin_xcorrs_res3(res1);     \
  *((__builtin_quad *) (ACCUM) + 3) = __builtin_xcorrs_res4(res1);     \
  *((long long int *) (CODESLO)) = __builtin_xcorrs_res5(res1);        \
  *((long long int *) (CODESHI)) = __builtin_xcorrs_res6(res1);        \
}

#define __xcorrs(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS(__builtin_xcorrs,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_clr(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS_CLR(__builtin_xcorrs_clr,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_ext(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS(__builtin_xcorrs_ext,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_clr_ext(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS_CLR(__builtin_xcorrs_clr_ext,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_i(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS(__builtin_xcorrs_i,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_i_clr(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS_CLR(__builtin_xcorrs_i_clr,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_i_ext(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS(__builtin_xcorrs_i_ext,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#define __xcorrs_i_clr_ext(SIGNAL,CDSLO,CDSHI,CUT,ACCUM) \
  __META_XCORRS_CLR(__builtin_xcorrs_i_clr_ext,SIGNAL,CDSLO,CDSHI,CUT,ACCUM)

#endif /* !defined (__ADSPTS101__) */

#ifdef __TS_BYTE_ADDRESS

#define __builtin_dab_2x32(a) \
        (*a = __builtin_dab_2x32_res1((int *)*a, 0, 0),\
         __builtin_dab_2x32_res2((int *)*a))

#define __builtin_dab_4x32(a) \
        (*a = __builtin_dab_4x32_res1((int *)*a, 0, 0),\
         __builtin_dab_4x32_res2((int *)*a))

#define __builtin_dab_2x16(a) \
        (*a = __builtin_dab_2x16_res1((short *)*a, 0, 0),\
         __builtin_dab_2x16_res2((short *)*a))

#define __builtin_dab_4x16(a) \
        (*a = __builtin_dab_4x16_res1((short *)*a, 0, 0),\
         __builtin_dab_4x16_res2((short *)*a))

#define __builtin_dab_8x16(a) \
        (*a = __builtin_dab_8x16_res1((short *)*a, 0, 0),\
         __builtin_dab_8x16_res2((short *)*a))

#else

#define __builtin_dab_2x32(a) \
        (*a = __builtin_dab_2x32_res1(*a, 0, 0), \
         __builtin_dab_2x32_res2(*a))

#define __builtin_dab_4x32(a) \
        (*a = __builtin_dab_4x32_res1(*a, 0, 0), \
         __builtin_dab_4x32_res2(*a))

#define __builtin_dab_2x16(a, o) \
        (*a = __builtin_dab_2x16_res1(*a, o, 0, 0), \
         __builtin_dab_2x16_res2(*a))

#define __builtin_dab_4x16(a, o) \
        (*a = __builtin_dab_4x16_res1(*a, o, 0, 0), \
         __builtin_dab_4x16_res2(*a))

#define __builtin_dab_8x16(a, o) \
        (*a = __builtin_dab_8x16_res1(*a, o, 0, 0), \
         __builtin_dab_8x16_res2(*a))

#endif /* __TS_BYTE_ADDRESS */

#ifdef __TS_BYTE_ADDRESS

#define __builtin_dabcb_2x32(a, b, l) \
        (*a = __builtin_dab_2x32_res1((int *)*a, b, l),\
         __builtin_dab_2x32_res2((int *)*a))

#define __builtin_dabcb_4x32(a, b, l) \
        (*a = __builtin_dab_4x32_res1((int *)*a, b, l),\
         __builtin_dab_4x32_res2((int *)*a))

#define __builtin_dabcb_2x16(a, b, l) \
        (*a = __builtin_dab_2x16_res1((short *)*a, b, l),\
         __builtin_dab_2x16_res2((short *)*a))

#define __builtin_dabcb_4x16(a, b, l) \
        (*a = __builtin_dab_4x16_res1((short *)*a, b, l),\
         __builtin_dab_4x16_res2((short *)*a))

#define __builtin_dabcb_8x16(a, b, l) \
        (*a = __builtin_dab_8x16_res1((short *)*a, b, l),\
         __builtin_dab_8x16_res2((short *)*a))

#else

#define __builtin_dabcb_2x32(a, b, l) \
        (*a = __builtin_dab_2x32_res1(*a, b, l), \
         __builtin_dab_2x32_res2(*a))

#define __builtin_dabcb_4x32(a, b, l) \
        (*a = __builtin_dab_4x32_res1(*a, b, l), \
         __builtin_dab_4x32_res2(*a))

#define __builtin_dabcb_2x16(a, o, b, l) \
        (*a = __builtin_dab_2x16_res1(*a, o, b, l), \
         __builtin_dab_2x16_res2(*a))

#define __builtin_dabcb_4x16(a, o, b, l) \
        (*a = __builtin_dab_4x16_res1(*a, o, b, l), \
         __builtin_dab_4x16_res2(*a))

#define __builtin_dabcb_8x16(a, o, b, l) \
        (*a = __builtin_dab_8x16_res1(*a, o, b, l), \
         __builtin_dab_8x16_res2(*a))

#endif /* __TS_BYTE_ADDRESS */

#define __builtin_frmul __builtin_mult_fr1x32
#define __builtin_frmulr __builtin_multr_fr1x32
#define __builtin_frmul_sat __builtin_mult_fr1x32_sat
#define __builtin_frmulr_sat __builtin_multr_fr1x32_sat

#else /* ! (defined (__ADSPTS__) && !defined (__USE_RAW_BUILTINS__)) */

#ifdef __cplusplus
} /* extern "C" */
#endif
#include <math.h> /* required for __raw_rsqrt */
#ifdef __cplusplus
extern "C" {
#endif

typedef __int32 __int2x16;
typedef __int64 __int2x32;
typedef __int64 __int4x16;
typedef __int32 __int4x8;
typedef __int64 __int8x8;

/* Unions for component extraction/deposit */

typedef struct {
  __int32 i0, i1, i2, i3;
} __raw_quad;

/* Internal functions */

inline static float __raw_recip (float x) {
  /* Reference reciprocal (1/x).
  ** NOTE: this does not use the hardware look-up tables, so the result
  ** differs from the actual RECIPS instruction,
  ** e.g. RECIPS(2) returns 0.499023 rather than 0.5.
  */
  float one = 1.0f;
  return one / x;
}

inline static float __raw_rsqrt (float a) {
  /* NOTE: this function does not make use of the look-up tables so 
  ** it will give a different result from the actual RSQRTS instruction
  ** e.g. RSQRTS(4) returns 0.499023 rather than 0.5
  */
  return ( 1/(sqrt(a)) );
}

inline static __int32 __raw_fix (float a)
{
  __int32 b;
  float   c;
  b = (float)a;
  c = a - (float)b;
  return (c < 0.5 ? b : b + 1);
}

inline static __int32 __sat8 (__int32 v) {
  /* Saturate to the signed 8-bit range.  Values above 0x7f clamp to
  ** 0x7f; values at or below -0x80 clamp to the bit pattern 0x80
  ** (returned as positive 128, as in the original reference). */
  if (v > 0x7f)
    return 0x7f;
  if (v < -0x7f)
    return 0x80;
  return v;
}

inline static __int32 __usat8 (__int32 v) {
  /* Clamp to the unsigned 8-bit range [0, 0xff]. */
  if (v < 0)
    return 0;
  return (v > 0xff) ? 0xff : v;
}

inline static __int32 __sat8_from_i16 (__int32 v) {
  /* Saturate a 16-bit-encoded value to 8 bits and return its low
  ** byte.  A set bit 15 marks the value as negative: if its 16-bit
  ** magnitude exceeds 0xff it clamps to the pattern 0x80. */
  if (v & 0x8000)
    return ((0x10000 - v) > 0xff) ? 0x80 : (v & 0xff);
  if (v > 0x7f)
    return 0x7f;
  return v & 0xff;
}

inline static __int32 __sat16 (__int32 v) {
  /* Saturate to the signed 16-bit range.  The negative limit is
  ** returned as the bit pattern 0x8000 (positive 32768), matching
  ** the original reference behaviour. */
  if (v > 0x7fff)
    return 0x7fff;
  if (v < -0x7fff)
    return 0x8000;
  return v;
}

inline static __int32 __usat16 (__int32 v) {
  /* Clamp to the unsigned 16-bit range [0, 0xffff]. */
  if (v < 0)
    return 0;
  return (v > 0xffff) ? 0xffff : v;
}

inline static __int32 __sat32 (__int64 v) {
  /* Saturate a 64-bit value to the signed 32-bit range.  The negative
  ** limit is produced from the constant 0x80000000, matching the
  ** original reference behaviour. */
  __int64 clamped = v;
  if (clamped > 0x7fffffff)
    clamped = 0x7fffffff;
  else if (clamped < 0 && -clamped > 0x7fffffff)
    clamped = 0x80000000;
  return (__int32) clamped;
}

inline static __int32 __usat32 (__int64 v) {
  /* Clamp a 64-bit value to the unsigned 32-bit range [0, 0xffffffff]. */
  __int64 clamped = v;
  if (clamped < 0)
    clamped = 0;
  else if (clamped > 0xffffffff)
    clamped = 0xffffffff;
  return (__int32) clamped;
}

/*
  The C standard does not define whether '>>' is arithmetic or logical.
  These implement arithmetic right shifts (32 or 64 bit).
*/

inline static __int32 __ashiftr (__int32 a, __int32 b) {
  /* Arithmetic right shift of a 32-bit value by b.  C's '>>' on
  ** negative values is implementation-defined, so the sign bits are
  ** filled in explicitly via himask.
  ** FIX: return a unchanged when b <= 0; the original unconditionally
  ** computed 0xffffffff << (32 - b), and a shift count of 32 (or
  ** more) is undefined behaviour in C.
  */
  __int32 himask, lomask, s;
  if (b <= 0)
    return a;
  himask = 0xffffffff << (32 - b);
  lomask = 0xffffffff ^ himask;
  s = (a >> b) & lomask;
  if (a & 0x80000000) s = himask | s;
  return s;
}

inline static __int64 __ashiftr64 (__int64 a, __int64 b) {
  /* Arithmetic right shift of a 64-bit value by b, with explicit
  ** sign-bit fill (C's '>>' on negative values is
  ** implementation-defined).
  ** BUG FIX: the sign mask was built with << (32 - b), which is only
  ** correct for 32-bit operands.  For a 64-bit shift the sign bits
  ** occupy the top b positions, so the mask must use (64 - b);
  ** otherwise bits 64-b .. 59 of the result were zeroed.
  ** Also guard b <= 0 to avoid an undefined shift by 64.
  */
  __int64 himask, lomask, s;
  if (b <= 0)
    return a;
  himask = __ULLCONST (0xffffffffffffffff) << (64 - b);
  lomask = __ULLCONST (0xffffffffffffffff) ^ himask;
  s = (a >> b) & lomask;
  if (a & __ULLCONST (0x8000000000000000)) s = himask | s;
  return s;
}

inline static __int32 __sel8i (__int32 word, __int32 idx) {
  /* Extract byte 'idx' (0..3) of 'word', sign-extended to 32 bits. */
  __int32 byte = (word >> (idx << 3)) & 0xff;
  return (byte & 0x80) ? (__int32)(0xffffff00 | byte) : byte;
}

inline static __uint32 __usel8i (__int32 word, __int32 idx) {
  /* Extract byte 'idx' (0..3) of 'word', zero-extended. */
  return (word >> (idx << 3)) & 0xff;
}

inline static __int32 __sel8ll (__int64 dword, __int32 idx) {
  /* Extract byte 'idx' (0..7) of a 64-bit value, sign-extended. */
  __int32 byte = (__int32)(dword >> (idx << 3)) & 0xff;
  return (byte & 0x80) ? (__int32)(0xffffff00 | byte) : byte;
}

inline static __uint32 __usel8ll (__int64 dword, __int32 idx) {
  /* Extract byte 'idx' (0..7) of a 64-bit value, zero-extended. */
  return (__uint32)((dword >> (idx << 3)) & 0xff);
}

inline static __int32 __sel8q (__raw_quad q, __int32 idx) {
  /* Extract byte 'idx' (0..15) of a 128-bit quad, sign-extended;
  ** an out-of-range word index selects 0. */
  __int32 word, byte;
  __int32 lane = idx >> 2;
  if (lane == 0)      word = q.i0;
  else if (lane == 1) word = q.i1;
  else if (lane == 2) word = q.i2;
  else if (lane == 3) word = q.i3;
  else                word = 0;
  byte = (word >> ((idx & 3) << 3)) & 0xff;
  return (byte & 0x80) ? (__int32)(0xffffff00 | byte) : byte;
}

inline static __uint32 __usel8q (__raw_quad q, __int32 idx) {
  /* Extract byte 'idx' (0..15) of a 128-bit quad, zero-extended;
  ** an out-of-range word index selects 0. */
  __int32 word;
  __int32 lane = idx >> 2;
  if (lane == 0)      word = q.i0;
  else if (lane == 1) word = q.i1;
  else if (lane == 2) word = q.i2;
  else if (lane == 3) word = q.i3;
  else                word = 0;
  return (word >> ((idx & 3) << 3)) & 0xff;
}

inline static __int32 __sel16i (__int32 word, __int32 idx) {
  /* Extract 16-bit half 'idx' (0..1) of 'word', sign-extended. */
  __int32 half = (word >> (idx << 4)) & 0xffff;
  return (half & 0x8000) ? (__int32)(0xffff0000 | half) : half;
}

inline static __uint32 __usel16i (__int32 word, __int32 idx) {
  /* Extract 16-bit half 'idx' (0..1) of 'word', zero-extended. */
  return (word >> (idx << 4)) & 0xffff;
}

inline static __int32 __sel16ll (__int64 dword, __int32 idx) {
  /* Extract 16-bit half 'idx' (0..3) of a 64-bit value, sign-extended. */
  __int32 half = (__int32)(dword >> (idx << 4)) & 0xffff;
  return (half & 0x8000) ? (__int32)(0xffff0000 | half) : half;
}

inline static __uint32 __usel16ll (__int64 dword, __int32 idx) {
  /* Extract 16-bit half 'idx' (0..3) of a 64-bit value, zero-extended. */
  return (__uint32)(dword >> (idx << 4)) & 0xffff;
}

inline static __int32 __sel16q (__raw_quad q, __int32 idx) {
  /* Extract 16-bit half 'idx' (0..7) of a 128-bit quad, sign-extended;
  ** an out-of-range word index selects 0. */
  __int32 word, half;
  __int32 lane = idx >> 1;
  if (lane == 0)      word = q.i0;
  else if (lane == 1) word = q.i1;
  else if (lane == 2) word = q.i2;
  else if (lane == 3) word = q.i3;
  else                word = 0;
  half = (word >> ((idx & 1) << 4)) & 0xffff;
  return (half & 0x8000) ? (__int32)(0xffff0000 | half) : half;
}

inline static __uint32 __usel16q (__raw_quad a, __int32 b) {
  __int32 word;
  switch (b >> 1) {
    case 0:  word = a.i0; break;
    case 1:  word = a.i1; break;
    case 2:  word = a.i2; break;
    case 3:  word = a.i3; break;
    default: word = 0;    break;
  }
  word >>= ((b & 1) << 4);
  return word & 0xffff;
}

/* Select one 32-bit half of a 64-bit value: any positive b picks the
 * high word, b <= 0 picks the low word. */
inline static __int32 __sel32ll (__int64 a, __int32 b) {
  return (__int32) ((b > 0) ? (a >> 32) : (a & 0xffffffff));
}

/* As __sel32ll, but the selected word is returned unsigned. */
inline static __uint32 __usel32ll (__int64 a, __int32 b) {
  return (__uint32) ((b > 0) ? (a >> 32) : (a & 0xffffffff));
}

/* Select 32-bit lane b (0..3) of a 128-bit quad; any other index yields 0. */
inline static __int32 __sel32q (__raw_quad a, __int32 b) {
  if (b == 0) return a.i0;
  if (b == 1) return a.i1;
  if (b == 2) return a.i2;
  if (b == 3) return a.i3;
  return 0;
}

/* As __sel32q, but the selected lane is returned unsigned. */
inline static __uint32 __usel32q (__raw_quad a, __int32 b) {
  if (b == 0) return (__uint32) a.i0;
  if (b == 1) return (__uint32) a.i1;
  if (b == 2) return (__uint32) a.i2;
  if (b == 3) return (__uint32) a.i3;
  return 0;
}

/* Unsigned 32-bit maximum. */
inline static __uint32 __umax (__uint32 x, __uint32 y) {
  if (x > y) {
    return x;
  }
  return y;
}

/* Unsigned 32-bit minimum. */
inline static __uint32 __umin (__uint32 x, __uint32 y) {
  if (x < y) {
    return x;
  }
  return y;
}

/* Sign-extend the low 16-bit lane of a 2x16 vector to 32 bits. */
inline static __int32 __expand_low_of_i2x16 (__int2x16 a) {
  return __sel16i (a, 0);
}

/* Sign-extend the high 16-bit lane of a 2x16 vector to 32 bits. */
inline static __int32 __expand_high_of_i2x16 (__int2x16 a) {
  return __sel16i (a, 1);
}

/* Pack the low bytes of four 32-bit values into a 4x8 vector;
 * a lands in byte 0 (least significant), d in byte 3. */
inline static __int4x8 __compact_to_i4x8_from_i32 (__int32 a,
                                                   __int32 b,
                                                   __int32 c,
                                                   __int32 d) {
  return  (a & 0xff)        |
         ((b & 0xff) <<  8) |
         ((c & 0xff) << 16) |
         ((d & 0xff) << 24);
}

/* Pack the low bytes of eight 32-bit values into an 8x8 vector;
 * a lands in byte 0 (least significant), h in byte 7. */
inline static __int8x8 __compact_to_i8x8_from_i32 (__int32 a,
                                                   __int32 b,
                                                   __int32 c,
                                                   __int32 d,
                                                   __int32 e,
                                                   __int32 f,
                                                   __int32 g,
                                                   __int32 h) {
  return  (__int64) (a & 0xff)        |
         ((__int64) (b & 0xff) <<  8) |
         ((__int64) (c & 0xff) << 16) |
         ((__int64) (d & 0xff) << 24) |
         ((__int64) (e & 0xff) << 32) |
         ((__int64) (f & 0xff) << 40) |
         ((__int64) (g & 0xff) << 48) |
         ((__int64) (h & 0xff) << 56);
}

/* Composition and Extraction */

/* Low 32-bit half of a 2x32 vector. */
inline static __int32 __raw_low_32 (__int2x32 a) {
  return __sel32ll (a, 0);
}

/* Low 32-bit half of a 2x32 vector, unsigned. */
inline static __uint32 __raw_low_32u (__int2x32 a) {
  return (__uint32) __sel32ll (a, 0);
}

/* High 32-bit half of a 2x32 vector. */
inline static __int32 __raw_high_32 (__int2x32 a) {
  return __sel32ll (a, 1);
}

/* High 32-bit half of a 2x32 vector, unsigned. */
inline static __uint32 __raw_high_32u (__int2x32 a) {
  return (__uint32) __sel32ll (a, 1);
}

/* Low 64 bits of a quad: lane i1 in the high word, i0 in the low word. */
inline static __int64 __raw_low_64 (__raw_quad a) {
  return ((__int64) a.i1 << 32) | ((__int64) a.i0 & 0xffffffff);
}

/* High 64 bits of a quad: lane i3 in the high word, i2 in the low word. */
inline static __int64 __raw_high_64 (__raw_quad a) {
  return ((__int64) a.i3 << 32) | ((__int64) a.i2 & 0xffffffff);
}

/* Combine two 32-bit values into a 2x32 vector (lo in bits 31..0). */
inline static __int2x32 __raw_compose_64 (__int32 lo, __int32 hi) {
  return ((__int64) hi << 32) | ((__int64) lo & 0xffffffff);
}

/* Unsigned variant of __raw_compose_64. */
inline static __int2x32 __raw_compose_64u (__uint32 lo, __uint32 hi) {
  return ((__uint64) hi << 32) | ((__uint64) lo & 0xffffffff);
}

/* Combine two 64-bit values into a quad (lo fills lanes i0/i1). */
inline static __raw_quad __raw_compose_128 (__int64 lo, __int64 hi) {
  __raw_quad c;
  c.i0 = __raw_low_32  (lo);
  c.i1 = __raw_high_32 (lo);
  c.i2 = __raw_low_32  (hi);
  c.i3 = __raw_high_32 (hi);
  return c;
}

/* Pack the low halfwords of both 32-bit lanes into a 2x16 vector (truncating). */
inline static __int2x16 __raw_compact_to_i2x16 (__int2x32 a) {
  return (__sel32ll (a, 1) << 16) | (__sel32ll (a, 0) & 0xffff);
}

/* As __raw_compact_to_i2x16 but each lane is first saturated to 16 bits. */
inline static __int2x16 __raw_compact_to_i2x16_sat (__int2x32 a) {
  return (__sat16 (__sel32ll (a, 1)) << 16) |
          (__sat16 (__sel32ll (a, 0)) & 0xffff);
}

/* Sign-extend both 16-bit lanes of a 2x16 vector into a 2x32 vector. */
inline static __int2x32 __raw_expand_i2x16 (__int2x16 a) {
  return __raw_compose_64 (__sel16i (a, 0), __sel16i (a, 1));
}

/* Zero-extend both 16-bit lanes of a 2x16 vector into a 2x32 vector. */
inline static __int2x32 __raw_expand_i2x16_ze (__int2x16 a) {
  return __raw_compose_64 (__usel16i (a, 0), __usel16i (a, 1));
}

/* Compact two 32-bit lanes to 16-bit lanes, keeping the upper halfwords
 * and rounding by the discarded lower halfwords: a remainder of exactly
 * 0x8000 rounds to even (increment only if the result would be odd),
 * a larger remainder rounds up.  Each result is then saturated to 16 bits. */
inline static __int2x16 __raw_compact_to_fr2x16 (__int2x32 a) {
  __int32 r1 = __sel16ll (a, 3);   /* high lane: kept upper half */
  __int32 r2 = __usel16ll (a, 2);  /* high lane: discarded lower half */
  __int32 q1 = __sel16ll (a, 1);   /* low lane: kept upper half */
  __int32 q2 = __usel16ll (a, 0);  /* low lane: discarded lower half */

  if (r2 == 0x8000)
    r1 += r1 & 0x1;                /* tie: round to even */
  else if (r2 > 0x8000)
    r1 += 1;                       /* above half: round up */
  if (q2 == 0x8000)
    q1 += q1 & 0x1;
  else if (q2 > 0x8000)
    q1 += 1;

  return (__sat16 (r1) << 16) | (__sat16 (q1) & 0xffff);
}

/* As above but truncating: simply keep the upper halfword of each lane. */
inline static __int2x16 __raw_compact_to_fr2x16_trunc (__int2x32 a) {
  return ((__usel32ll (a, 1) & 0xffff0000) | (__usel32ll (a, 0) >> 16));
}

/* Compact four 16-bit lanes to 8-bit lanes, keeping the upper byte of each
 * lane and rounding by the discarded lower byte: a remainder of exactly
 * 0x80 rounds to even, larger rounds up.  Lanes already at the positive
 * maximum 0x7f are left alone so rounding cannot overflow them. */
inline static __int4x8 __raw_compact_to_fr4x8 (__int4x16 a) {
  __int32 r1 = __sel8ll (a,7);   /* lane 3: kept upper byte */
  __int32 r2 = __usel8ll (a,6);  /* lane 3: discarded lower byte */
  __int32 r3 = __sel8ll (a,5);   /* lane 2: kept upper byte */
  __int32 r4 = __usel8ll (a,4);  /* lane 2: discarded lower byte */
  __int32 q1 = __sel8ll (a,3);   /* lane 1: kept upper byte */
  __int32 q2 = __usel8ll (a,2);  /* lane 1: discarded lower byte */
  __int32 q3 = __sel8ll (a,1);   /* lane 0: kept upper byte */
  __int32 q4 = __usel8ll (a,0);  /* lane 0: discarded lower byte */

  if (r1 != 0x7f) {              /* skip rounding at positive max */
    if (r2 == 0x80) {
      r1 += r1 & 0x1;            /* tie: round to even */
    } else if (r2 > 0x80) {
      r1 += 1;                   /* above half: round up */
    }
  }
  if (r3 != 0x7f) {
    if (r4 == 0x80) {
      r3 += r3 & 0x1;
    } else if (r4 > 0x80) {
      r3 += 1;
    }
  }
  if (q1 != 0x7f) {
    if (q2 == 0x80) {
      q1 += q1 & 0x1;
    } else if (q2 > 0x80) {
      q1 += 1;
    }
  }
  if (q3 != 0x7f) {
    if (q4 == 0x80) {
      q3 += q3 & 0x1;
    } else if (q4 > 0x80) {
      q3 += 1;
    }
  }

return ((q3 & 0xff) | ((q1 & 0xff) << 8) | ((r3 & 0xff) << 16) | ((r1 & 0xff) << 24) );
}

/* Truncating variant: keep the upper byte (odd byte index) of each 16-bit lane. */
inline static __int4x8 __raw_compact_to_fr4x8_trunc (__int4x16 a) {
  return  __usel8ll (a, 1)        |
         (__usel8ll (a, 3) << 8)  |
         (__usel8ll (a, 5) << 16) |
         (__usel8ll (a, 7) << 24);
}

/* Pack the low byte (even byte index) of each 16-bit lane into a 4x8 vector. */
inline static __int4x8 __raw_compact_to_i4x8 (__int4x16 a) {
  return  __usel8ll (a, 0)        |
         (__usel8ll (a, 2) << 8)  |
         (__usel8ll (a, 4) << 16) |
         (__usel8ll (a, 6) << 24);
}

/* As above but each 16-bit lane is first saturated to the 8-bit range. */
inline static __int4x8 __raw_compact_to_i4x8_sat (__int4x16 a) {
  return (int)(
         __sat8_from_i16(__usel16ll (a, 0))       |
         __sat8_from_i16(__usel16ll (a, 1)) << 8  |
         __sat8_from_i16(__usel16ll (a, 2)) << 16 |
         __sat8_from_i16(__usel16ll (a, 3)) << 24);
}

/* Sign-extend each 8-bit lane of a 4x8 vector into a 4x16 vector. */
inline static __int4x16 __raw_expand_i4x8 (__int4x8 a) {
  return  (__int64) (__sel8i (a, 0) & 0xffff)        |
         ((__int64) (__sel8i (a, 1) & 0xffff) << 16) |
         ((__int64) (__sel8i (a, 2) & 0xffff) << 32) |
         ((__int64) (__sel8i (a, 3) & 0xffff) << 48);
}

/* Zero-extend each 8-bit lane of a 4x8 vector into a 4x16 vector. */
inline static __uint64 __raw_expand_i4x8_ze (__int4x8 a) {
  return  (__uint64) (__usel8i (a, 0) & 0xffff)        |
         ((__uint64) (__usel8i (a, 1) & 0xffff) << 16) |
         ((__uint64) (__usel8i (a, 2) & 0xffff) << 32) |
         ((__uint64) (__usel8i (a, 3) & 0xffff) << 48);
}
/* 32-bit and 64-bit operations */

/* Saturating signed 32-bit add: widen to 64 bits, then clamp via __sat32. */
inline static __int32 __raw_add_sat (__int32 a, __int32 b) {
  return __sat32 ((__int64) a + (__int64) b);
}

/* Saturating unsigned 32-bit add. */
inline static __uint32 __raw_uadd_sat (__uint32 a, __uint32 b) {
  return __usat32 ((__uint64) a + (__uint64) b);
}

/* Saturating signed 32-bit subtract. */
inline static __int32 __raw_sub_sat (__int32 a, __int32 b) {
  return __sat32 ((__int64) a - (__int64) b);
}

/* Saturating unsigned 32-bit subtract (underflow clamps via __usat32). */
inline static __uint32 __raw_usub_sat (__uint32 a, __uint32 b) {
  return __usat32 ((__uint64) a - (__uint64) b);
}

/* Saturating 32-bit negate (maps 0x80000000 to 0x7fffffff). */
inline static __int32 __raw_neg_sat (__int32 a) {
  return __sat32 (-(__int64) a);
}

/* Rounded average of two signed 32-bit values: (a + b) / 2 where an odd
 * sum is nudged so the halved result rounds to an even value.
 *
 * Fix: the operands are widened to 64 bits BEFORE the addition.  The
 * original computed `a + b` in 32-bit arithmetic, which overflows
 * (undefined behaviour) for large same-sign operands and produced a
 * wrapped sum before the assignment to the 64-bit accumulator. */
inline static __int32 __raw_avg (__int32 a, __int32 b) {
  __int64 sum = (__int64) a + (__int64) b;

  if (sum > 0) {
    if (sum & 1) {
      if (sum & 2) sum++;        /* odd positive sum: round to even */
    }
  } else {
    if ((-sum) & 1) {
      if ((-sum) & 2) sum--;     /* odd negative sum: round to even */
    }
  }
  return (__int32) (sum / 2);
}

inline static long __raw_lavg (long a, long b) {
  __int64 sum = a + b;

  if (sum > 0) {
    if (sum & 1) {
      if (sum & 2) sum++;
    }
  } else {
    if ((-sum) & 1) {
      if ((-sum) & 2) sum--;
    }
  }
  return (long) (sum / 2);
}

/* Truncated average of two signed 32-bit values: floor((a + b) / 2).
 * The decrement of a negative sum turns C's truncate-toward-zero
 * division into a floor.
 *
 * Fix: widen the operands to 64 bits BEFORE adding; the original's
 * 32-bit `a + b` overflowed for large same-sign operands. */
inline static __int32 __raw_avgt (__int32 a, __int32 b) {
  __int64 sum = (__int64) a + (__int64) b;
  if (sum < 0) sum--;
  return (__int32) (sum / 2);
}

inline static long __raw_lavgt (long a, long b) {
  __int64 sum = a + b;
  if (sum < 0) sum--;
  return (long) (sum / 2);
}

/* Rounded average of two signed 64-bit values.  Because there is no wider
 * type, a + b may wrap; `carry` records when both operands have the same
 * sign and their bit-62 values would carry into the sign bit, and the
 * result's bit 62 is patched after the halving.  Rounding matches
 * __raw_avg: an odd sum is nudged so the halved result is even.
 * NOTE(review): the first condition uses bitwise `&` where the others use
 * `&&`; harmless here since relational results are 0/1, but presumably a
 * typo for `&&`. */
inline static __int64 __raw_llavg (__int64 a, __int64 b) {
  __int64 sum = a + b;
  __int32 carry;

  carry = 0;
  if ((a < 0) & (b < 0)) {
    if ((!(a & __ULLCONST (0x4000000000000000))) &&
        (!(b & __ULLCONST (0x4000000000000000)))) carry = 1;
  } else if ((a > 0) && (b > 0)) {
    if ((a & __ULLCONST (0x4000000000000000)) &&
        (b & __ULLCONST (0x4000000000000000))) carry = 1;
  }
  if (sum > 0) {
    if (sum & 1) {
      if (sum & 2) sum++;       /* odd positive sum: round to even */
    }
  } else {
    if ((-sum) & 1) {
      if ((-sum) & 2) sum--;    /* odd negative sum: round to even */
    }
  }
  sum = sum / 2;
  if (sum < 0) {
    if (carry) sum = sum & __ULLCONST (0xbfffffffffffffff);  /* clear bit 62 */
  } else {
    if (carry) sum = sum | __ULLCONST (0x4000000000000000);  /* set bit 62 */
  }
  return sum;
}

/* Saturating 32-bit absolute value: |INT_MIN| clamps to INT_MAX. */
inline static __int32 __raw_abs (__int32 a) {
  if (a == 0x80000000) {
    return 0x7fffffff;           /* -2^31 has no positive counterpart */
  }
  return (a < 0) ? -a : a;
}

/* Saturating 64-bit absolute value: |LLONG_MIN| clamps to LLONG_MAX. */
inline static __int64 __raw_llabs (__int64 a) {
  if (a == __ULLCONST (0x8000000000000000)) {
    return __ULLCONST (0x7fffffffffffffff);
  }
  return (a < 0) ? -a : a;
}

/* Clip a to the range [-|b|, +|b|]; a == INT_MIN clamps to -|b|. */
inline static __int32 __raw_clip (__int32 a, __int32 b) {
  __int32 modb;
  modb = __raw_abs (b);
  if (a == 0x80000000) {
    return -modb;                  /* __raw_abs(INT_MIN) would saturate */
  }
  if (__raw_abs (a) < modb) return a;
  if (a > 0) return modb;
  return -modb;
}

/* Long variant of __raw_clip.
 * NOTE(review): delegates to the 32-bit __raw_abs — fine where long is
 * 32 bits (this target); confirm if ever built with a wider long. */
inline static long __raw_lclip (long a, long b) {
  long modb;
  modb = __raw_abs (b);
  if (a == 0x80000000) {
    return -modb;
  }
  if (__raw_abs (a) < modb) return a;
  if (a > 0) return modb;
  return -modb;
}

/* 64-bit variant of __raw_clip. */
inline static __int64 __raw_llclip (__int64 a, __int64 b) {
  __int64 modb;
  modb = __raw_llabs (b);
  if (a == __ULLCONST (0x8000000000000000)) {
    return -modb;
  }
  if (__raw_llabs (a) < modb) return a;
  if (a > 0) return modb;
  return -modb;
}

/* Signed 32-bit maximum. */
inline static __int32 __raw_max (__int32 x, __int32 y) {
  if (x > y) {
    return x;
  }
  return y;
}

/* Signed 64-bit maximum. */
inline static __int64 __raw_llmax (__int64 x, __int64 y) {
  if (x > y) {
    return x;
  }
  return y;
}

/* Signed 32-bit minimum. */
inline static __int32 __raw_min (__int32 x, __int32 y) {
  if (x < y) {
    return x;
  }
  return y;
}

/* Signed 64-bit minimum. */
inline static __int64 __raw_llmin (__int64 x, __int64 y) {
  if (x < y) {
    return x;
  }
  return y;
}

/* Full 64-bit product of two signed 32-bit values. */
inline static __int64 __raw_mult_i1x32_wide (__int32 a, __int32 b) {
  return (__int64) a * (__int64) b;
}

/* Full 64-bit product of two unsigned 32-bit values. */
inline static __uint64 __raw_mult_u1x32_wide (__uint32 a, __uint32 b) {
  return (__uint64) a * (__uint64) b;
}

/* Block exponent of a 32-bit value: 0 when the top two bits differ,
 * decreasing by one for each additional copy of the top bit.
 * x == 0 returns -31. */
inline static __int32 __raw_exp (__uint32 x) {
  __uint32 b;
  __int32 i;

  if (x == 0) return -31;

  i = 0;
  b = x & 0x80000000U;           /* remember the sign/top bit */
  x <<= 1;
  while ((x != 0) && ((x & 0x80000000U) == b)) {
    x <<= 1;
    i--;                         /* one more redundant copy of the top bit */
  }

  return i;
}

/* 64-bit variant of __raw_exp; x == 0 returns -63. */
inline static __int32 __raw_exp2 (__int64 x) {
  __uint64 b;
  __int32 i;

  if (x == 0) return -63;

  i = 0;
  b = x & __ULLCONST (0x8000000000000000);
  x <<= 1;
  while ((x != 0) && ((x & __ULLCONST (0x8000000000000000)) == b)) {
    x <<= 1;
    i--;
  }

  return i;
}

/* Count consecutive one bits from the top of a 32-bit value. */
inline static __int32 __raw_lead_ones (__uint32 val) {
  __uint32 count = 0;
  __uint32 mask = 0x80000000U;
  while (mask != 0 && (val & mask) != 0) {
    count++;
    mask >>= 1;
  }
  return count;
}

/* Count consecutive one bits from the top of a 64-bit value. */
inline static __int32 __raw_lllead_ones (__uint64 val) {
  __int32 count = 0;
  __uint64 mask = 1;
  mask <<= 63;
  while (mask != 0 && (val & mask) != 0) {
    count++;
    mask >>= 1;
  }
  return count;
}

/* Count consecutive zero bits from the top of a 32-bit value. */
inline static __int32 __raw_lead_zero (__uint32 val) {
  __uint32 count = 0;
  __uint32 mask = 0x80000000U;
  while (mask != 0 && (val & mask) == 0) {
    count++;
    mask >>= 1;
  }
  return count;
}

/* Count consecutive zero bits from the top of a 64-bit value. */
inline static __int32 __raw_lllead_zero (__uint64 val) {
  __int32 count = 0;
  __uint64 mask = 1;
  mask <<= 63;
  while (mask != 0 && (val & mask) == 0) {
    count++;
    mask >>= 1;
  }
  return count;
}

inline static __int32 __raw_count_ones (__int32 val) {
  __int32 c = 0, i;
  for (i=0; i<32; i++) {
    c += (val & 0x1);
    val >>= 1;
  }
  return c;
}

inline static __int32 __raw_lcount_ones (long val) {
  __int32 c = 0, i;
  for (i=0; i<32; i++) {
    c += (val & 0x1);
    val >>= 1;
  }
  return c;
}

inline static __int32 __raw_llcount_ones (__int64 val) {
  __int32 c = 0, i;
  for (i=0; i<64; i++) {
    c += (__int32) (val & 0x1);
    val >>= 1;
  }
  return c;
}

/* Bit-reversed add: ripple-carry addition of a and b performed MSB-first,
 * with the carry propagating toward the LSB — i.e. the two operands are
 * added as if their bit order were reversed (used for bit-reversed FFT
 * index arithmetic). */
inline static __int32 __raw_addbitrev (__int32 a, __int32 b) {
  __int32 r, ba, bb, c, i;

  r = 0;
  c = 0;
  for (i=0; i<32; i++) {
    ba = (a & 0x80000000) ? 1 : 0;   /* current MSB of a */
    bb = (b & 0x80000000) ? 1 : 0;   /* current MSB of b */
    a <<= 1;
    b <<= 1;
    r = (r << 1) | ((ba + bb + c) & 1);
    c = ((ba + bb + c) & 2) >> 1;    /* carry moves to the NEXT LOWER bit */
  }

  return r;
}

/* 2x32 operations */

/* Lane-wise 32-bit add of two 2x32 vectors (wrapping). */
inline static __int2x32 __raw_add_2x32 (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__sel32ll (a, 0) + __sel32ll (b, 0),
                           __sel32ll (a, 1) + __sel32ll (b, 1));
}

/* Lane-wise unsigned 32-bit add (wrapping). */
inline static __int2x32 __raw_add_2x32u (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__usel32ll (a, 0) + __usel32ll (b, 0),
                           __usel32ll (a, 1) + __usel32ll (b, 1));
}

/* Lane-wise signed saturating add: each lane widened to 64 bits, clamped. */
inline static __int2x32 __raw_add_2x32_sat (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__sat32 ((__int64) __sel32ll (a, 0) +
                                    (__int64) __sel32ll (b, 0)),
                           __sat32 ((__int64) __sel32ll (a, 1) +
                                    (__int64) __sel32ll (b, 1)));
}

/* Lane-wise unsigned saturating add. */
inline static __int2x32 __raw_add_u2x32_sat (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__usat32 ((__uint64) __usel32ll (a, 0) +
                                     (__uint64) __usel32ll (b, 0)),
                           __usat32 ((__uint64) __usel32ll (a, 1) +
                                     (__uint64) __usel32ll (b, 1)));
}

/* Lane-wise 32-bit subtract (wrapping). */
inline static __int2x32 __raw_sub_2x32 (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__sel32ll (a, 0) - __sel32ll (b, 0),
                           __sel32ll (a, 1) - __sel32ll (b, 1));
}

/* Lane-wise unsigned 32-bit subtract (wrapping). */
inline static __int2x32 __raw_sub_2x32u (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__usel32ll (a, 0) - __usel32ll (b, 0),
                           __usel32ll (a, 1) - __usel32ll (b, 1));
}

/* Lane-wise signed saturating subtract. */
inline static __int2x32 __raw_sub_2x32_sat (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__sat32 ((__int64) __sel32ll (a, 0) -
                                    (__int64) __sel32ll (b, 0)),
                           __sat32 ((__int64) __sel32ll (a, 1) -
                                    (__int64) __sel32ll (b, 1)));
}

/* Lane-wise unsigned saturating subtract. */
inline static __int2x32 __raw_sub_u2x32_sat (__int2x32 a, __int2x32 b) {
  return __raw_compose_64 (__usat32 ((__uint64) __usel32ll (a, 0) -
                                     (__uint64) __usel32ll (b, 0)),
                           __usat32 ((__uint64) __usel32ll (a, 1) -
                                     (__uint64) __usel32ll (b, 1)));
}

/* Sum of the two 32-bit lanes (wrapping). */
inline static __int32 __raw_sum_2x32 (__int2x32 a) {
  return __sel32ll (a, 0) + __sel32ll (a, 1);
}

/* 4x32 operations */

/* Lane-wise 32-bit add of two quads (wrapping). */
inline static __raw_quad __raw_add_4x32 (__raw_quad a, __raw_quad b) {
  __raw_quad c;
  c.i0 = a.i0 + b.i0;
  c.i1 = a.i1 + b.i1;
  c.i2 = a.i2 + b.i2;
  c.i3 = a.i3 + b.i3;
  return c;
}

/* Lane-wise signed saturating add: each lane widened to 64 bits, clamped. */
inline static __raw_quad __raw_add_4x32_sat (__raw_quad a, __raw_quad b) {
  __raw_quad c;
  c.i0 = __sat32 ((__int64) a.i0 + (__int64) b.i0);
  c.i1 = __sat32 ((__int64) a.i1 + (__int64) b.i1);
  c.i2 = __sat32 ((__int64) a.i2 + (__int64) b.i2);
  c.i3 = __sat32 ((__int64) a.i3 + (__int64) b.i3);
  return c;
}

/* Lane-wise unsigned saturating add of two quads.
 *
 * Fix: each (signed-typed) lane is reinterpreted as unsigned 32 bits by
 * casting through __uint32 BEFORE the widening to 64 bits.  The original
 * cast `(__uint64) a.i0` sign-extended negative lane patterns to huge
 * 64-bit values, which made any lane with its top bit set saturate
 * incorrectly.  This now matches the scalar __raw_uadd_sat, whose
 * operands are already __uint32. */
inline static __raw_quad __raw_add_u4x32_sat (__raw_quad a, __raw_quad b) {
  __raw_quad c;
  c.i0 = __usat32 ((__uint64) (__uint32) a.i0 + (__uint64) (__uint32) b.i0);
  c.i1 = __usat32 ((__uint64) (__uint32) a.i1 + (__uint64) (__uint32) b.i1);
  c.i2 = __usat32 ((__uint64) (__uint32) a.i2 + (__uint64) (__uint32) b.i2);
  c.i3 = __usat32 ((__uint64) (__uint32) a.i3 + (__uint64) (__uint32) b.i3);
  return c;
}

/* Lane-wise 32-bit subtract of two quads (wrapping). */
inline static __raw_quad __raw_sub_4x32 (__raw_quad a, __raw_quad b) {
  __raw_quad c;
  c.i0 = a.i0 - b.i0;
  c.i1 = a.i1 - b.i1;
  c.i2 = a.i2 - b.i2;
  c.i3 = a.i3 - b.i3;
  return c;
}

/* Lane-wise signed saturating subtract: lanes widened to 64 bits, clamped. */
inline static __raw_quad __raw_sub_4x32_sat (__raw_quad a, __raw_quad b) {
  __raw_quad c;
  c.i0 = __sat32 ((__int64) a.i0 - (__int64) b.i0);
  c.i1 = __sat32 ((__int64) a.i1 - (__int64) b.i1);
  c.i2 = __sat32 ((__int64) a.i2 - (__int64) b.i2);
  c.i3 = __sat32 ((__int64) a.i3 - (__int64) b.i3);
  return c;
}

/* Lane-wise unsigned saturating subtract of two quads.
 *
 * Fix: each lane is reinterpreted as unsigned 32 bits (cast through
 * __uint32) before widening.  The original used `(__int64)` casts, which
 * sign-extended lanes with the top bit set and so computed a signed
 * difference instead of the unsigned one; the scalar __raw_usub_sat
 * (whose operands are __uint32) shows the intended semantics. */
inline static __raw_quad __raw_sub_u4x32_sat (__raw_quad a, __raw_quad b) {
  __raw_quad c;
  c.i0 = __usat32 ((__uint64) (__uint32) a.i0 - (__uint64) (__uint32) b.i0);
  c.i1 = __usat32 ((__uint64) (__uint32) a.i1 - (__uint64) (__uint32) b.i1);
  c.i2 = __usat32 ((__uint64) (__uint32) a.i2 - (__uint64) (__uint32) b.i2);
  c.i3 = __usat32 ((__uint64) (__uint32) a.i3 - (__uint64) (__uint32) b.i3);
  return c;
}

/* 2x16 operations */

/* Lane-wise 16-bit add of two 2x16 vectors (wrapping). */
inline static __int2x16 __raw_add_2x16 (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sel16i (a, 0) + __sel16i (b, 0),
      __sel16i (a, 1) + __sel16i (b, 1)));
}

/* Lane-wise unsigned 16-bit add (wrapping). */
inline static __int2x16 __raw_add_u2x16 (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      (__uint32) __sel16i (a, 0) + (__uint32) __sel16i (b, 0),
      (__uint32) __sel16i (a, 1) + (__uint32) __sel16i (b, 1)));
}

/* Lane-wise signed saturating 16-bit add. */
inline static __int2x16 __raw_add_2x16_sat (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sat16 (__sel16i (a, 0) + __sel16i (b, 0)),
      __sat16 (__sel16i (a, 1) + __sel16i (b, 1))));
}

/* Lane-wise unsigned saturating 16-bit add. */
inline static __int2x16 __raw_add_u2x16_sat (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __usat16 (__usel16i (a, 0) + __usel16i (b, 0)),
      __usat16 (__usel16i (a, 1) + __usel16i (b, 1))));
}

/* Lane-wise 16-bit subtract (wrapping). */
inline static __int2x16 __raw_sub_2x16 (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sel16i (a, 0) - __sel16i (b, 0),
      __sel16i (a, 1) - __sel16i (b, 1)));
}

/* Lane-wise unsigned 16-bit subtract (wrapping). */
inline static __int2x16 __raw_sub_u2x16 (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      (__uint32) __sel16i (a, 0) - (__uint32) __sel16i (b, 0),
      (__uint32) __sel16i (a, 1) - (__uint32) __sel16i (b, 1)));
}

/* Lane-wise signed saturating 16-bit subtract. */
inline static __int2x16 __raw_sub_2x16_sat (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sat16 (__sel16i (a, 0) - __sel16i (b, 0)),
      __sat16 (__sel16i (a, 1) - __sel16i (b, 1))));
}

/* Lane-wise unsigned saturating 16-bit subtract. */
inline static __int2x16 __raw_sub_u2x16_sat (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __usat16 (__usel16i (a, 0) - __usel16i (b, 0)),
      __usat16 (__usel16i (a, 1) - __usel16i (b, 1))));
}

/* Lane-wise saturating 16-bit negate. */
inline static __int2x16 __raw_neg_2x16_sat (__int2x16 a) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sat16(-__sel16i(a, 0)),
      __sat16(-__sel16i(a, 1))));
}

/* Lane-wise 16-bit absolute value (non-saturating: -32768 stays -32768
 * after truncating compaction). */
inline static __int2x16 __raw_abs_2x16 (__int2x16 a) {
  __int32 a0, a1;
  a0 = __sel16i (a, 0);
  a1 = __sel16i (a, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 ((a0 > 0) ? a0 : -a0, (a1 > 0) ? a1 : -a1));
}

/* Lane-wise clip of a to [-|b|, +|b|] per 16-bit lane. */
inline static __int2x16 __raw_clip_2x16 (__int2x16 a, __int2x16 b) {
  __int32 a0, a1;
  __int32 b0, b1;
  a0 = __sel16i (a, 0);
  a1 = __sel16i (a, 1);
  b0 = __sel16i (b, 0);
  b1 = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__raw_clip (a0, b0), __raw_clip (a1, b1)));
}

/* Interleave the lanes of a and b into a 4x16 vector:
 * result lanes (low..high) = a.lo, b.lo, a.hi, b.hi. */
inline static __int4x16 __raw_merge_2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (alo, blo)),
    __raw_compact_to_i2x16 (__raw_compose_64 (ahi, bhi)));
}

/* Lane-wise signed 16-bit maximum. */
inline static __int2x16 __raw_max_2x16 (__int2x16 a, __int2x16 b) {
  __int32 a0, a1;
  __int32 b0, b1;
  a0 = __sel16i (a, 0);
  a1 = __sel16i (a, 1);
  b0 = __sel16i (b, 0);
  b1 = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__raw_max (a0, b0), __raw_max (a1, b1)));
}

/* Lane-wise unsigned 16-bit maximum (sign-extension preserves the
 * unsigned 16-bit ordering, so the unsigned 32-bit compare is correct). */
inline static __int2x16 __raw_max_u2x16 (__int2x16 a, __int2x16 b) {
  __uint32 a0, a1;
  __uint32 b0, b1;
  a0 = __sel16i (a, 0);
  a1 = __sel16i (a, 1);
  b0 = __sel16i (b, 0);
  b1 = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__umax (a0, b0), __umax (a1, b1)));
}

/* Lane-wise signed 16-bit minimum. */
inline static __int2x16 __raw_min_2x16 (__int2x16 a, __int2x16 b) {
  __int32 a0, a1;
  __int32 b0, b1;
  a0 = __sel16i (a, 0);
  a1 = __sel16i (a, 1);
  b0 = __sel16i (b, 0);
  b1 = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__raw_min (a0, b0), __raw_min (a1, b1)));
}

/* Lane-wise unsigned 16-bit minimum. */
inline static __int2x16 __raw_min_u2x16 (__int2x16 a, __int2x16 b) {
  __uint32 a0, a1;
  __uint32 b0, b1;
  a0 = __sel16i (a, 0);
  a1 = __sel16i (a, 1);
  b0 = __sel16i (b, 0);
  b1 = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__umin (a0, b0), __umin (a1, b1)));
}

/* Sum of the two sign-extended 16-bit lanes. */
inline static __int32 __raw_sum_2x16 (__int2x16 a) {
  return __sel16i (a, 0) + __sel16i (a, 1);
}

/* Saturating 32-bit multiply: the full 64-bit product is reduced to 32
 * bits; positive overflow clamps to 0x7fffffff, large negative products
 * clamp to 0x80000000, and other negative products are truncated with
 * the sign bit forced. */
inline static __int32 __raw_mult_sat(__int32 a, __int32 b)
{
  __int64 result = ((__int64)a * (__int64)b);
  if (result & __ULLCONST (0x8000000000000000)) {   /* negative product */
    if  (-result > 0x7fffffff) {
      return 0x80000000;                            /* clamp to INT_MIN */
    } else
      return result | 0x80000000;                   /* truncate, force sign */
  } else if (result > 0x7fffffff) {
    return 0x7fffffff;                              /* clamp to INT_MAX */
  }
  else
   return (int)result;
}

/* 1.31 fractional multiply: 64-bit product shifted down by 31 (truncating). */
inline static __int32 __raw_mult_fr1x32(__int32 a, __int32 b)
{
  return ((__int64)a * (__int64)b) >> 31;
}

/* Rounded 1.31 fractional multiply: bit 30 of the product (the bit just
 * below the cut) rounds the shifted result up. */
inline static __int32 __raw_multr_fr1x32(__int32 a, __int32 b)
{
  __int64 result = ((__int64)a * (__int64)b);
  if (result & 0x40000000)
    return ((result >> 31) + 1);
  else
    return (result >> 31);
}

/* Saturating 1.31 fractional multiply.
 * NOTE(review): the `result >= 0xffffffff` compare looks unreachable —
 * the shifted product never exceeds 2^31 — so the saturation branch
 * appears dead; confirm the intended clamp bound against the hardware
 * XCORRS/fract semantics. */
inline static __int32 __raw_mult_fr1x32_sat(__int32 a, __int32 b)
{
  __int64 result = ((__int64)a * (__int64)b);
  result = result >> 31;

  if (result >= 0xffffffff)
    return 0xffffffff;
  else
    return result;
}

/* Rounded saturating 1.31 fractional multiply.
 * NOTE(review): same apparently-dead saturation compare as above. */
inline static __int32 __raw_multr_fr1x32_sat(__int32 a, __int32 b)
{
  __int64 result = ((__int64)a * (__int64)b);

  if (result & 0x40000000)
    result = (result >> 31) + 1;   /* round up on the half bit */
  else
    result = result >> 31;

  if (result >= 0xffffffff)
    return 0xffffffff;
  else
    return result;
}

/* Lane-wise signed 16-bit multiply (wrapping/truncating). */
inline static __int2x16 __raw_mult_i2x16 (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sel16i (a, 0) * __sel16i (b, 0),
      __sel16i (a, 1) * __sel16i (b, 1)));
}

/* Lane-wise unsigned 16-bit multiply (wrapping/truncating). */
inline static __int2x16 __raw_mult_u2x16 (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __usel16i (a, 0) * __usel16i (b, 0),
      __usel16i (a, 1) * __usel16i (b, 1)));
}

/* Lane-wise signed 16-bit multiply, saturated to 16 bits. */
inline static __int2x16 __raw_mult_i2x16_sat (__int2x16 a, __int2x16 b) {
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (
      __sat16 (__sel16i (a, 0) * __sel16i (b, 0)),
      __sat16 (__sel16i (a, 1) * __sel16i (b, 1))));
}

/* Lane-wise signed 16x16 multiply with full 32-bit results per lane. */
inline static __int2x32 __raw_mult_i2x16_wide (__int2x16 a, __int2x16 b) {
  return __raw_compose_64 (
    __sel16i (a, 0) * __sel16i (b, 0),
    __sel16i (a, 1) * __sel16i (b, 1));
}

/* Lane-wise unsigned 16x16 multiply with full 32-bit results per lane. */
inline static __int2x32 __raw_mult_u2x16_wide (__int2x16 a, __int2x16 b) {
  return __raw_compose_64 (
    __usel16i (a, 0) * __usel16i (b, 0),
    __usel16i (a, 1) * __usel16i (b, 1));
}

/* Complex 16-bit multiply (low lane = real, high lane = imaginary):
 * re = ar*br - ai*bi, im = ai*br + ar*bi; truncated to 16 bits. */
inline static __int2x16 __raw_cmult_i2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (alo * blo - ahi * bhi,
                      ahi * blo + alo * bhi));
}

/* Complex 16-bit multiply, saturated to 16 bits per component. */
inline static __int2x16 __raw_cmult_i2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (alo * blo - ahi * bhi,
                      ahi * blo + alo * bhi));
}

/* Complex 16-bit multiply by the conjugate of b, truncated. */
inline static __int2x16 __raw_cmult_conj_i2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (alo * blo + ahi * bhi,
                      ahi * blo - alo * bhi));
}

/* Complex 16-bit multiply by the conjugate of b, saturated. */
inline static __int2x16 __raw_cmult_conj_i2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (alo * blo + ahi * bhi,
                      ahi * blo - alo * bhi));
}

/* Complex 16-bit multiply with full 32-bit components. */
inline static __int2x32 __raw_cmult_i2x16_wide (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compose_64 (alo * blo - ahi * bhi,
                           ahi * blo + alo * bhi);
}

/* Conjugate complex 16-bit multiply with full 32-bit components. */
inline static __int2x32 __raw_cmult_conj_i2x16_wide (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compose_64 (alo * blo + ahi * bhi,
                           ahi * blo - alo * bhi);
}

/* fr2x16 operations */

/* Lane-wise 1.15 fractional multiply: products arithmetic-shifted down
 * by 15, truncated to 16 bits. */
inline static __int2x16 __raw_mult_fr2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__ashiftr (alo * blo, 15),
                      __ashiftr (ahi * bhi, 15)));
}

/* As above, saturated to 16 bits per lane. */
inline static __int2x16 __raw_mult_fr2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (__ashiftr (alo * blo, 15),
                      __ashiftr (ahi * bhi, 15)));
}

/* Rounded 1.15 fractional multiply: doubled products handed to the
 * rounding compactor (which keeps the upper half, rounds to even on
 * ties, and saturates). */
inline static __int2x16 __raw_multr_fr2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_fr2x16 (
    __raw_compose_64 (alo * blo * 2,
                      ahi * bhi * 2));
}

/* Rounded saturating 1.15 fractional multiply.
 * NOTE(review): identical body to __raw_multr_fr2x16 — the rounding
 * compactor already saturates, so both variants coincide. */
inline static __int2x16 __raw_multr_fr2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_fr2x16 (
    __raw_compose_64 (alo * blo * 2,
                      ahi * bhi * 2));
}

/* Complex 1.15 fractional multiply (low = real, high = imaginary),
 * components shifted down by 15, truncated to 16 bits. */
inline static __int2x16 __raw_cmult_fr2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__ashiftr (alo * blo - ahi * bhi, 15),
                      __ashiftr (ahi * blo + alo * bhi, 15)));
}

/* Complex 1.15 fractional multiply, saturated per component. */
inline static __int2x16 __raw_cmult_fr2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (__ashiftr (alo * blo - ahi * bhi, 15),
                      __ashiftr (ahi * blo + alo * bhi, 15)));
}

/* Rounded complex 1.15 fractional multiply: 0x4000 added before the
 * shift rounds each component to nearest. */
inline static __int2x16 __raw_cmultr_fr2x16 (__int2x16 a, __int2x16 b)
{
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__ashiftr (alo * blo - ahi * bhi + 0x4000, 15),
                      __ashiftr (ahi * blo + alo * bhi + 0x4000, 15)));
}

/* Rounded complex 1.15 fractional multiply, saturated per component. */
inline static __int2x16 __raw_cmultr_fr2x16_sat (__int2x16 a, __int2x16 b)
{
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (__ashiftr (alo * blo - ahi * bhi + 0x4000, 15),
                          __ashiftr (ahi * blo + alo * bhi + 0x4000, 15)));
}

/* Conjugate complex 1.15 fractional multiply, truncated. */
inline static __int2x16 __raw_cmult_conj_fr2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__ashiftr (alo * blo + ahi * bhi, 15),
                      __ashiftr (ahi * blo - alo * bhi, 15)));
}

/* Conjugate complex 1.15 fractional multiply, saturated. */
inline static __int2x16 __raw_cmult_conj_fr2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (__ashiftr (alo * blo + ahi * bhi, 15),
                      __ashiftr (ahi * blo - alo * bhi, 15)));
}

/* Rounded conjugate complex 1.15 fractional multiply, truncated. */
inline static __int2x16 __raw_cmultr_conj_fr2x16 (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16 (
    __raw_compose_64 (__ashiftr (alo * blo + ahi * bhi + 0x4000, 15),
                      __ashiftr (ahi * blo - alo * bhi + 0x4000, 15)));
}

/* Rounded conjugate complex 1.15 fractional multiply, saturated. */
inline static __int2x16 __raw_cmultr_conj_fr2x16_sat (__int2x16 a, __int2x16 b) {
  __int32 alo = __sel16i (a, 0);
  __int32 blo = __sel16i (b, 0);
  __int32 ahi = __sel16i (a, 1);
  __int32 bhi = __sel16i (b, 1);
  return __raw_compact_to_i2x16_sat (
    __raw_compose_64 (__ashiftr (alo * blo + ahi * bhi + 0x4000, 15),
                      __ashiftr (ahi * blo - alo * bhi + 0x4000, 15)));
}

/* 4x16 operations */

/* Lane-wise 16-bit add of two 4x16 vectors, via the 2x16 helpers. */
inline static __int4x16 __raw_add_4x16 (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_add_2x16 (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_add_2x16 (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise signed saturating 16-bit add. */
inline static __int4x16 __raw_add_4x16_sat (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_add_2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_add_2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise unsigned saturating 16-bit add. */
inline static __int4x16 __raw_add_u4x16_sat (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_add_u2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_add_u2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise 16-bit subtract. */
inline static __int4x16 __raw_sub_4x16 (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_sub_2x16 (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_sub_2x16 (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise signed saturating 16-bit subtract. */
inline static __int4x16 __raw_sub_4x16_sat (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_sub_2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_sub_2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise unsigned saturating 16-bit subtract. */
inline static __int4x16 __raw_sub_u4x16_sat (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_sub_u2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_sub_u2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise saturating 16-bit negate. */
inline static __int4x16 __raw_neg_4x16_sat (__int4x16 a) {
  return __raw_compose_64 (
    __raw_compact_to_i2x16(
      __raw_compose_64 (__sat16(-__sel16ll(a, 0)),
                        __sat16(-__sel16ll(a, 1)))),
    __raw_compact_to_i2x16(
      __raw_compose_64 (__sat16(-__sel16ll(a, 2)),
                        __sat16(-__sel16ll(a, 3)))));
}

/* Lane-wise 16-bit absolute value over four lanes (non-saturating:
 * -32768 stays -32768 after truncating compaction). */
inline static __int4x16 __raw_abs_4x16 (__int4x16 a) {
  __int32 a0, a1, a2, a3;
  a0 = __sel16ll (a, 0);
  a1 = __sel16ll (a, 1);
  a2 = __sel16ll (a, 2);
  a3 = __sel16ll (a, 3);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 ((a0 > 0) ? a0 : -a0,
                                              (a1 > 0) ? a1 : -a1)),
    __raw_compact_to_i2x16 (__raw_compose_64 ((a2 > 0) ? a2 : -a2,
                                              (a3 > 0) ? a3 : -a3)));
}

/* Lane-wise clip of a to [-|b|, +|b|] per 16-bit lane, four lanes. */
inline static __int4x16 __raw_clip_4x16 (__int4x16 a, __int4x16 b) {
  __int32 a0, a1, a2, a3;
  __int32 b0, b1, b2, b3;
  a0 = __sel16ll (a, 0);
  a1 = __sel16ll (a, 1);
  a2 = __sel16ll (a, 2);
  a3 = __sel16ll (a, 3);
  b0 = __sel16ll (b, 0);
  b1 = __sel16ll (b, 1);
  b2 = __sel16ll (b, 2);
  b3 = __sel16ll (b, 3);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (__raw_clip (a0, b0),
                                              __raw_clip (a1, b1))),
    __raw_compact_to_i2x16 (__raw_compose_64 (__raw_clip (a2, b2),
                                              __raw_clip (a3, b3))));
}

/* Lane-wise signed 16-bit maximum, four lanes. */
inline static __int4x16 __raw_max_4x16 (__int4x16 a, __int4x16 b) {
  __int32 a0, a1, a2, a3;
  __int32 b0, b1, b2, b3;
  a0 = __sel16ll (a, 0);
  a1 = __sel16ll (a, 1);
  a2 = __sel16ll (a, 2);
  a3 = __sel16ll (a, 3);
  b0 = __sel16ll (b, 0);
  b1 = __sel16ll (b, 1);
  b2 = __sel16ll (b, 2);
  b3 = __sel16ll (b, 3);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (__raw_max (a0, b0),
                                              __raw_max (a1, b1))),
    __raw_compact_to_i2x16 (__raw_compose_64 (__raw_max (a2, b2),
                                              __raw_max (a3, b3))));
}

/* Lane-wise unsigned 16-bit maximum, four lanes (sign-extension
 * preserves the unsigned 16-bit ordering under the 32-bit compare). */
inline static __int4x16 __raw_max_u4x16 (__int4x16 a, __int4x16 b) {
  __uint32 a0, a1, a2, a3;
  __uint32 b0, b1, b2, b3;
  a0 = __sel16ll (a, 0);
  a1 = __sel16ll (a, 1);
  a2 = __sel16ll (a, 2);
  a3 = __sel16ll (a, 3);
  b0 = __sel16ll (b, 0);
  b1 = __sel16ll (b, 1);
  b2 = __sel16ll (b, 2);
  b3 = __sel16ll (b, 3);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (__umax (a0, b0),
                                              __umax (a1, b1))),
    __raw_compact_to_i2x16 (__raw_compose_64 (__umax (a2, b2),
                                              __umax (a3, b3))));
}

inline static __int4x16 __raw_min_4x16 (__int4x16 a, __int4x16 b) {
  __int32 a0, a1, a2, a3;
  __int32 b0, b1, b2, b3;
  a0 = __sel16ll (a, 0);
  a1 = __sel16ll (a, 1);
  a2 = __sel16ll (a, 2);
  a3 = __sel16ll (a, 3);
  b0 = __sel16ll (b, 0);
  b1 = __sel16ll (b, 1);
  b2 = __sel16ll (b, 2);
  b3 = __sel16ll (b, 3);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (__raw_min (a0, b0),
                                              __raw_min (a1, b1))),
    __raw_compact_to_i2x16 (__raw_compose_64 (__raw_min (a2, b2),
                                              __raw_min (a3, b3))));
}

inline static __int4x16 __raw_min_u4x16 (__int4x16 a, __int4x16 b) {
  __uint32 a0, a1, a2, a3;
  __uint32 b0, b1, b2, b3;
  a0 = __sel16ll (a, 0);
  a1 = __sel16ll (a, 1);
  a2 = __sel16ll (a, 2);
  a3 = __sel16ll (a, 3);
  b0 = __sel16ll (b, 0);
  b1 = __sel16ll (b, 1);
  b2 = __sel16ll (b, 2);
  b3 = __sel16ll (b, 3);
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (__umin (a0, b0),
                                              __umin (a1, b1))),
    __raw_compact_to_i2x16 (__raw_compose_64 (__umin (a2, b2),
                                              __umin (a3, b3))));
}

inline static __int32 __raw_sum_4x16 (__int4x16 a) {
  return __sel16ll (a, 0) +
         __sel16ll (a, 1) +
         __sel16ll (a, 2) +
         __sel16ll (a, 3);
}

/* Lane-wise 16-bit integer multiply (result truncated per lane by
   __raw_mult_i2x16). */
inline static __int4x16 __raw_mult_i4x16 (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_mult_i2x16 (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_mult_i2x16 (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise unsigned 16-bit multiply.
   NOTE(review): reuses the signed __raw_mult_i2x16 — if that helper
   keeps only the low 16 bits of each product, signed and unsigned
   multiplies coincide modulo 2^16, so this is presumably intentional;
   confirm. */
inline static __int4x16 __raw_mult_u4x16 (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_mult_i2x16 (__usel32ll (a, 0), __usel32ll (b, 0)),
    __raw_mult_i2x16 (__usel32ll (a, 1), __usel32ll (b, 1)));
}

/* Lane-wise saturating signed 16-bit multiply. */
inline static __int4x16 __raw_mult_i4x16_sat (__int4x16 a, __int4x16 b) {
  return __raw_compose_64 (
    __raw_mult_i2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_mult_i2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise signed 16x16->32 widening multiply; yields a 128-bit quad
   holding four 32-bit products. */
inline static __raw_quad __raw_mult_i4x16_wide (__int4x16 a, __int4x16 b) {
  return __raw_compose_128 (
    __raw_mult_i2x16_wide (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_mult_i2x16_wide (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise unsigned 16x16->32 widening multiply. */
inline static __raw_quad __raw_mult_u4x16_wide (__int4x16 a, __int4x16 b) {
  return __raw_compose_128 (
    __raw_mult_u2x16_wide (__usel32ll (a, 0), __usel32ll (b, 0)),
    __raw_mult_u2x16_wide (__usel32ll (a, 1), __usel32ll (b, 1)));
}

/* fr4x16 operations */

/* Lane-wise fractional (fract16) multiply, truncating. */
inline static __int4x16 __raw_mult_fr4x16 (__int4x16 a,__int4x16 b) {
  return __raw_compose_64 (
    __raw_mult_fr2x16 (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_mult_fr2x16 (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise fractional multiply, truncating, with saturation. */
inline static __int4x16 __raw_mult_fr4x16_sat (__int4x16 a,__int4x16 b) {
  return __raw_compose_64 (
    __raw_mult_fr2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_mult_fr2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise fractional multiply with rounding. */
inline static __int4x16 __raw_multr_fr4x16 (__int4x16 a,__int4x16 b) {
  return __raw_compose_64 (
    __raw_multr_fr2x16 (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_multr_fr2x16 (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* Lane-wise fractional multiply with rounding and saturation. */
inline static __int4x16 __raw_multr_fr4x16_sat (__int4x16 a,__int4x16 b) {
  return __raw_compose_64 (
    __raw_multr_fr2x16_sat (__sel32ll (a, 0), __sel32ll (b, 0)),
    __raw_multr_fr2x16_sat (__sel32ll (a, 1), __sel32ll (b, 1)));
}

/* 4x8 operations */

/* Saturating negate of four 8-bit lanes: negate in 32-bit arithmetic,
   clamp with __sat8, re-pack with __compact_to_i4x8_from_i32. */
inline static __int4x8 __raw_neg_4x8_sat (__int4x8 a) {
  return __compact_to_i4x8_from_i32 (
    __sat8 (-(__sel8i (a, 0))),
    __sat8 (-(__sel8i (a, 1))),
    __sat8 (-(__sel8i (a, 2))),
    __sat8 (-(__sel8i (a, 3))));
}

/* Wrapping (non-saturating) add of four 8-bit lanes. */
inline static __int4x8 __raw_add_4x8 (__int4x8 a, __int4x8 b) {
  return __compact_to_i4x8_from_i32 (
    __sel8i (a, 0) + __sel8i (b, 0),
    __sel8i (a, 1) + __sel8i (b, 1),
    __sel8i (a, 2) + __sel8i (b, 2),
    __sel8i (a, 3) + __sel8i (b, 3));
}

/* Saturating signed add of four 8-bit lanes. */
inline static __int4x8 __raw_add_4x8_sat (__int4x8 a, __int4x8 b) {
  return __compact_to_i4x8_from_i32 (
    __sat8 (__sel8i (a, 0) + __sel8i (b, 0)),
    __sat8 (__sel8i (a, 1) + __sel8i (b, 1)),
    __sat8 (__sel8i (a, 2) + __sel8i (b, 2)),
    __sat8 (__sel8i (a, 3) + __sel8i (b, 3)));
}

/* Saturating unsigned add of four 8-bit lanes (unsigned selector and
   unsigned clamp). */
inline static __int4x8 __raw_add_u4x8_sat (__int4x8 a, __int4x8 b) {
  return __compact_to_i4x8_from_i32 (
    __usat8 (__usel8i (a, 0) + __usel8i (b, 0)),
    __usat8 (__usel8i (a, 1) + __usel8i (b, 1)),
    __usat8 (__usel8i (a, 2) + __usel8i (b, 2)),
    __usat8 (__usel8i (a, 3) + __usel8i (b, 3)));
}

/* Wrapping subtract of four 8-bit lanes. */
inline static __int4x8 __raw_sub_4x8 (__int4x8 a, __int4x8 b) {
  return __compact_to_i4x8_from_i32 (
    __sel8i (a, 0) - __sel8i (b, 0),
    __sel8i (a, 1) - __sel8i (b, 1),
    __sel8i (a, 2) - __sel8i (b, 2),
    __sel8i (a, 3) - __sel8i (b, 3));
}

/* Saturating signed subtract of four 8-bit lanes. */
inline static __int4x8 __raw_sub_4x8_sat (__int4x8 a, __int4x8 b) {
  return __compact_to_i4x8_from_i32 (
    __sat8 (__sel8i (a, 0) - __sel8i (b, 0)),
    __sat8 (__sel8i (a, 1) - __sel8i (b, 1)),
    __sat8 (__sel8i (a, 2) - __sel8i (b, 2)),
    __sat8 (__sel8i (a, 3) - __sel8i (b, 3)));
}

/* Saturating unsigned subtract of four 8-bit lanes. */
inline static __int4x8 __raw_sub_u4x8_sat (__int4x8 a, __int4x8 b) {
  return __compact_to_i4x8_from_i32 (
    __usat8 (__usel8i (a, 0) - __usel8i (b, 0)),
    __usat8 (__usel8i (a, 1) - __usel8i (b, 1)),
    __usat8 (__usel8i (a, 2) - __usel8i (b, 2)),
    __usat8 (__usel8i (a, 3) - __usel8i (b, 3)));
}

/* Per-lane absolute value of the four 8-bit lanes.  No explicit
   saturation is applied before the re-pack. */
inline static __int4x8 __raw_abs_4x8 (__int4x8 a) {
  __int32 e0 = __sel8i (a, 0);
  __int32 e1 = __sel8i (a, 1);
  __int32 e2 = __sel8i (a, 2);
  __int32 e3 = __sel8i (a, 3);
  if (e0 < 0) e0 = -e0;
  if (e1 < 0) e1 = -e1;
  if (e2 < 0) e2 = -e2;
  if (e3 < 0) e3 = -e3;
  return __compact_to_i4x8_from_i32 (e0, e1, e2, e3);
}

/* Per-lane clip of a against b, four 8-bit lanes (semantics of
   __raw_clip, defined earlier in this file). */
inline static __int4x8 __raw_clip_4x8 (__int4x8 a, __int4x8 b) {
  __int32 a0, a1, a2, a3;
  __int32 b0, b1, b2, b3;
  a0 = __sel8i (a, 0);
  a1 = __sel8i (a, 1);
  a2 = __sel8i (a, 2);
  a3 = __sel8i (a, 3);
  b0 = __sel8i (b, 0);
  b1 = __sel8i (b, 1);
  b2 = __sel8i (b, 2);
  b3 = __sel8i (b, 3);
  return __compact_to_i4x8_from_i32 (__raw_clip (a0, b0), __raw_clip (a1, b1),
                                     __raw_clip (a2, b2), __raw_clip (a3, b3));
}

/* Interleave the four byte lanes of a and b into an 8x8 result whose
   lane order is a0, b0, a1, b1, a2, b2, a3, b3. */
inline static __int8x8 __raw_merge_4x8 (__int4x8 a, __int4x8 b) {
  return __compact_to_i8x8_from_i32 (
    __sel8i (a, 0), __sel8i (b, 0),
    __sel8i (a, 1), __sel8i (b, 1),
    __sel8i (a, 2), __sel8i (b, 2),
    __sel8i (a, 3), __sel8i (b, 3));
}

/* Per-lane signed maximum of four 8-bit lanes. */
inline static __int4x8 __raw_max_4x8 (__int4x8 a, __int4x8 b) {
  __int32 a0, a1, a2, a3;
  __int32 b0, b1, b2, b3;
  a0 = __sel8i (a, 0);
  a1 = __sel8i (a, 1);
  a2 = __sel8i (a, 2);
  a3 = __sel8i (a, 3);
  b0 = __sel8i (b, 0);
  b1 = __sel8i (b, 1);
  b2 = __sel8i (b, 2);
  b3 = __sel8i (b, 3);
  return __compact_to_i4x8_from_i32 (__raw_max (a0, b0), __raw_max (a1, b1),
                                     __raw_max (a2, b2), __raw_max (a3, b3));
}

/* Per-lane signed minimum of four 8-bit lanes. */
inline static __int4x8 __raw_min_4x8 (__int4x8 a, __int4x8 b) {
  __int32 a0, a1, a2, a3;
  __int32 b0, b1, b2, b3;
  a0 = __sel8i (a, 0);
  a1 = __sel8i (a, 1);
  a2 = __sel8i (a, 2);
  a3 = __sel8i (a, 3);
  b0 = __sel8i (b, 0);
  b1 = __sel8i (b, 1);
  b2 = __sel8i (b, 2);
  b3 = __sel8i (b, 3);
  return __compact_to_i4x8_from_i32 (__raw_min (a0, b0), __raw_min (a1, b1),
                                     __raw_min (a2, b2), __raw_min (a3, b3));
}

/* 32-bit horizontal sum of the four 8-bit lanes of a. */
inline static __int32 __raw_sum_4x8 (__int4x8 a) {
  __int32 total = 0;
  __int32 lane;
  for (lane = 0; lane < 4; lane++) {
    total += __sel8i (a, lane);
  }
  return total;
}

/* 8x8 operations */

/* Saturating negate of all eight 8-bit lanes (8x8 selector __sel8ll). */
inline static __int8x8 __raw_neg_8x8_sat (__int8x8 a) {
  return __compact_to_i8x8_from_i32 (
    __sat8 (-(__sel8ll (a, 0))),
    __sat8 (-(__sel8ll (a, 1))),
    __sat8 (-(__sel8ll (a, 2))),
    __sat8 (-(__sel8ll (a, 3))),
    __sat8 (-(__sel8ll (a, 4))),
    __sat8 (-(__sel8ll (a, 5))),
    __sat8 (-(__sel8ll (a, 6))),
    __sat8 (-(__sel8ll (a, 7))));
}

/* 32-bit horizontal sum of all eight 8-bit lanes of a.
   Fixed to use the 8x8 lane selector __sel8ll, as every other 8x8
   operation in this file does; the previous code used the 4x8
   selector __sel8i, which is not defined for lanes 4-7 of a 64-bit
   __int8x8 value. */
inline static __int32 __raw_sum_8x8 (__int8x8 a) {
  return __sel8ll (a, 0) +
         __sel8ll (a, 1) +
         __sel8ll (a, 2) +
         __sel8ll (a, 3) +
         __sel8ll (a, 4) +
         __sel8ll (a, 5) +
         __sel8ll (a, 6) +
         __sel8ll (a, 7);
}

/* Wrapping (non-saturating) add of eight 8-bit lanes. */
inline static __int8x8 __raw_add_8x8 (__int8x8 a, __int8x8 b) {
  return __compact_to_i8x8_from_i32 (
    __sel8ll (a, 0) + __sel8ll (b, 0),
    __sel8ll (a, 1) + __sel8ll (b, 1),
    __sel8ll (a, 2) + __sel8ll (b, 2),
    __sel8ll (a, 3) + __sel8ll (b, 3),
    __sel8ll (a, 4) + __sel8ll (b, 4),
    __sel8ll (a, 5) + __sel8ll (b, 5),
    __sel8ll (a, 6) + __sel8ll (b, 6),
    __sel8ll (a, 7) + __sel8ll (b, 7));
}

/* Saturating signed add of eight 8-bit lanes. */
inline static __int8x8 __raw_add_8x8_sat (__int8x8 a, __int8x8 b) {
  return __compact_to_i8x8_from_i32 (
    __sat8 (__sel8ll (a, 0) + __sel8ll (b, 0)),
    __sat8 (__sel8ll (a, 1) + __sel8ll (b, 1)),
    __sat8 (__sel8ll (a, 2) + __sel8ll (b, 2)),
    __sat8 (__sel8ll (a, 3) + __sel8ll (b, 3)),
    __sat8 (__sel8ll (a, 4) + __sel8ll (b, 4)),
    __sat8 (__sel8ll (a, 5) + __sel8ll (b, 5)),
    __sat8 (__sel8ll (a, 6) + __sel8ll (b, 6)),
    __sat8 (__sel8ll (a, 7) + __sel8ll (b, 7)));
}

/* Saturating unsigned add of eight 8-bit lanes. */
inline static __int8x8 __raw_add_u8x8_sat (__int8x8 a, __int8x8 b) {
  return __compact_to_i8x8_from_i32 (
    __usat8 (__usel8ll (a, 0) + __usel8ll (b, 0)),
    __usat8 (__usel8ll (a, 1) + __usel8ll (b, 1)),
    __usat8 (__usel8ll (a, 2) + __usel8ll (b, 2)),
    __usat8 (__usel8ll (a, 3) + __usel8ll (b, 3)),
    __usat8 (__usel8ll (a, 4) + __usel8ll (b, 4)),
    __usat8 (__usel8ll (a, 5) + __usel8ll (b, 5)),
    __usat8 (__usel8ll (a, 6) + __usel8ll (b, 6)),
    __usat8 (__usel8ll (a, 7) + __usel8ll (b, 7)));
}

/* Wrapping subtract of eight 8-bit lanes. */
inline static __int8x8 __raw_sub_8x8 (__int8x8 a, __int8x8 b) {
  return __compact_to_i8x8_from_i32 (
    __sel8ll (a, 0) - __sel8ll (b, 0),
    __sel8ll (a, 1) - __sel8ll (b, 1),
    __sel8ll (a, 2) - __sel8ll (b, 2),
    __sel8ll (a, 3) - __sel8ll (b, 3),
    __sel8ll (a, 4) - __sel8ll (b, 4),
    __sel8ll (a, 5) - __sel8ll (b, 5),
    __sel8ll (a, 6) - __sel8ll (b, 6),
    __sel8ll (a, 7) - __sel8ll (b, 7));
}

/* Saturating signed subtract of eight 8-bit lanes. */
inline static __int8x8 __raw_sub_8x8_sat (__int8x8 a, __int8x8 b) {
  return __compact_to_i8x8_from_i32 (
    __sat8 (__sel8ll (a, 0) - __sel8ll (b, 0)),
    __sat8 (__sel8ll (a, 1) - __sel8ll (b, 1)),
    __sat8 (__sel8ll (a, 2) - __sel8ll (b, 2)),
    __sat8 (__sel8ll (a, 3) - __sel8ll (b, 3)),
    __sat8 (__sel8ll (a, 4) - __sel8ll (b, 4)),
    __sat8 (__sel8ll (a, 5) - __sel8ll (b, 5)),
    __sat8 (__sel8ll (a, 6) - __sel8ll (b, 6)),
    __sat8 (__sel8ll (a, 7) - __sel8ll (b, 7)));
}

/* Saturating unsigned subtract of eight 8-bit lanes. */
inline static __int8x8 __raw_sub_u8x8_sat (__int8x8 a, __int8x8 b) {
  return __compact_to_i8x8_from_i32 (
    __usat8 (__usel8ll (a, 0) - __usel8ll (b, 0)),
    __usat8 (__usel8ll (a, 1) - __usel8ll (b, 1)),
    __usat8 (__usel8ll (a, 2) - __usel8ll (b, 2)),
    __usat8 (__usel8ll (a, 3) - __usel8ll (b, 3)),
    __usat8 (__usel8ll (a, 4) - __usel8ll (b, 4)),
    __usat8 (__usel8ll (a, 5) - __usel8ll (b, 5)),
    __usat8 (__usel8ll (a, 6) - __usel8ll (b, 6)),
    __usat8 (__usel8ll (a, 7) - __usel8ll (b, 7)));
}

/* Per-lane absolute value of eight 8-bit lanes (no saturation before
   the re-pack). */
inline static __int8x8 __raw_abs_8x8 (__int8x8 a) {
  __int32 a0, a1, a2, a3, a4, a5, a6, a7;
  a0 = __sel8ll (a, 0);
  a1 = __sel8ll (a, 1);
  a2 = __sel8ll (a, 2);
  a3 = __sel8ll (a, 3);
  a4 = __sel8ll (a, 4);
  a5 = __sel8ll (a, 5);
  a6 = __sel8ll (a, 6);
  a7 = __sel8ll (a, 7);
  return __compact_to_i8x8_from_i32 ((a0 > 0) ? a0 : -a0,
                                     (a1 > 0) ? a1 : -a1,
                                     (a2 > 0) ? a2 : -a2,
                                     (a3 > 0) ? a3 : -a3,
                                     (a4 > 0) ? a4 : -a4,
                                     (a5 > 0) ? a5 : -a5,
                                     (a6 > 0) ? a6 : -a6,
                                     (a7 > 0) ? a7 : -a7);
}

/* Per-lane clip of a against b, eight 8-bit lanes. */
inline static __int8x8 __raw_clip_8x8 (__int8x8 a, __int8x8 b) {
  __int32 a0, a1, a2, a3, a4, a5, a6, a7;
  __int32 b0, b1, b2, b3, b4, b5, b6, b7;
  a0 = __sel8ll (a, 0);
  a1 = __sel8ll (a, 1);
  a2 = __sel8ll (a, 2);
  a3 = __sel8ll (a, 3);
  a4 = __sel8ll (a, 4);
  a5 = __sel8ll (a, 5);
  a6 = __sel8ll (a, 6);
  a7 = __sel8ll (a, 7);
  b0 = __sel8ll (b, 0);
  b1 = __sel8ll (b, 1);
  b2 = __sel8ll (b, 2);
  b3 = __sel8ll (b, 3);
  b4 = __sel8ll (b, 4);
  b5 = __sel8ll (b, 5);
  b6 = __sel8ll (b, 6);
  b7 = __sel8ll (b, 7);
  return __compact_to_i8x8_from_i32 (__raw_clip (a0, b0), __raw_clip (a1, b1),
                                     __raw_clip (a2, b2), __raw_clip (a3, b3),
                                     __raw_clip (a4, b4), __raw_clip (a5, b5),
                                     __raw_clip (a6, b6), __raw_clip (a7, b7));
}

/* Per-lane signed maximum of eight 8-bit lanes. */
inline static __int8x8 __raw_max_8x8 (__int8x8 a, __int8x8 b) {
  __int32 a0, a1, a2, a3, a4, a5, a6, a7;
  __int32 b0, b1, b2, b3, b4, b5, b6, b7;
  a0 = __sel8ll (a, 0);
  a1 = __sel8ll (a, 1);
  a2 = __sel8ll (a, 2);
  a3 = __sel8ll (a, 3);
  a4 = __sel8ll (a, 4);
  a5 = __sel8ll (a, 5);
  a6 = __sel8ll (a, 6);
  a7 = __sel8ll (a, 7);
  b0 = __sel8ll (b, 0);
  b1 = __sel8ll (b, 1);
  b2 = __sel8ll (b, 2);
  b3 = __sel8ll (b, 3);
  b4 = __sel8ll (b, 4);
  b5 = __sel8ll (b, 5);
  b6 = __sel8ll (b, 6);
  b7 = __sel8ll (b, 7);
  return __compact_to_i8x8_from_i32 (__raw_max (a0, b0), __raw_max (a1, b1),
                                     __raw_max (a2, b2), __raw_max (a3, b3),
                                     __raw_max (a4, b4), __raw_max (a5, b5),
                                     __raw_max (a6, b6), __raw_max (a7, b7));
}

/* Per-lane signed minimum of eight 8-bit lanes. */
inline static __int8x8 __raw_min_8x8 (__int8x8 a, __int8x8 b) {
  __int32 a0, a1, a2, a3, a4, a5, a6, a7;
  __int32 b0, b1, b2, b3, b4, b5, b6, b7;
  a0 = __sel8ll (a, 0);
  a1 = __sel8ll (a, 1);
  a2 = __sel8ll (a, 2);
  a3 = __sel8ll (a, 3);
  a4 = __sel8ll (a, 4);
  a5 = __sel8ll (a, 5);
  a6 = __sel8ll (a, 6);
  a7 = __sel8ll (a, 7);
  b0 = __sel8ll (b, 0);
  b1 = __sel8ll (b, 1);
  b2 = __sel8ll (b, 2);
  b3 = __sel8ll (b, 3);
  b4 = __sel8ll (b, 4);
  b5 = __sel8ll (b, 5);
  b6 = __sel8ll (b, 6);
  b7 = __sel8ll (b, 7);
  return __compact_to_i8x8_from_i32 (__raw_min (a0, b0), __raw_min (a1, b1),
                                     __raw_min (a2, b2), __raw_min (a3, b3),
                                     __raw_min (a4, b4), __raw_min (a5, b5),
                                     __raw_min (a6, b6), __raw_min (a7, b7));
}

/* Vector shifts */

/* Per-lane arithmetic shift of four 8-bit lanes.  Positive s shifts
   left, negative s shifts right (via __ashiftr).
   NOTE(review): counts past the lane width return all-zeros (left) or
   all-ones (right) for the whole vector regardless of each lane's
   sign — presumably this mirrors the hardware's saturating shift-count
   behaviour; confirm against the instruction set reference. */
inline static __int4x8 __raw_ashift_4x8 (__int4x8 a, __int32 s) {
  __int32 r[4];
  __int32 i;
  if (s >= 0) {
    if (s > 7) {
      return (__int4x8) 0;
    } else {
      for (i=0; i<4; i++) {
        r[i] = __sel8i (a, i) << s;
      }
    }
  } else {
    if (s < -7) {
      return (__int4x8) -1;
    } else {
      for (i=0; i<4; i++) {
        r[i] = __ashiftr (__sel8i (a, i), -s);
      }
    }
  }
  return __compact_to_i4x8_from_i32 (r[0], r[1], r[2], r[3]);
}

/* Per-lane arithmetic shift of eight 8-bit lanes (same out-of-range
   count behaviour as __raw_ashift_4x8 above). */
inline static __int8x8 __raw_ashift_8x8 (__int8x8 a, __int32 s) {
  __int32 r[8];
  __int32 i;
  if (s >= 0) {
    if (s > 7) {
      return (__int8x8) 0;
    } else {
      for (i=0; i<8; i++) {
        r[i] = __sel8ll (a, i) << s;
      }
    }
  } else {
    if (s < -7) {
      return (__int8x8) -1;
    } else {
      for (i=0; i<8; i++) {
        r[i] = __ashiftr (__sel8ll (a, i), -s);
      }
    }
  }
  return __compact_to_i8x8_from_i32 (r[0], r[1], r[2], r[3],
                                     r[4], r[5], r[6], r[7]);
}

/* Per-lane arithmetic shift of two 16-bit lanes. */
inline static __int2x16 __raw_ashift_2x16 (__int2x16 a, __int32 s) {
  if (s >= 0) {
    if (s > 15) {
      return (__int2x16) 0;
    } else {
      return __raw_compact_to_i2x16 (
        __raw_compose_64 (__sel16i (a, 0) << s,
                          __sel16i (a, 1) << s));
    }
  } else {
    if (s < -15) {
      return (__int2x16) -1;
    } else {
      return __raw_compact_to_i2x16 (
        __raw_compose_64 (__ashiftr (__sel16i (a, 0), -s),
                          __ashiftr (__sel16i (a, 1), -s)));
    }
  }
}

/* Per-lane arithmetic shift of four 16-bit lanes. */
inline static __int4x16 __raw_ashift_4x16 (__int4x16 a, __int32 s) {
  __int32 r[4];
  __int32 i;
  if (s >= 0) {
    if (s > 15) {
      return (__int4x16) 0;
    } else {
      for (i=0; i<4; i++) {
        r[i] = __sel16ll (a, i) << s;
      }
    }
  } else {
    if (s < -15) {
      return (__int4x16) -1;
    } else {
      for (i=0; i<4; i++) {
        r[i] = __ashiftr (__sel16ll (a, i), -s);
      }
    }
  }
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (r[0], r[1])),
    __raw_compact_to_i2x16 (__raw_compose_64 (r[2], r[3])));
}

/* Per-lane arithmetic shift of two 32-bit lanes. */
inline static __int2x32 __raw_ashift_2x32 (__int2x32 a, __int32 s) {
  if (s >= 0) {
    if (s > 31) {
      return (__int2x32) 0;
    } else {
      return __raw_compose_64 (__sel32ll (a, 0) << s,
                               __sel32ll (a, 1) << s);
    }
  } else {
    if (s < -31) {
      return (__int2x32) -1;
    } else {
      return __raw_compose_64 (__ashiftr (__sel32ll (a, 0), -s),
                               __ashiftr (__sel32ll (a, 1), -s));
    }
  }
}

/* Per-lane logical shift of four 8-bit lanes.  Positive s shifts left,
   negative s shifts right with zero fill (unsigned selector); any
   count past the lane width yields all-zeros. */
inline static __int4x8 __raw_lshift_4x8 (__int4x8 a, __int32 s) {
  __int32 r[4];
  __int32 i;
  if ((s < -7) || (s > 7)) {
    return (__int4x8) 0;
  } else {
    if (s >= 0) {
      for (i=0; i<4; i++) {
        r[i] = __sel8i (a, i) << s;
      }
    } else {
      for (i=0; i<4; i++) {
        r[i] = __usel8i (a, i) >> (-s);
      }
    }
  }
  return __compact_to_i4x8_from_i32 (r[0], r[1], r[2], r[3]);
}

/* Per-lane logical shift of eight 8-bit lanes. */
inline static __int8x8 __raw_lshift_8x8 (__int8x8 a, __int32 s) {
  __int32 r[8];
  __int32 i;
  if ((s < -7) || (s > 7)) {
    return (__int8x8) 0;
  } else {
    if (s >= 0) {
      for (i=0; i<8; i++) {
        r[i] = __sel8ll (a, i) << s;
      }
    } else {
      for (i=0; i<8; i++) {
        r[i] = __usel8ll (a, i) >> (-s);
      }
    }
  }
  return __compact_to_i8x8_from_i32 (r[0], r[1], r[2], r[3],
                                     r[4], r[5], r[6], r[7]);
}

/* Per-lane logical shift of two 16-bit lanes. */
inline static __int2x16 __raw_lshift_2x16 (__int2x16 a, __int32 s) {
  if ((s < -15) || (s > 15)) {
    return (__int2x16) 0;
  } else {
    if (s >= 0) {
      return __raw_compact_to_i2x16 (
        __raw_compose_64 (__sel16i (a, 0) << s,
                          __sel16i (a, 1) << s));
    } else {
      return __raw_compact_to_i2x16 (
        __raw_compose_64 (__usel16i (a, 0) >> (-s),
                          __usel16i (a, 1) >> (-s)));
    }
  }
}

/* Per-lane logical shift of four 16-bit lanes. */
inline static __int4x16 __raw_lshift_4x16 (__int4x16 a, __int32 s) {
  __int32 r[4];
  __int32 i;
  if ((s < -15) || (s > 15)) {
    return (__int4x16) 0;
  } else {
    if (s >= 0) {
      for (i=0; i<4; i++) {
        r[i] = __sel16ll (a, i) << s;
      }
    } else {
      for (i=0; i<4; i++) {
        r[i] = __usel16ll (a, i) >> (-s);
      }
    }
  }
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (__raw_compose_64 (r[0], r[1])),
    __raw_compact_to_i2x16 (__raw_compose_64 (r[2], r[3])));
}

/* Per-lane logical shift of two 32-bit lanes. */
inline static __int2x32 __raw_lshift_2x32 (__int2x32 a, __int32 s) {
  if ((s < -31) || (s > 31)) {
    return (__int2x32) 0;
  } else {
    if (s >= 0) {
      return __raw_compose_64 (__sel32ll (a, 0) << s,
                               __sel32ll (a, 1) << s);
    } else {
      return __raw_compose_64 (__usel32ll (a, 0) >> (-s),
                               __usel32ll (a, 1) >> (-s));
    }
  }
}

/* Rotates */

/* Rotate a by s bits: positive s rotates towards the MSB, negative s
   towards the LSB.  All shifting is done on an unsigned copy so that
   no negative value is left-shifted (undefined in ISO C).  The
   previous version computed "(__uint32) a >> (32 - s)", which performs
   an undefined shift by 32 whenever s is a non-zero multiple of 32;
   the count is now reduced to [0, 31] first and 0 handled explicitly
   (mirroring the zero-count guard already present in
   __raw_rotate_1x64). */
inline static __int32 __raw_rotate_1x32 (__int32 a, __int32 s) {
  __uint32 ua = (__uint32) a;
  __int32 k = ((s % 32) + 32) % 32;   /* normalised rotate count */
  if (k == 0) {
    return a;
  }
  return (__int32) ((ua << k) | (ua >> (32 - k)));
}

/* Rotate each 32-bit half of a by s bits; positive s rotates towards
   the MSB, negative s towards the LSB.  Rewritten to normalise the
   count into [0, 31] and to shift unsigned copies of the lanes: the
   previous version performed an undefined shift by 32 when s was a
   non-zero multiple of 32, and left-shifted possibly-negative signed
   values (both undefined in ISO C). */
inline static __int2x32 __raw_rotate_2x32 (__int2x32 a, __int32 s) {
  __uint32 lo = (__uint32) __sel32ll (a, 0);
  __uint32 hi = (__uint32) __sel32ll (a, 1);
  __int32 k = ((s % 32) + 32) % 32;   /* normalised rotate count */

  if (k != 0) {
    lo = (lo << k) | (lo >> (32 - k));
    hi = (hi << k) | (hi >> (32 - k));
  }
  return __raw_compose_64 ((__int32) lo, (__int32) hi);
}

/* Rotate the 64-bit value a by s bits: positive s rotates towards the
   MSB, negative s towards the LSB.  The original already guarded the
   zero-count case; this version additionally performs all shifting on
   an unsigned copy, because the previous negative branch computed
   "a << (64 + s)" on a signed (possibly negative) __int64, which is
   undefined behaviour in ISO C. */
inline static __int64 __raw_rotate_1x64 (__int64 a, __int32 s) {
  __uint64 ua = (__uint64) a;
  __int32 k = ((s % 64) + 64) % 64;   /* normalised rotate count */
  if (k == 0) {
    return a;
  }
  return (__int64) ((ua << k) | (ua >> (64 - k)));
}

/* Field extract/deposit */

/* Zero-extending field extract: takes len bits of v starting at bit
   pos, where len is the low byte of c (masked to 7 bits) and pos is
   the second byte.
   NOTE(review): len == 0 makes the mask expression shift by 32, which
   is undefined in ISO C — presumably the hardware defines len == 0
   specially; confirm before relying on it. */
inline static __int32 __raw_fext_ze (__int32 v, __int32 c) {
  __uint32 pos, len, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  res = ((__uint32) v >> pos) & ((__uint32) 0xffffffff >> (32 - len));
  return res;
}

/* Sign-extending field extract: as __raw_fext_ze, but the extracted
   field's top bit is propagated through bits [len, 31]. */
inline static __int32 __raw_fext_se (__int32 v, __int32 c) {
  __uint32 pos, len, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  res = (v >> pos) & ((__uint32) 0xffffffff >> (32 - len));
  if (res & ((__uint32) 1 << (len-1))) {
    res |= (0xffffffff << len);
  }
  return res;
}

/* 64-bit zero-extending field extract (len/pos encoded as in
   __raw_fext_ze; same len == 0 caveat, here for a shift by 64). */
inline static __int64 __raw_fext2_ze (__int64 v, __int32 c) {
  __uint32 pos, len;
  __uint64 res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  res = ((__uint64) v >> pos) & (__ULLCONST (0xffffffffffffffff) >> (64 - len));
  return res;
}

/* 64-bit sign-extending field extract. */
inline static __int64 __raw_fext2_se (__int64 v, __int32 c) {
  __uint32 pos, len;
  __uint64 res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  res = (v >> pos) & (__ULLCONST (0xffffffffffffffff) >> (64 - len));
  if (res & ((__uint64) 1 << (len-1))) {
    res |= (__ULLCONST (0xffffffffffffffff) << len);
  }
  return res;
}

/* Long-control variants: len comes from the low 32 bits of c and pos
   from the high 32 bits.
   NOTE(review): unlike the short-control variants, len is not masked
   with 0x7f here — presumably the long control word is trusted;
   confirm. */
inline static __int32 __raw_fext_long_control_ze (__int32 v, __int64 c) {
  __uint32 pos, len, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  res = ((__uint32) v >> pos) & ((__uint32) 0xffffffff >> (32 - len));
  return res;
}

/* Sign-extending long-control field extract. */
inline static __int32 __raw_fext_long_control_se (__int32 v, __int64 c) {
  __uint32 pos, len, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  res = (v >> pos) & ((__uint32) 0xffffffff >> (32 - len));
  if (res & ((__uint32) 1 << (len-1))) {
    res |= (0xffffffff << len);
  }
  return res;
}

/* 64-bit zero-extending long-control field extract. */
inline static __int64 __raw_fext2_long_control_ze (__int64 v, __int64 c) {
  __uint32 pos, len;
  __uint64 res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  res = ((__uint64) v >> pos) & (__ULLCONST (0xffffffffffffffff) >> (64 - len));
  return res;
}

/* 64-bit sign-extending long-control field extract. */
inline static __int64 __raw_fext2_long_control_se (__int64 v, __int64 c) {
  __uint32 pos, len;
  __uint64 res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  res = (v >> pos) & (__ULLCONST (0xffffffffffffffff) >> (64 - len));
  if (res & ((__uint64) 1 << (len-1))) {
    res |= (__ULLCONST (0xffffffffffffffff) << len);
  }
  return res;
}

/* Field deposit: insert the low len bits of v into a at bit position
   pos, leaving all other bits of a unchanged.  len is the low byte of
   c (masked to 7 bits), pos the second byte.
   NOTE(review): unlike __raw_fdep_se below, there is no len == 0 early
   return here, so len == 0 reaches undefined shift counts — confirm
   whether the hardware defines that case. */
inline static __int32 __raw_fdep (__int32 a, __int32 v, __int32 c) {
  __uint32 pos, len, mask, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  mask = (__uint32) 0xffffffff << (32 - len - pos);
  mask = mask >> (32 - len);
  mask = mask << pos;
  res = (a & (~mask)) | ((v << pos) & mask);
  return res;
}

/* Sign-extending field deposit: as __raw_fdep, but the deposited
   field's sign bit is propagated through all bits above the field. */
inline static __int32 __raw_fdep_se (__int32 a, __int32 v, __int32 c) {
  __uint32 pos, len, mask, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  if (len == 0) return a;
  mask = (__uint32) 0xffffffff << (32 - len - pos);
  mask = mask >> (32 - len);
  mask = mask << pos;
  res = (a & (~mask));
  if (v & ((__uint32) 1 << (len-1))) {
    res |= (0xffffffff << (pos + len));
  } else {
    res &= (~((__uint32) 0xffffffff << (pos + len)));
  }
  res |= ((v << pos) & mask);
  return res;
}

/* Zero-fill field deposit: deposit the field and clear every bit above
   it (no len == 0 guard — same caveat as __raw_fdep). */
inline static __int32 __raw_fdep_zf (__int32 a, __int32 v, __int32 c) {
  __uint32 pos, len, mask, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  mask = (__uint32) 0xffffffff << (32 - len - pos);
  mask = mask >> (32 - len);
  mask = mask << pos;
  res = (a & (~mask));
  res &= (~((__uint32) 0xffffffff << (pos + len)));
  res |= ((v << pos) & mask);
  return res;
}

/* 64-bit field deposit (len/pos encoded as in __raw_fdep). */
inline static __int64 __raw_fdep2 (__int64 a, __int64 v, __int32 c) {
  __uint32 pos, len;
  __uint64 mask, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  mask = __ULLCONST (0xffffffffffffffff) << (64 - len - pos);
  mask = mask >> (64 - len);
  mask = mask << pos;
  res = (a & (~mask)) | ((v << pos) & mask);
  return res;
}

/* 64-bit sign-extending field deposit. */
inline static __int64 __raw_fdep2_se (__int64 a, __int64 v, __int32 c) {
  __uint32 pos, len;
  __uint64 mask, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  if (len == 0) return a;
  mask = __ULLCONST (0xffffffffffffffff) << (64 - len - pos);
  mask = mask >> (64 - len);
  mask = mask << pos;
  res = (a & (~mask));
  if (v & ((__uint64) 1 << (len-1))) {
    res |= (__ULLCONST (0xffffffffffffffff) << (pos + len));
  } else {
    res &= (~(__ULLCONST (0xffffffffffffffff) << (pos + len)));
  }
  res |= ((v << pos) & mask);
  return res;
}

/* 64-bit zero-fill field deposit. */
inline static __int64 __raw_fdep2_zf (__int64 a, __int64 v, __int32 c) {
  __uint32 pos, len;
  __uint64 mask, res;
  len = __sel8i (c, 0) & 0x7f;
  pos = __sel8i (c, 1);
  mask = __ULLCONST (0xffffffffffffffff) << (64 - len - pos);
  mask = mask >> (64 - len);
  mask = mask << pos;
  res = (a & (~mask));
  res &= (~(__ULLCONST (0xffffffffffffffff) << (pos + len)));
  res |= ((v << pos) & mask);
  return res;
}

/* Long-control field deposit: len comes from the low 32 bits of c,
   pos from the high 32 bits (len is not masked here, unlike the
   short-control variants). */
inline static __int32 __raw_fdep_long_control (__int32 a,
                                               __int32 v,
                                               __int64 c) {
  __uint32 pos, len, mask, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  mask = (__uint32) 0xffffffff << (32 - len - pos);
  mask = mask >> (32 - len);
  mask = mask << pos;
  res = (a & (~mask)) | ((v << pos) & mask);
  return res;
}

/* Long-control sign-extending field deposit (propagates the field's
   sign bit through all bits above it; len == 0 returns a unchanged). */
inline static __int32 __raw_fdep_long_control_se (__int32 a,
                                                  __int32 v,
                                                  __int64 c) {
  __uint32 pos, len, mask, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  if (len == 0) return a;
  mask = (__uint32) 0xffffffff << (32 - len - pos);
  mask = mask >> (32 - len);
  mask = mask << pos;
  res = (a & (~mask));
  if (v & ((__uint32) 1 << (len-1))) {
    res |= (0xffffffff << (pos + len));
  } else {
    res &= (~((__uint32) 0xffffffff << (pos + len)));
  }
  res |= ((v << pos) & mask);
  return res;
}

/* Long-control zero-fill field deposit (clears all bits above the
   deposited field). */
inline static __int32 __raw_fdep_long_control_zf (__int32 a,
                                                  __int32 v,
                                                  __int64 c) {
  __uint32 pos, len, mask, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  mask = (__uint32) 0xffffffff << (32 - len - pos);
  mask = mask >> (32 - len);
  mask = mask << pos;
  res = (a & (~mask));
  res &= (~((__uint32) 0xffffffff << (pos + len)));
  res |= ((v << pos) & mask);
  return res;
}

/* 64-bit long-control field deposit. */
inline static __int64 __raw_fdep2_long_control (__int64 a,
                                                __int64 v,
                                                __int64 c) {
  __uint32 pos, len;
  __uint64 mask, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  mask = __ULLCONST (0xffffffffffffffff) << (64 - len - pos);
  mask = mask >> (64 - len);
  mask = mask << pos;
  res = (a & (~mask)) | ((v << pos) & mask);
  return res;
}

/* 64-bit long-control sign-extending field deposit. */
inline static __int64 __raw_fdep2_long_control_se (__int64 a,
                                                   __int64 v,
                                                   __int64 c) {
  __uint32 pos, len;
  __uint64 mask, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  if (len == 0) return a;
  mask = __ULLCONST (0xffffffffffffffff) << (64 - len - pos);
  mask = mask >> (64 - len);
  mask = mask << pos;
  res = (a & (~mask));
  if (v & ((__uint64) 1 << (len-1))) {
    res |= (__ULLCONST (0xffffffffffffffff) << (pos + len));
  } else {
    res &= (~(__ULLCONST (0xffffffffffffffff) << (pos + len)));
  }
  res |= ((v << pos) & mask);
  return res;
}

/* 64-bit long-control zero-fill field deposit. */
inline static __int64 __raw_fdep2_long_control_zf (__int64 a,
                                                   __int64 v,
                                                   __int64 c) {
  __uint32 pos, len;
  __uint64 mask, res;
  len = __raw_low_32 (c);
  pos = __raw_high_32 (c);
  mask = __ULLCONST (0xffffffffffffffff) << (64 - len - pos);
  mask = mask >> (64 - len);
  mask = mask << pos;
  res = (a & (~mask));
  res &= (~(__ULLCONST (0xffffffffffffffff) << (pos + len)));
  res |= ((v << pos) & mask);
  return res;
}

/* DAB accesses */

/* Emulated DAB fetch: load two consecutive 32-bit words through *a,
   pack them into a 2x32 vector, and post-increment the pointer. */
inline static __int2x32 __raw_dab_2x32 (int **a) {
  int *p = *a;
  __int2x32 packed = __raw_compose_64 (p[0], p[1]);
  *a = p + 2;
  return packed;
}

/* Emulated DAB fetch: load four consecutive 32-bit words through *a
   into a 128-bit quad and post-increment the pointer. */
inline static __raw_quad __raw_dab_4x32 (int **a) {
  int *p = *a;
  __raw_quad packed = __raw_compose_128 (__raw_compose_64 (p[0], p[1]),
                                         __raw_compose_64 (p[2], p[3]));
  *a = p + 4;
  return packed;
}

#if !defined (__TS_BYTE_ADDRESS)

/* Word-addressed DAB fetch of two 16-bit values with a sub-word byte
   offset b: when b is odd the result straddles two 32-bit words and is
   rebuilt from the high half of one word and the low half of the next;
   *a is advanced by one word either way. */
inline static __int2x16 __raw_dab_2x16 (int **a, int b) {
  __int2x16 ret;
  if (b & 1) {
    ret = __raw_compact_to_i2x16 (
      __raw_compose_64 (__expand_high_of_i2x16 (*(*a + (b >> 1))),
                        __expand_low_of_i2x16  (*(*a + (b >> 1) + 1))));
  } else {
    ret = *(*a + (b >> 1));
  }
  (*a) += 1;
  return ret;
}

/* Two successive 2x16 DAB fetches packed low-then-high (each call
   advances *a). */
inline static __int4x16 __raw_dab_4x16 (int **a, int b) {
  __int2x16 l, h;
  l = __raw_dab_2x16 (a, b);
  h = __raw_dab_2x16 (a, b);
  return __raw_compose_64 (l, h);
}

/* Two successive 4x16 DAB fetches packed into a 128-bit quad. */
inline static __raw_quad __raw_dab_8x16 (int **a, int b) {
  __int4x16 l, h;
  l = __raw_dab_4x16 (a, b);
  h = __raw_dab_4x16 (a, b);
  return __raw_compose_128 (l, h);
}

#else

/* DAB fetch of one 16-bit pair from a short-addressed buffer:
   packs two consecutive shorts and advances the pointer past them. */
inline static __int2x16 __raw_dab_2x16 (short int **a) {
  short int *p = *a;
  *a = p + 2;
  return __raw_compact_to_i2x16 (__raw_compose_64 (p[0], p[1]));
}

/* DAB fetch of four 16-bit values: two consecutive pair fetches
   composed low-then-high. */
inline static __int4x16 __raw_dab_4x16 (short int **a) {
  __int2x16 lo = __raw_dab_2x16 (a);
  __int2x16 hi = __raw_dab_2x16 (a);
  return __raw_compose_64 (lo, hi);
}

/* DAB fetch of eight 16-bit values: two quad-halves of four each,
   composed low-then-high. */
inline static __raw_quad __raw_dab_8x16 (short int **a) {
  __int4x16 lo = __raw_dab_4x16 (a);
  __int4x16 hi = __raw_dab_4x16 (a);
  return __raw_compose_128 (lo, hi);
}

#endif

/* Circular buffer DAB accesses */

/* Circular-buffer DAB fetch of two 32-bit words.  After each fetch
   the pointer advances by one and wraps back by 'len' words when it
   reaches the end of the buffer [base, base+len). */
inline static __int2x32 __raw_dabcb_2x32 (int **a, int *base, int len) {
  __int32 first, second;
  first = **a;
  *a += 1;
  if (*a >= base + len) *a -= len;
  second = **a;
  *a += 1;
  if (*a >= base + len) *a -= len;
  return __raw_compose_64 (first, second);
}

/* Circular-buffer DAB fetch of four 32-bit words, wrapping the
   pointer within [base, base+len) after each single-word advance. */
inline static __raw_quad __raw_dabcb_4x32 (int **a, int *base, int len) {
  __int32 w[4];
  __int32 k;
  for (k = 0; k < 4; k++) {
    w[k] = **a;
    *a += 1;
    if (*a >= base + len) *a -= len;
  }
  return __raw_compose_128 (__raw_compose_64 (w[0], w[1]),
                            __raw_compose_64 (w[2], w[3]));
}

#if !defined (__TS_BYTE_ADDRESS)

/* Circular-buffer DAB fetch of one 16-bit pair from a word-addressed
   buffer.  'len' is the buffer length in halves (len>>1 words); b is
   the half-word offset.  For odd b the pair straddles a word boundary
   and both contributing words are fetched modulo the buffer length,
   so the pair itself wraps correctly.  The pointer then advances one
   word and wraps with a compare.
   NOTE(review): element fetches wrap via '%' on the word index while
   the pointer advance wraps via '>='; this assumes *a never starts
   outside [base, base + (len>>1)) and that len is even -- confirm
   with callers. */
inline static __int2x16 __raw_dabcb_2x16 (int **a, int b, int *base, int len) {
  __int2x16 ret;
  __int32 l, h;
  __int32 index;
  index = *a - base;
  if (b & 1) {
    l = __expand_high_of_i2x16 (*(base + (index     + (b >> 1)) % (len>>1)));
    h = __expand_low_of_i2x16  (*(base + (index + 1 + (b >> 1)) % (len>>1)));
    ret = __raw_compact_to_i2x16 (__raw_compose_64 (l, h));
  } else {
    ret = *(base + (index + (b >> 1)) % (len>>1));
  }
  (*a) += 1;
  if ((*a) >= (base + (len>>1))) (*a) = (*a) - (len>>1);
  return ret;
}

/* Circular-buffer DAB fetch of four 16-bit values: two consecutive
   pair fetches composed low-then-high. */
inline static __int4x16 __raw_dabcb_4x16 (int **a, int b, int *base, int len) {
  __int2x16 lo = __raw_dabcb_2x16 (a, b, base, len);
  __int2x16 hi = __raw_dabcb_2x16 (a, b, base, len);
  return __raw_compose_64 (lo, hi);
}

/* Circular-buffer DAB fetch of eight 16-bit values: two quad-halves
   of four each, composed low-then-high. */
inline static __raw_quad __raw_dabcb_8x16 (int **a, int b, int *base, int len) {
  __int4x16 lo = __raw_dabcb_4x16 (a, b, base, len);
  __int4x16 hi = __raw_dabcb_4x16 (a, b, base, len);
  return __raw_compose_128 (lo, hi);
}

#else

/* Circular-buffer DAB fetch of one 16-bit pair from a short-addressed
   buffer.  After each element the pointer advances by one short and
   wraps back by 'len' when it reaches the end of [base, base+len). */
inline static __int2x16 __raw_dabcb_2x16 (short int **a,
                                          short int *base,
                                          int len) {
  __int32 first, second;
  first = **a;
  *a += 1;
  if (*a >= base + len) *a -= len;
  second = **a;
  *a += 1;
  if (*a >= base + len) *a -= len;
  return __raw_compact_to_i2x16 (__raw_compose_64 (first, second));
}

/* Circular-buffer DAB fetch of four 16-bit values: two consecutive
   pair fetches composed low-then-high. */
inline static __int4x16 __raw_dabcb_4x16 (short int **a,
                                          short int *base,
                                          int len) {
  __int2x16 lo = __raw_dabcb_2x16 (a, base, len);
  __int2x16 hi = __raw_dabcb_2x16 (a, base, len);
  return __raw_compose_64 (lo, hi);
}

/* Circular-buffer DAB fetch of eight 16-bit values: two quad-halves
   of four each, composed low-then-high. */
inline static __raw_quad __raw_dabcb_8x16 (short int **a,
                                           short int *base,
                                           int len) {
  __int4x16 lo = __raw_dabcb_4x16 (a, base, len);
  __int4x16 hi = __raw_dabcb_4x16 (a, base, len);
  return __raw_compose_128 (lo, hi);
}

#endif

/* Circular buffer operations */

/* Advance a circular-buffer index by 'inc' and wrap it once into
   [0, len).  A single wrap suffices: the increment is assumed to be
   no larger in magnitude than the buffer length. */
inline static __int32 __raw_circindex (__int32 index,
                                       __int32 inc,
                                       __int32 len) {
  __int32 next = index + inc;
  if (next < 0) {
    next += len;
  } else if (next >= len) {
    next -= len;
  }
  return next;
}

#if defined (__NO_BYTE_ADDRESSING__)

/* Advance a circular-buffer pointer by 'inc' words and wrap it once
   into [base, base + len); on a word-addressed machine both the
   increment and the length are counted in 32-bit units. */
inline static void *__raw_circptr (const void *ptr, __int32 inc,
                            const void *base, __int32 len) {
  __int32 *p = (__int32 *) ptr + inc;
  if ((const void *) p < base) {
    p += len;
  } else if ((const void *) p >= (const void *) ((__int32 *) base + len)) {
    p -= len;
  }
  return (void *) p;
}

#else

/* Advance a circular-buffer pointer by 'inc' bytes and wrap it once
   into [base, base + len); on a byte-addressed machine both the
   increment and the length are counted in bytes. */
inline static void *__raw_circptr (const void *ptr, __int32 inc,
                            const void *base, __int32 len) {
  __int8 *p = (__int8 *) ptr + inc;
  if ((const void *) p < base) {
    p += len;
  } else if ((const void *) p >= (const void *) ((__int8 *) base + len)) {
    p -= len;
  }
  return (void *) p;
}

#endif

/* Internal functions for use in the communication builtins */

/* Look up the TMAX correction term for a metric difference 'a'.
   TMAX is the hardware's "maximum plus correction" operation used by
   the communication builtins below; the table contents are treated
   as hardware-defined and must not be altered.
   NOTE(review): the correction appears to be a log-MAP style
   approximation term indexed by |difference| -- confirm against the
   TigerSHARC instruction set reference. */
inline static __int32 __get_tmax_table (int a) {
  static int __tmax_table[128] = {
    0x00000016, 0x00000016, 0x00000015, 0x00000015,
    0x00000014, 0x00000014, 0x00000013, 0x00000013,
    0x00000012, 0x00000012, 0x00000011, 0x00000011,
    0x00000010, 0x00000010, 0x00000010, 0x00000010,
    0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e,
    0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d,
    0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c,
    0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b,
    0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a,
    0x00000008, 0x00000008, 0x00000008, 0x00000008,
    0x00000007, 0x00000007, 0x00000007, 0x00000007,
    0x00000007, 0x00000007, 0x00000007, 0x00000007,
    0x00000006, 0x00000006, 0x00000006, 0x00000006,
    0x00000006, 0x00000006, 0x00000006, 0x00000006,
    0x00000005, 0x00000005, 0x00000005, 0x00000005,
    0x00000005, 0x00000005, 0x00000005, 0x00000005,
    0x00000003, 0x00000003, 0x00000003, 0x00000003,
    0x00000003, 0x00000003, 0x00000003, 0x00000003,
    0x00000003, 0x00000003, 0x00000003, 0x00000003,
    0x00000003, 0x00000003, 0x00000003, 0x00000003,
    0x00000002, 0x00000002, 0x00000002, 0x00000002,
    0x00000002, 0x00000002, 0x00000002, 0x00000002,
    0x00000002, 0x00000002, 0x00000002, 0x00000002,
    0x00000002, 0x00000002, 0x00000002, 0x00000002,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001,
    0x00000001, 0x00000001, 0x00000001, 0x00000001
  };

  /* fold negative differences with a one's complement (a ^ ~0 == ~a) */
  if (a < 0) a = a ^ 0xffffffff;
  /* differences of 128 or more contribute no correction */
  if (a > 127) return 0;
  return __tmax_table[a];
}

/* 64-bit TMAX: the larger of a and b, plus the table correction term
   whenever the difference fits in the table's [-127, 127] range. */
inline static __int64 __long_tmax (__int64 a, __int64 b) {
  __int64 larger = (a > b) ? a : b;
  __int64 diff = a - b;
  if (diff >= -127 && diff <= 127)
    larger += (__int64) __get_tmax_table ((__int32) diff);
  return larger;
}

/* Communication instruction builtins */

/* 32-bit TMAX: the larger of a and b plus the correction term looked
   up from their difference. */
inline static __int32 __raw_tmax (__int32 a, __int32 b) {
  __int32 larger = (a > b) ? a : b;
  return larger + __get_tmax_table (a - b);
}

/* Per-lane TMAX on a pair of 16-bit values: each half of the result
   is the larger lane plus its table correction term. */
inline static __int2x16 __raw_tmax_4s (__int2x16 a, __int2x16 b) {
  __int32 a0 = __sel16i (a, 0);
  __int32 b0 = __sel16i (b, 0);
  __int32 a1 = __sel16i (a, 1);
  __int32 b1 = __sel16i (b, 1);
  __int32 lo = ((a0 > b0) ? a0 : b0) + __get_tmax_table (a0 - b0);
  __int32 hi = ((a1 > b1) ? a1 : b1) + __get_tmax_table (a1 - b1);
  return __raw_compact_to_i2x16 (__raw_compose_64 (lo, hi));
}

/* MAX/ADD: per 32-bit lane, add branch metrics from the quad c to the
   path metrics a and b (saturating to 32 bits) and keep the larger.
   Lane 0 pairs b+c0 against a+c2, lane 1 pairs b+c1 against a+c3.
   NOTE(review): the cross pairing of c-lanes follows the hardware
   metric layout -- confirm against the instruction set reference
   before reordering. */
inline static __int2x32 __raw_max_add (__int2x32 a, __int2x32 b, __raw_quad c) {
  __int64 alo = __sel32ll (a, 0);
  __int64 blo = __sel32ll (b, 0);
  __int64 ahi = __sel32ll (a, 1);
  __int64 bhi = __sel32ll (b, 1);
  __int64 c0 = __sel32q (c, 0);
  __int64 c1 = __sel32q (c, 1);
  __int64 c2 = __sel32q (c, 2);
  __int64 c3 = __sel32q (c, 3);
  return __raw_compose_64 (
    __raw_max (__sat32 (blo + c0), __sat32 (alo + c2)),
    __raw_max (__sat32 (bhi + c1), __sat32 (ahi + c3)));
}

/* MAX/SUB: per 32-bit lane, subtract branch metrics from the quad c
   from the path metrics a and b (saturating to 32 bits) and keep the
   larger.  Lane 0 pairs b-c0 against a-c2, lane 1 pairs b-c1 against
   a-c3 (same cross pairing of c-lanes as __raw_max_add). */
inline static __int2x32 __raw_max_sub (__int2x32 a, __int2x32 b, __raw_quad c) {
  __int64 alo = __sel32ll (a, 0);
  __int64 blo = __sel32ll (b, 0);
  __int64 ahi = __sel32ll (a, 1);
  __int64 bhi = __sel32ll (b, 1);
  __int64 c0 = __sel32q (c, 0);
  __int64 c1 = __sel32q (c, 1);
  __int64 c2 = __sel32q (c, 2);
  __int64 c3 = __sel32q (c, 3);
  return __raw_compose_64 (
    __raw_max (__sat32 (blo - c0), __sat32 (alo - c2)),
    __raw_max (__sat32 (bhi - c1), __sat32 (ahi - c3)));
}

/* TMAX/ADD: like __raw_max_add, but the per-lane maximum is taken
   with the TMAX correction (__long_tmax) in 64 bits and the result
   saturated to 32 bits afterwards.  Same cross pairing of c-lanes:
   lane 0 is tmax(b+c0, a+c2), lane 1 is tmax(b+c1, a+c3). */
inline static __int2x32 __raw_tmax_add (__int2x32 a,
                                        __int2x32 b,
                                        __raw_quad c) {
  __int64 alo = __sel32ll (a, 0);
  __int64 blo = __sel32ll (b, 0);
  __int64 ahi = __sel32ll (a, 1);
  __int64 bhi = __sel32ll (b, 1);
  __int64 c0 = __sel32q (c, 0);
  __int64 c1 = __sel32q (c, 1);
  __int64 c2 = __sel32q (c, 2);
  __int64 c3 = __sel32q (c, 3);
  return __raw_compose_64 (
    __sat32 (__long_tmax (blo + c0, alo + c2)),
    __sat32 (__long_tmax (bhi + c1, ahi + c3)));
}

/* TMAX/SUB: like __raw_max_sub, but the per-lane maximum is taken
   with the TMAX correction (__long_tmax) in 64 bits and the result
   saturated to 32 bits afterwards.  Same cross pairing of c-lanes:
   lane 0 is tmax(b-c0, a-c2), lane 1 is tmax(b-c1, a-c3). */
inline static __int2x32 __raw_tmax_sub (__int2x32 a,
                                        __int2x32 b,
                                        __raw_quad c) {
  __int64 alo = __sel32ll (a, 0);
  __int64 blo = __sel32ll (b, 0);
  __int64 ahi = __sel32ll (a, 1);
  __int64 bhi = __sel32ll (b, 1);
  __int64 c0 = __sel32q (c, 0);
  __int64 c1 = __sel32q (c, 1);
  __int64 c2 = __sel32q (c, 2);
  __int64 c3 = __sel32q (c, 3);
  return __raw_compose_64 (
    __sat32 (__long_tmax (blo - c0, alo - c2)),
    __sat32 (__long_tmax (bhi - c1, ahi - c3)));
}

/* MAX/ADD on four 16-bit lanes: lane i of the result is the larger of
   a[i]+c[i+4] and b[i]+c[i], each sum saturated to 16 bits.  The
   first four 16-bit lanes of the quad c pair with b, the last four
   with a. */
inline static __int2x32 __raw_max_add_8s (__int2x32 a,
                                          __int2x32 b,
                                          __raw_quad c) {
  __int32 ap[4], bp[4], cp[8];
  __int32 i;
  for (i=0; i<4; i++) {
    ap[i] = __sel16ll (a, i);
    bp[i] = __sel16ll (b, i);
  }
  for (i=0; i<8; i++) {
    cp[i] = __sel16q (c, i);
  }
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __raw_max (__sat16 (ap[0] + cp[4]), __sat16 (bp[0] + cp[0])),
        __raw_max (__sat16 (ap[1] + cp[5]), __sat16 (bp[1] + cp[1])))),
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __raw_max (__sat16 (ap[2] + cp[6]), __sat16 (bp[2] + cp[2])),
        __raw_max (__sat16 (ap[3] + cp[7]), __sat16 (bp[3] + cp[3])))));
}

/* MAX/SUB on four 16-bit lanes: lane i of the result is the larger of
   a[i]-c[i+4] and b[i]-c[i], each difference saturated to 16 bits
   (same c-lane pairing as __raw_max_add_8s). */
inline static __int2x32 __raw_max_sub_8s (__int2x32 a,
                                          __int2x32 b,
                                          __raw_quad c) {
  __int32 ap[4], bp[4], cp[8];
  __int32 i;
  for (i=0; i<4; i++) {
    ap[i] = __sel16ll (a, i);
    bp[i] = __sel16ll (b, i);
  }
  for (i=0; i<8; i++) {
    cp[i] = __sel16q (c, i);
  }
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __raw_max (__sat16 (ap[0] - cp[4]), __sat16 (bp[0] - cp[0])),
        __raw_max (__sat16 (ap[1] - cp[5]), __sat16 (bp[1] - cp[1])))),
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __raw_max (__sat16 (ap[2] - cp[6]), __sat16 (bp[2] - cp[2])),
        __raw_max (__sat16 (ap[3] - cp[7]), __sat16 (bp[3] - cp[3])))));
}

/* TMAX/ADD on four 16-bit lanes: lane i of the result is
   tmax(a[i]+c[i+4], b[i]+c[i]) saturated to 16 bits (same c-lane
   pairing as __raw_max_add_8s, with the TMAX correction applied). */
inline static __int2x32 __raw_tmax_add_8s (__int2x32 a,
                                           __int2x32 b,
                                           __raw_quad c) {
  __int32 ap[4], bp[4], cp[8];
  __int32 i;
  for (i=0; i<4; i++) {
    ap[i] = __sel16ll (a, i);
    bp[i] = __sel16ll (b, i);
  }
  for (i=0; i<8; i++) {
    cp[i] = __sel16q (c, i);
  }
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __sat16 (__raw_tmax (ap[0] + cp[4], bp[0] + cp[0])),
        __sat16 (__raw_tmax (ap[1] + cp[5], bp[1] + cp[1])))),
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __sat16 (__raw_tmax (ap[2] + cp[6], bp[2] + cp[2])),
        __sat16 (__raw_tmax (ap[3] + cp[7], bp[3] + cp[3])))));
}

/* TMAX/SUB on four 16-bit lanes: lane i of the result is
   tmax(a[i]-c[i+4], b[i]-c[i]) saturated to 16 bits (same c-lane
   pairing as __raw_max_sub_8s, with the TMAX correction applied). */
inline static __int2x32 __raw_tmax_sub_8s (__int2x32 a,
                                           __int2x32 b,
                                           __raw_quad c) {
  __int32 ap[4], bp[4], cp[8];
  __int32 i;
  for (i=0; i<4; i++) {
    ap[i] = __sel16ll (a, i);
    bp[i] = __sel16ll (b, i);
  }
  for (i=0; i<8; i++) {
    cp[i] = __sel16q (c, i);
  }
  return __raw_compose_64 (
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __sat16 (__raw_tmax (ap[0] - cp[4], bp[0] - cp[0])),
        __sat16 (__raw_tmax (ap[1] - cp[5], bp[1] - cp[1])))),
    __raw_compact_to_i2x16 (
      __raw_compose_64 (
        __sat16 (__raw_tmax (ap[2] - cp[6], bp[2] - cp[2])),
        __sat16 (__raw_tmax (ap[3] - cp[7], bp[3] - cp[3])))));
}

/* Add-compare-select (ACS) step with plain MAX.  a and b each carry
   two 32-bit path metrics (low/high halves); c carries two 16-bit
   branch metrics.  The four saturated survivors are written to *q,
   and the four compare decisions are shifted into the top nibble of
   the trellis history: *tho = (thi >> 4) | decisions << 60. */
inline static void __raw_acs_max_sub (__int64 a,
                                      __int64 b,
                                      __int32 c,
                                      __int64 thi,
                                      __raw_quad *q,
                                      __int64 *tho) {

  __int64 alo, ahi, blo, bhi;
  __int32 clo, chi, bits;

  alo = __raw_low_32 (a);
  ahi = __raw_high_32 (a);
  blo = __raw_low_32 (b);
  bhi = __raw_high_32 (b);
  clo = __sel16i (c, 0);
  chi = __sel16i (c, 1);

  /* one decision bit per compare: set when the a-side sum wins */
  bits = 0;
  if (__sat32 (blo + clo) < __sat32 (alo - clo)) bits |= 1;
  if (__sat32 (blo - clo) < __sat32 (alo + clo)) bits |= 2;
  if (__sat32 (bhi + chi) < __sat32 (ahi - chi)) bits |= 4;
  if (__sat32 (bhi - chi) < __sat32 (ahi + chi)) bits |= 8;

  /* shift the new decisions into the top nibble of the history */
  *tho = ((__uint64) thi >> 4) | ((__uint64) bits << 60);

  *q = __raw_compose_128 (
         __raw_compose_64 (
           __raw_max (__sat32 (blo + clo), __sat32 (alo - clo)),
           __raw_max (__sat32 (blo - clo), __sat32 (alo + clo))),
         __raw_compose_64 (
           __raw_max (__sat32 (bhi + chi), __sat32 (ahi - chi)),
           __raw_max (__sat32 (bhi - chi), __sat32 (ahi + chi))));
}

/* MAX-based add-compare-select: computes the four saturated survivors
   of (I1,I2,I3) into O1 and shifts the four decision bits into the
   trellis history O2 (see __raw_acs_max_sub).  Wrapped in
   do/while(0) so the expansion is a single statement and is safe in
   an unbraced if/else; the output operands are parenthesized against
   low-precedence caller expressions. */
#define __raw_acs_max(I1,I2,I3,I4,O1,O2)                  \
  do {                                                    \
    __raw_quad r1;                                        \
    __int64 r2;                                           \
    __raw_acs_max_sub ((I1), (I2), (I3), (I4), &r1, &r2); \
    (O1) = r1;                                            \
    (O2) = r2;                                            \
  } while (0)

/* Add-compare-select (ACS) step with TMAX.  Identical layout to
   __raw_acs_max_sub -- four saturated survivors into *q, four
   decision bits into the top nibble of *tho -- except the selected
   survivor carries the TMAX correction term.  Note the decision bits
   are still computed from the plain compares. */
inline static void __raw_acs_tmax_sub (__int64 a,
                                       __int64 b,
                                       __int32 c,
                                       __int64 thi,
                                       __raw_quad *q,
                                       __int64 *tho) {

  __int64 alo, ahi, blo, bhi;
  __int32 clo, chi, bits;

  alo = __raw_low_32 (a);
  ahi = __raw_high_32 (a);
  blo = __raw_low_32 (b);
  bhi = __raw_high_32 (b);
  clo = __sel16i (c, 0);
  chi = __sel16i (c, 1);

  /* one decision bit per compare: set when the a-side sum wins */
  bits = 0;
  if (__sat32 (blo + clo) < __sat32 (alo - clo)) bits |= 1;
  if (__sat32 (blo - clo) < __sat32 (alo + clo)) bits |= 2;
  if (__sat32 (bhi + chi) < __sat32 (ahi - chi)) bits |= 4;
  if (__sat32 (bhi - chi) < __sat32 (ahi + chi)) bits |= 8;

  /* shift the new decisions into the top nibble of the history */
  *tho = ((__uint64) thi >> 4) | ((__uint64) bits << 60);

  *q = __raw_compose_128 (
         __raw_compose_64 (
           __raw_tmax (__sat32 (blo + clo), __sat32 (alo - clo)),
           __raw_tmax (__sat32 (blo - clo), __sat32 (alo + clo))),
         __raw_compose_64 (
           __raw_tmax (__sat32 (bhi + chi), __sat32 (ahi - chi)),
           __raw_tmax (__sat32 (bhi - chi), __sat32 (ahi + chi))));
}

/* TMAX-based add-compare-select: as __raw_acs_max but the survivors
   carry the TMAX correction (see __raw_acs_tmax_sub).  Wrapped in
   do/while(0) so the expansion is a single statement and is safe in
   an unbraced if/else; the output operands are parenthesized against
   low-precedence caller expressions. */
#define __raw_acs_tmax(I1,I2,I3,I4,O1,O2)                  \
  do {                                                     \
    __raw_quad r1;                                         \
    __int64 r2;                                            \
    __raw_acs_tmax_sub ((I1), (I2), (I3), (I4), &r1, &r2); \
    (O1) = r1;                                             \
    (O2) = r2;                                             \
  } while (0)

/* Add-compare-select step on four 16-bit path-metric lanes with
   8-bit branch metrics.  a and b each carry four 16-bit metrics, c
   carries four 8-bit branch metrics.  The eight saturated survivors
   are packed into *q, and the eight compare decisions are shifted
   into the top byte of the history: *tho = (thi >> 8) | bits << 56. */
inline static void __raw_acs_max_8s_sub (__int64 a,
                                         __int64 b,
                                         __int32 c,
                                         __int64 thi,
                                         __raw_quad *q,
                                         __int64 *tho) {

  __int32 ap[4], bp[4], cp[4];
  __int32 bits, i;

  for (i=0; i<4; i++) {
    ap[i] = __sel16ll (a, i);
    bp[i] = __sel16ll (b, i);
    cp[i] = __sel8i (c, i);
  }

  /* one decision bit per compare: set when the a-side sum wins */
  bits = 0;
  if (__sat16 (bp[0] + cp[0]) < __sat16 (ap[0] - cp[0])) bits |= 0x01;
  if (__sat16 (bp[0] - cp[0]) < __sat16 (ap[0] + cp[0])) bits |= 0x02;
  if (__sat16 (bp[1] + cp[1]) < __sat16 (ap[1] - cp[1])) bits |= 0x04;
  if (__sat16 (bp[1] - cp[1]) < __sat16 (ap[1] + cp[1])) bits |= 0x08;
  if (__sat16 (bp[2] + cp[2]) < __sat16 (ap[2] - cp[2])) bits |= 0x10;
  if (__sat16 (bp[2] - cp[2]) < __sat16 (ap[2] + cp[2])) bits |= 0x20;
  if (__sat16 (bp[3] + cp[3]) < __sat16 (ap[3] - cp[3])) bits |= 0x40;
  if (__sat16 (bp[3] - cp[3]) < __sat16 (ap[3] + cp[3])) bits |= 0x80;

  /* shift the new decisions into the top byte of the history */
  *tho = ((__uint64) thi >> 8) | ((__uint64) bits << 56);

  *q = __raw_compose_128 (
     __raw_compose_64 (
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_max (__sat16 (bp[0] + cp[0]), __sat16 (ap[0] - cp[0])),
           __raw_max (__sat16 (bp[0] - cp[0]), __sat16 (ap[0] + cp[0])))),
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_max (__sat16 (bp[1] + cp[1]), __sat16 (ap[1] - cp[1])),
           __raw_max (__sat16 (bp[1] - cp[1]), __sat16 (ap[1] + cp[1]))))),
   __raw_compose_64 (
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_max (__sat16 (bp[2] + cp[2]), __sat16 (ap[2] - cp[2])),
           __raw_max (__sat16 (bp[2] - cp[2]), __sat16 (ap[2] + cp[2])))),
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_max (__sat16 (bp[3] + cp[3]), __sat16 (ap[3] - cp[3])),
           __raw_max (__sat16 (bp[3] - cp[3]), __sat16 (ap[3] + cp[3]))))));
}

/* Eight-lane MAX-based add-compare-select (see __raw_acs_max_8s_sub).
   Wrapped in do/while(0) so the expansion is a single statement and
   is safe in an unbraced if/else; the output operands are
   parenthesized against low-precedence caller expressions. */
#define __raw_acs_max_8s(I1,I2,I3,I4,O1,O2)                  \
  do {                                                       \
    __raw_quad r1;                                           \
    __int64 r2;                                              \
    __raw_acs_max_8s_sub ((I1), (I2), (I3), (I4), &r1, &r2); \
    (O1) = r1;                                               \
    (O2) = r2;                                               \
  } while (0)

/* Add-compare-select step on four 16-bit lanes with 8-bit branch
   metrics, TMAX variant.  Same decision-bit and history packing as
   __raw_acs_max_8s_sub, but the survivors carry the TMAX correction.
   NOTE(review): the operand order to __raw_tmax here is (a-side,
   b-side), the reverse of the __raw_max calls in the MAX variant;
   the order affects the table index (a - b) and so looks deliberate
   -- confirm against the instruction set reference before changing. */
inline static void __raw_acs_tmax_8s_sub (__int64 a,
                                          __int64 b,
                                          __int32 c,
                                          __int64 thi,
                                          __raw_quad *q,
                                          __int64 *tho) {

  __int32 ap[4], bp[4], cp[4];
  __int32 bits, i;

  for (i=0; i<4; i++) {
    ap[i] = __sel16ll (a, i);
    bp[i] = __sel16ll (b, i);
    cp[i] = __sel8i (c, i);
  }

  /* one decision bit per compare: set when the a-side sum wins */
  bits = 0;
  if (__sat16 (bp[0] + cp[0]) < __sat16 (ap[0] - cp[0])) bits |= 0x01;
  if (__sat16 (bp[0] - cp[0]) < __sat16 (ap[0] + cp[0])) bits |= 0x02;
  if (__sat16 (bp[1] + cp[1]) < __sat16 (ap[1] - cp[1])) bits |= 0x04;
  if (__sat16 (bp[1] - cp[1]) < __sat16 (ap[1] + cp[1])) bits |= 0x08;
  if (__sat16 (bp[2] + cp[2]) < __sat16 (ap[2] - cp[2])) bits |= 0x10;
  if (__sat16 (bp[2] - cp[2]) < __sat16 (ap[2] + cp[2])) bits |= 0x20;
  if (__sat16 (bp[3] + cp[3]) < __sat16 (ap[3] - cp[3])) bits |= 0x40;
  if (__sat16 (bp[3] - cp[3]) < __sat16 (ap[3] + cp[3])) bits |= 0x80;

  /* shift the new decisions into the top byte of the history */
  *tho = ((__uint64) thi >> 8) | ((__uint64) bits << 56);

  *q = __raw_compose_128 (
     __raw_compose_64 (
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_tmax (__sat16 (ap[0] - cp[0]), __sat16 (bp[0] + cp[0])),
           __raw_tmax (__sat16 (ap[0] + cp[0]), __sat16 (bp[0] - cp[0])))),
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_tmax (__sat16 (ap[1] - cp[1]), __sat16 (bp[1] + cp[1])),
           __raw_tmax (__sat16 (ap[1] + cp[1]), __sat16 (bp[1] - cp[1]))))),
   __raw_compose_64 (
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_tmax (__sat16 (ap[2] - cp[2]), __sat16 (bp[2] + cp[2])),
           __raw_tmax (__sat16 (ap[2] + cp[2]), __sat16 (bp[2] - cp[2])))),
       __raw_compact_to_i2x16 (
         __raw_compose_64 (
           __raw_tmax (__sat16 (ap[3] - cp[3]), __sat16 (bp[3] + cp[3])),
           __raw_tmax (__sat16 (ap[3] + cp[3]), __sat16 (bp[3] - cp[3]))))));
}

/* Eight-lane TMAX-based add-compare-select (see
   __raw_acs_tmax_8s_sub).  Wrapped in do/while(0) so the expansion
   is a single statement and is safe in an unbraced if/else; the
   output operands are parenthesized against low-precedence caller
   expressions. */
#define __raw_acs_tmax_8s(I1,I2,I3,I4,O1,O2)                  \
  do {                                                        \
    __raw_quad r1;                                            \
    __int64 r2;                                               \
    __raw_acs_tmax_8s_sub ((I1), (I2), (I3), (I4), &r1, &r2); \
    (O1) = r1;                                                \
    (O2) = r2;                                                \
  } while (0)

/* Byte permute: result byte k is the source byte selected by bits
   [4k+2:4k] of 'indices' (one 4-bit selector field per output byte;
   only the low three bits of each field are used). */
inline static __int64 __raw_permute_8b (__int64 source, __int32 indices) {
  __int32 lane[8];
  __int32 k;
  for (k = 0; k < 8; k++, indices >>= 4) {
    lane[k] = __usel8ll (source, indices & 0x7);
  }
  return __compact_to_i8x8_from_i32 (lane[0], lane[1], lane[2], lane[3],
                                     lane[4], lane[5], lane[6], lane[7]);
}

/* Short permute with optional negation: result lane k is the 16-bit
   source lane selected by bits [4k+1:4k] of 'indices', negated when
   bit 4k+2 of the selector field is set. */
inline static __raw_quad __raw_permute_8s (__int64 source, __int32 indices) {
  __int32 lane[8];
  __int32 k, v;
  for (k = 0; k < 8; k++, indices >>= 4) {
    v = __usel16ll (source, indices & 0x3);
    lane[k] = (indices & 0x4) ? (0 - v) : v;
  }
  return __raw_compose_128 (
    __raw_compose_64 (
      __raw_compact_to_i2x16 (__raw_compose_64 (lane[0], lane[1])),
      __raw_compact_to_i2x16 (__raw_compose_64 (lane[2], lane[3]))),
    __raw_compose_64 (
      __raw_compact_to_i2x16 (__raw_compose_64 (lane[4], lane[5])),
      __raw_compact_to_i2x16 (__raw_compose_64 (lane[6], lane[7]))));
}

/* DESPREAD reference implementation: complex multiply-accumulate of
   eight signed 8-bit I/Q input pairs against a +1/-1 spreading code,
   saturating the running sums to 16 bits after every addition.  The
   updated accumulator is written to *accumout packed as imag:real
   (16:16), and the code word -- shifted down by the 16 bits consumed
   -- to *codeout. */
inline static void __raw_despread_sub (__raw_quad inputs,
                                       __int64 codein,
                                       __int32 accumin,
                                       __int32 *accumout,
                                       __int64 *codeout) {

  __int4x16 code;
  __int32 inputi[8], inputq[8];
  __int32 codei, codeq;
  __int32 sumi, sumq;
  __int32 i;

  code = codein;

  /* initialise the real and imaginary parts of the sum */
  sumi = __sel16i (accumin, 0);
  sumq = __sel16i (accumin, 1);

  /* get the signed real and imaginary inputs */
  for (i=0; i<8; i++) {
    inputi[i] = __sel8q (inputs, i*2);
    inputq[i] = __sel8q (inputs, i*2+1);
  }

  for (i=0; i<8; i++) {
    /* get the +1/-1 real and imaginary parts of the spreading code */
    codei = (code & 1) ? -1 : 1;
    codeq = (code & 2) ? -1 : 1;

    /* complex multiply and add, saturate after each addition */
    sumi += codei * inputi[i] - codeq * inputq[i];
    sumi = __sat16 (sumi);
    sumq += codei * inputq[i] + codeq * inputi[i];
    sumq = __sat16 (sumq);

    /* shift the code bits down */
    code = code >> 2;
  }

  /* the results... */
  *accumout = (__int32) (sumq << 16) | (sumi & 0xffff);
  *codeout = code;
}

/* DESPREAD: accumulates eight complex input pairs against the
   spreading code (see __raw_despread_sub), returning the new
   accumulator in O1 and the shifted code in O2.  Wrapped in
   do/while(0) so the expansion is a single statement and is safe in
   an unbraced if/else; the output operands are parenthesized against
   low-precedence caller expressions. */
#define __raw_despread(I1,I2,I3,O1,O2)                    \
  do {                                                    \
    __int32 out1;                                         \
    __int4x16 out2;                                       \
    __raw_despread_sub ((I1), (I2), (I3), &out1, &out2);  \
    (O1) = out1;                                          \
    (O2) = out2;                                          \
  } while (0)

/* DESPREAD with interleave: as __raw_despread, but the code word is
   passed through __raw_interleave first.  Wrapped in do/while(0) so
   the expansion is a single statement and is safe in an unbraced
   if/else; the output operands are parenthesized against
   low-precedence caller expressions. */
#define __raw_despread_i(I1,I2,I3,O1,O2)                                 \
  do {                                                                   \
    __int32 out1;                                                        \
    __int4x16 out2;                                                      \
    __raw_despread_sub ((I1), __raw_interleave (I2), (I3), &out1, &out2); \
    (O1) = out1;                                                         \
    (O2) = out2;                                                         \
  } while (0)

/*
  Now we use macros to map the real builtin names onto the reference
  implementations.
*/

/* Type, composition and extraction primitives */
#define __builtin_quad                   __raw_quad
#define __builtin_low_32                 __raw_low_32
#define __builtin_low_32u                __raw_low_32u
#define __builtin_high_32                __raw_high_32
#define __builtin_high_32u               __raw_high_32u
#define __builtin_low_64                 __raw_low_64
#define __builtin_high_64                __raw_high_64
#define __builtin_compose_64             __raw_compose_64
#define __builtin_compose_64u            __raw_compose_64u
#define __builtin_compose_128            __raw_compose_128
#define __builtin_compact_to_i2x16       __raw_compact_to_i2x16
#define __builtin_compact_to_i2x16_sat   __raw_compact_to_i2x16_sat
#define __builtin_expand_i2x16           __raw_expand_i2x16
#define __builtin_expand_i2x16_ze        __raw_expand_i2x16_ze
/* fractional expand shares the integer expand implementation */
#define __builtin_expand_fr2x16          __raw_expand_i2x16
#define __builtin_compact_to_fr2x16      __raw_compact_to_fr2x16
#define __builtin_compact_to_fr2x16_trunc __raw_compact_to_fr2x16_trunc
#define __builtin_compact_to_fr4x8       __raw_compact_to_fr4x8
#define __builtin_compact_to_fr4x8_trunc __raw_compact_to_fr4x8_trunc
#define __builtin_compact_to_i4x8        __raw_compact_to_i4x8
#define __builtin_compact_to_i4x8_sat    __raw_compact_to_i4x8_sat
#define __builtin_expand_i4x8            __raw_expand_i4x8
#define __builtin_expand_i4x8_ze         __raw_expand_i4x8_ze
/* Scalar arithmetic */
#define __builtin_add_sat                __raw_add_sat
#define __builtin_uadd_sat               __raw_uadd_sat
#define __builtin_sub_sat                __raw_sub_sat
#define __builtin_usub_sat               __raw_usub_sat
#define __builtin_neg_sat                __raw_neg_sat
#define __builtin_neg_4x8_sat            __raw_neg_4x8_sat
#define __builtin_neg_8x8_sat            __raw_neg_8x8_sat
#define __builtin_avg                    __raw_avg
#define __builtin_lavg                   __raw_lavg
#define __builtin_llavg                  __raw_llavg
#define __builtin_lavgt                  __raw_lavgt
#define __builtin_avgt                   __raw_avgt
#define __builtin_abs                    __raw_abs
#define __builtin_llabs                  __raw_llabs
#define __builtin_clip                   __raw_clip
#define __builtin_lclip                  __raw_lclip
#define __builtin_llclip                 __raw_llclip
#define __builtin_max                    __raw_max
#define __builtin_llmax                  __raw_llmax
#define __builtin_min                    __raw_min
#define __builtin_llmin                  __raw_llmin
#define __builtin_mult_i1x32_wide        __raw_mult_i1x32_wide
#define __builtin_mult_u1x32_wide        __raw_mult_u1x32_wide
#define __builtin_recip                  __raw_recip
#define __builtin_rsqrt                  __raw_rsqrt
#define __builtin_fix                    __raw_fix
#define __builtin_exp                    __raw_exp
#define __builtin_exp2                   __raw_exp2
#define __builtin_lead_ones              __raw_lead_ones
#define __builtin_lllead_ones            __raw_lllead_ones
#define __builtin_lead_zero              __raw_lead_zero
#define __builtin_lllead_zero            __raw_lllead_zero
#define __builtin_count_ones             __raw_count_ones
#define __builtin_lcount_ones            __raw_lcount_ones
#define __builtin_llcount_ones           __raw_llcount_ones
#define __builtin_addbitrev              __raw_addbitrev
/* Packed 32-bit lanes */
#define __builtin_add_2x32               __raw_add_2x32
#define __builtin_add_2x32u              __raw_add_2x32u
#define __builtin_add_2x32_sat           __raw_add_2x32_sat
#define __builtin_add_u2x32_sat          __raw_add_u2x32_sat
#define __builtin_sub_2x32               __raw_sub_2x32
#define __builtin_sub_2x32u              __raw_sub_2x32u
#define __builtin_sub_2x32_sat           __raw_sub_2x32_sat
#define __builtin_sub_u2x32_sat          __raw_sub_u2x32_sat
#define __builtin_sum_2x32               __raw_sum_2x32
#define __builtin_add_4x32               __raw_add_4x32
#define __builtin_add_4x32_sat           __raw_add_4x32_sat
#define __builtin_add_u4x32_sat          __raw_add_u4x32_sat
#define __builtin_sub_4x32               __raw_sub_4x32
#define __builtin_sub_4x32_sat           __raw_sub_4x32_sat
#define __builtin_sub_u4x32_sat          __raw_sub_u4x32_sat
/* Packed 16-bit lanes */
#define __builtin_add_2x16               __raw_add_2x16
#define __builtin_add_u2x16              __raw_add_u2x16
#define __builtin_add_2x16_sat           __raw_add_2x16_sat
#define __builtin_add_u2x16_sat          __raw_add_u2x16_sat
#define __builtin_sub_2x16               __raw_sub_2x16
#define __builtin_sub_u2x16              __raw_sub_u2x16
#define __builtin_sub_2x16_sat           __raw_sub_2x16_sat
#define __builtin_sub_u2x16_sat          __raw_sub_u2x16_sat
#define __builtin_neg_2x16_sat           __raw_neg_2x16_sat
#define __builtin_abs_2x16               __raw_abs_2x16
#define __builtin_clip_2x16              __raw_clip_2x16
#define __builtin_merge_2x16             __raw_merge_2x16
#define __builtin_max_2x16               __raw_max_2x16
#define __builtin_max_u2x16              __raw_max_u2x16
#define __builtin_min_2x16               __raw_min_2x16
#define __builtin_min_u2x16              __raw_min_u2x16
#define __builtin_sum_2x16               __raw_sum_2x16
#define __builtin_mult_i2x16             __raw_mult_i2x16
#define __builtin_mult_u2x16             __raw_mult_u2x16
#define __builtin_mult_i2x16_sat         __raw_mult_i2x16_sat
#define __builtin_mult_i2x16_wide        __raw_mult_i2x16_wide
/* NOTE(review): _wide_sat maps onto the non-saturating wide multiply
   -- presumably a widened product cannot overflow, so saturation is
   a no-op; confirm against the compiler manual (same for
   __builtin_mult_i4x16_wide_sat below). */
#define __builtin_mult_i2x16_wide_sat    __raw_mult_i2x16_wide
#define __builtin_frmul                  __raw_mult_fr1x32
#define __builtin_frmulr                 __raw_multr_fr1x32
#define __builtin_frmul_sat              __raw_mult_fr1x32_sat
#define __builtin_frmulr_sat             __raw_multr_fr1x32_sat
#define __builtin_mult_sat               __raw_mult_sat
#define __builtin_mult_fr1x32            __raw_mult_fr1x32
#define __builtin_multr_fr1x32           __raw_multr_fr1x32
#define __builtin_mult_fr1x32_sat        __raw_mult_fr1x32_sat
#define __builtin_multr_fr1x32_sat       __raw_multr_fr1x32_sat
#define __builtin_cmult_i2x16            __raw_cmult_i2x16
#define __builtin_cmult_i2x16_sat        __raw_cmult_i2x16_sat
#define __builtin_cmult_conj_i2x16       __raw_cmult_conj_i2x16
#define __builtin_cmult_conj_i2x16_sat   __raw_cmult_conj_i2x16_sat
#define __builtin_cmult_i2x16_wide       __raw_cmult_i2x16_wide
#define __builtin_cmult_conj_i2x16_wide  __raw_cmult_conj_i2x16_wide
#define __builtin_mult_fr2x16            __raw_mult_fr2x16
#define __builtin_mult_fr2x16_sat        __raw_mult_fr2x16_sat
#define __builtin_multr_fr2x16           __raw_multr_fr2x16
#define __builtin_multr_fr2x16_sat       __raw_multr_fr2x16_sat
#define __builtin_cmult_fr2x16           __raw_cmult_fr2x16
#define __builtin_cmultr_fr2x16          __raw_cmultr_fr2x16
#define __builtin_cmult_fr2x16_sat       __raw_cmult_fr2x16_sat
#define __builtin_cmultr_fr2x16_sat      __raw_cmultr_fr2x16_sat
#define __builtin_cmult_conj_fr2x16      __raw_cmult_conj_fr2x16
#define __builtin_cmultr_conj_fr2x16     __raw_cmultr_conj_fr2x16
#define __builtin_cmult_conj_fr2x16_sat  __raw_cmult_conj_fr2x16_sat
#define __builtin_cmultr_conj_fr2x16_sat __raw_cmultr_conj_fr2x16_sat
#define __builtin_add_4x16               __raw_add_4x16
#define __builtin_add_4x16_sat           __raw_add_4x16_sat
#define __builtin_add_u4x16_sat          __raw_add_u4x16_sat
#define __builtin_sub_4x16               __raw_sub_4x16
#define __builtin_sub_4x16_sat           __raw_sub_4x16_sat
#define __builtin_sub_u4x16_sat          __raw_sub_u4x16_sat
#define __builtin_neg_4x16_sat           __raw_neg_4x16_sat
#define __builtin_abs_4x16               __raw_abs_4x16
#define __builtin_clip_4x16              __raw_clip_4x16
#define __builtin_max_4x16               __raw_max_4x16
#define __builtin_max_u4x16              __raw_max_u4x16
#define __builtin_min_4x16               __raw_min_4x16
#define __builtin_min_u4x16              __raw_min_u4x16
#define __builtin_sum_4x16               __raw_sum_4x16
#define __builtin_mult_i4x16             __raw_mult_i4x16
#define __builtin_mult_u4x16             __raw_mult_u4x16
#define __builtin_mult_i4x16_sat         __raw_mult_i4x16_sat
#define __builtin_mult_i4x16_wide        __raw_mult_i4x16_wide
#define __builtin_mult_i4x16_wide_sat    __raw_mult_i4x16_wide
#define __builtin_mult_u2x16_wide        __raw_mult_u2x16_wide
#define __builtin_mult_u4x16_wide        __raw_mult_u4x16_wide
#define __builtin_mult_fr4x16            __raw_mult_fr4x16
#define __builtin_mult_fr4x16_sat        __raw_mult_fr4x16_sat
#define __builtin_multr_fr4x16           __raw_multr_fr4x16
#define __builtin_multr_fr4x16_sat       __raw_multr_fr4x16_sat
/* Packed 8-bit lanes */
#define __builtin_add_4x8                __raw_add_4x8
#define __builtin_add_4x8_sat            __raw_add_4x8_sat
#define __builtin_add_u4x8_sat           __raw_add_u4x8_sat
#define __builtin_sub_4x8                __raw_sub_4x8
#define __builtin_sub_4x8_sat            __raw_sub_4x8_sat
#define __builtin_sub_u4x8_sat           __raw_sub_u4x8_sat
#define __builtin_abs_4x8                __raw_abs_4x8
#define __builtin_clip_4x8               __raw_clip_4x8
#define __builtin_merge_4x8              __raw_merge_4x8
#define __builtin_max_4x8                __raw_max_4x8
#define __builtin_min_4x8                __raw_min_4x8
#define __builtin_sum_4x8                __raw_sum_4x8
#define __builtin_add_8x8                __raw_add_8x8
#define __builtin_add_8x8_sat            __raw_add_8x8_sat
#define __builtin_add_u8x8_sat           __raw_add_u8x8_sat
#define __builtin_sub_8x8                __raw_sub_8x8
#define __builtin_sub_8x8_sat            __raw_sub_8x8_sat
#define __builtin_sub_u8x8_sat           __raw_sub_u8x8_sat
#define __builtin_sum_8x8                __raw_sum_8x8
#define __builtin_abs_8x8                __raw_abs_8x8
#define __builtin_clip_8x8               __raw_clip_8x8
#define __builtin_max_8x8                __raw_max_8x8
#define __builtin_min_8x8                __raw_min_8x8
/* Shifts and rotates */
#define __builtin_ashift_4x8             __raw_ashift_4x8
#define __builtin_ashift_8x8             __raw_ashift_8x8
#define __builtin_ashift_2x16            __raw_ashift_2x16
#define __builtin_ashift_4x16            __raw_ashift_4x16
#define __builtin_ashift_2x32            __raw_ashift_2x32
#define __builtin_lshift_4x8             __raw_lshift_4x8
#define __builtin_lshift_8x8             __raw_lshift_8x8
#define __builtin_lshift_2x16            __raw_lshift_2x16
#define __builtin_lshift_4x16            __raw_lshift_4x16
#define __builtin_lshift_2x32            __raw_lshift_2x32
#define __builtin_rotate_1x32            __raw_rotate_1x32
#define __builtin_rotate_2x32            __raw_rotate_2x32
#define __builtin_rotate_1x64            __raw_rotate_1x64
/* plain fext defaults to the sign-extending (se) implementation */
#define __builtin_fext                   __raw_fext_se
#define __builtin_fext_se                __raw_fext_se
#define __builtin_fext_ze                __raw_fext_ze
#define __builtin_fext2                  __raw_fext2_se
#define __builtin_fext2_se               __raw_fext2_se
#define __builtin_fext2_ze               __raw_fext2_ze
#define __builtin_fext_long_control      __raw_fext_long_control_se
#define __builtin_fext_long_control_se   __raw_fext_long_control_se
#define __builtin_fext_long_control_ze   __raw_fext_long_control_ze
#define __builtin_fext2_long_control     __raw_fext2_long_control_se
#define __builtin_fext2_long_control_se  __raw_fext2_long_control_se
#define __builtin_fext2_long_control_ze  __raw_fext2_long_control_ze
#define __builtin_fdep                   __raw_fdep
#define __builtin_fdep_se                __raw_fdep_se
#define __builtin_fdep_zf                __raw_fdep_zf
#define __builtin_fdep2                  __raw_fdep2
#define __builtin_fdep2_se               __raw_fdep2_se
#define __builtin_fdep2_zf               __raw_fdep2_zf
#define __builtin_fdep_long_control      __raw_fdep_long_control
#define __builtin_fdep_long_control_se   __raw_fdep_long_control_se
#define __builtin_fdep_long_control_zf   __raw_fdep_long_control_zf
#define __builtin_fdep2_long_control     __raw_fdep2_long_control
#define __builtin_fdep2_long_control_se  __raw_fdep2_long_control_se
#define __builtin_fdep2_long_control_zf  __raw_fdep2_long_control_zf
#define __builtin_dab_2x32               __raw_dab_2x32
#define __builtin_dab_4x32               __raw_dab_4x32
#define __builtin_dab_2x16               __raw_dab_2x16
#define __builtin_dab_4x16               __raw_dab_4x16
#define __builtin_dab_8x16               __raw_dab_8x16
#define __builtin_dabcb_2x32             __raw_dabcb_2x32
#define __builtin_dabcb_4x32             __raw_dabcb_4x32
#define __builtin_dabcb_2x16             __raw_dabcb_2x16
#define __builtin_dabcb_4x16             __raw_dabcb_4x16
#define __builtin_dabcb_8x16             __raw_dabcb_8x16
#define __builtin_circindex              __raw_circindex
#define __builtin_circptr                __raw_circptr
#define __builtin_tmax                   __raw_tmax
#define __builtin_tmax_4s                __raw_tmax_4s
#define __builtin_max_add                __raw_max_add
#define __builtin_max_sub                __raw_max_sub
#define __builtin_tmax_add               __raw_tmax_add
#define __builtin_tmax_sub               __raw_tmax_sub
#define __builtin_max_add_8s             __raw_max_add_8s
#define __builtin_max_sub_8s             __raw_max_sub_8s
#define __builtin_tmax_add_8s            __raw_tmax_add_8s
#define __builtin_tmax_sub_8s            __raw_tmax_sub_8s
#define __acs_max                        __raw_acs_max
#define __acs_tmax                       __raw_acs_tmax
#define __acs_max_8s                     __raw_acs_max_8s
#define __acs_tmax_8s                    __raw_acs_tmax_8s
#define __builtin_permute_8b             __raw_permute_8b
#define __builtin_permute_8s             __raw_permute_8s
#define __despread                       __raw_despread
#define __despread_i                     __raw_despread_i

/* Some things can be ignored if we are not __ADSPTS__ */

#if !defined (__ADSPTS__)

/* Ignore section specifiers in subsequent code (the argument is
   discarded entirely) */
#define section(X)

/* Ignore memory bank specifiers in subsequent code */
#define bank(X)

/* Ignore __builtin_aligned in subsequent code */
#define __builtin_aligned(X,Y)

/* Ignore __emuclk in subsequent code (always reads as zero) */
#define __emuclk() (0)

#endif

#if defined (__IGNORE_SYSREG_BUILTINS__)

/* Ignore __builtin_sysreg_read* / write* in subsequent code:
   reads yield zero, writes are discarded */

#define __builtin_sysreg_read(X) (0)
#define __builtin_sysreg_read2(X) (0)
#define __builtin_sysreg_read4(X) __builtin_compose_128 (0, 0)
#define __builtin_sysreg_write(X,Y)
#define __builtin_sysreg_write2(X,Y)
#define __builtin_sysreg_write4(X,Y)

#elif defined (__ADSPTS__) && defined (__USE_RAW_BUILTINS__)

/*
  If we are using the C reference builtins with the TigerSHARC Compiler
  we still want access to the sysreg builtins, however we cannot handle
  quad sysreg reads and writes because __raw_quad is not assignment
  compatible with the real __builtin_quad, and we cannot write a macro
  for quad sysreg read or write that references the real builtins.
  The sysreg builtin requires the register to be constant, so the read
  has to be done from a macro: we cannot use a function.
  In summary, use the builtins for 1 or 2 word sysregs, and ignore the
  4 word variants.
*/

#define __builtin_sysreg_read4(X) __builtin_compose_128 (0, 0)
#define __builtin_sysreg_write4(X,Y)

#endif

/* Ignore __builtin_idle / __builtin_idle_lp in subsequent code */

#if defined (__IGNORE_IDLE_BUILTINS__)

#define __builtin_idle()
#define __builtin_idle_lp()

#endif

#endif /* ! (defined (__ADSPTS__) && !defined (__USE_RAW_BUILTINS__)) */

/*
  XCORRS section: this will allow use of XCORRS builtins on non-TigerSHARC
  platforms and when compiling for ADSP-TS101 with the TigerSHARC compiler.
*/

#if !defined (__ADSPTS__) || \
    (defined (__ADSPTS101__) && defined (__USE_RAW_XCORRS__)) || \
    defined (__USE_RAW_BUILTINS__)

/* Extract byte number 'b' (0..15) from the quad word 'a' and
   sign-extend it to 32 bits.  Bytes 0..7 live in the low 64-bit half,
   bytes 8..15 in the high half. */
inline static __int32 __xsel8q (__builtin_quad a, __int32 b) {
  __int64 word;
  __int32 v;
  if (b <= 7) {
    word = __builtin_low_64 (a);
  } else {
    word = __builtin_high_64 (a);
    b -= 8;
  }
  v = ((__int32) (word >> (b << 3))) & 0xff;
  if (v & 0x80)
    v |= 0xffffff00;
  return v;
}

/* Extract 16-bit field number 'b' (0 = low half, 1 = high half) of 'a'
   and sign-extend it to 32 bits. */
inline static __int32 __xsel16i (__int32 a, __int32 b) {
  __int32 v = (a >> (b << 4)) & 0xffff;
  if (v & 0x8000)
    v |= 0xffff0000;
  return v;
}

/* Extract 16-bit field number 'b' (0..7) from the quad word 'a' and
   sign-extend it to 32 bits.  Fields 0..3 live in the low 64-bit half,
   fields 4..7 in the high half. */
inline static __int32 __xsel16q (__builtin_quad a, __int32 b) {
  __int64 word;
  __int32 v;
  if (b <= 3) {
    word = __builtin_low_64 (a);
  } else {
    word = __builtin_high_64 (a);
    b -= 4;
  }
  v = ((__int32) (word >> (b << 4))) & 0xffff;
  if (v & 0x8000)
    v |= 0xffff0000;
  return v;
}

/* Saturate a 32-bit value to the signed 16-bit range
   [-0x8000, 0x7fff].  Used by __raw_xcorrs after each accumulation
   step, so the result must stay a correctly signed 32-bit value: it is
   fed straight back into further additions before the final 16-bit
   compaction. */
inline static __int32 __xsat16 (__int32 a) {
  if (a > 0x7fff) {
    a = 0x7fff;
  } else if (a < -0x8000) {
    /* Clamp to the 16-bit minimum.  The previous code negated 'a'
       (undefined behaviour when a == INT_MIN) and clamped to +0x8000,
       which has the right low 16 bits but the wrong sign for any
       subsequent 32-bit accumulation. */
    a = -0x8000;
  }
  return a;
}

inline static __int64 __raw_interleave (__int64 codein) {
/*
   Bit-interleave the two 32-bit halves of 'codein', building the
   result MSB-first: each output bit pair takes one bit from the high
   half (ending up in the '& 0x2' position) and one from the low half
   (the '& 0x1' position).  The first loop consumes the top 16 bits of
   each half to form the new high word, the second loop consumes the
   remaining 16 bits to form the new low word.  Used by the __xcorrs_i
   variants to merge two separate code bit streams into the
   2-bits-per-chip format that __raw_xcorrs consumes.
   NOTE(review): 'high <<= 1' left-shifts a signed value that may be
   negative; this relies on common two's-complement shift behaviour.
*/

  __int32 low = __builtin_low_32 (codein);
  __int32 high = __builtin_high_32 (codein);
  __int32 newlow = 0, newhigh = 0;
  __int32 i;

  /* 15 iterations with a trailing shift, plus one final un-shifted
     merge, pack 16 bit pairs into newhigh */
  for (i=0; i<15; i++) {
    newhigh = newhigh | ((high >> 30) & 0x2) | ((low >> 31) & 0x1);
    high <<= 1; low <<=1; newhigh <<=2;
  }
  newhigh = newhigh | ((high >> 30) & 0x2) | ((low >> 31) & 0x1);
  high <<= 1; low <<=1;

  /* same again for the remaining 16 bits of each half */
  for (i=0; i<15; i++) {
    newlow = newlow | ((high >> 30) & 0x2) | ((low >> 31) & 0x1);
    high <<= 1; low <<=1; newlow <<=2;
  }
  newlow = newlow | ((high >> 30) & 0x2) | ((low >> 31) & 0x1);
  high <<= 1; low <<=1;

  return __builtin_compose_64 (newlow, newhigh);
}

/* Zero the 16 words of an XCORRS accumulator block and return the
   pointer for the caller's convenience. */
inline static __int32 *__raw_xcorrs_clear (__int32 *tr) {
  __int32 *p = tr;
  __int32 remaining;
  for (remaining = 16; remaining > 0; remaining--) {
    *p++ = 0;
  }
  return tr;
}

/*
  C reference implementation of the XCORRS operation: one step of a
  16-lag complex cross-correlation of 8 complex byte samples against a
  +1/-1 spreading code, with 16-bit saturation after every addition.

  inputs    - 8 complex samples packed as 16 signed bytes (I at even
              byte positions, Q at odd positions)
  codeslow  - low 64 bits of the code shift register, 2 bits per chip
              (bit 0 selects the I sign, bit 1 the Q sign); shifted
              down by 16 chips on exit
  codeshigh - high 64 bits of the code register; its bottom 16 chips
              refill codeslow on exit
  cut       - when negative, zeroes code chips at relative positions
              below -cut; when positive, zeroes chips at positions
              >= cut (supports partial correlations)
  accumq    - 16 accumulator words, each holding a packed 16-bit I,Q
              pair, updated in place
*/
inline static void __raw_xcorrs (__builtin_quad inputs,
                                 __int64 *codeslow,
                                 __int64 *codeshigh,
                                 __int32 cut,
                                 __builtin_quad *accumq) {
  __int64 code;
  __int32 inputi[8], inputq[8];
  __int32 codei, codeq;
  __int32 sumi, sumq;
  __int32 i, delay;
  __int32 *accum;

  accum = (__int32 *) accumq;

  /* get the signed real and imaginary inputs */
  for (i=0; i<8; i++) {
    inputi[i] = __xsel8q (inputs, i*2);
    inputq[i] = __xsel8q (inputs, i*2+1);
  }

  for (delay=15; delay>=0; delay--) {
    /* position the code register so its bottom 2 bits are the first
       chip for this lag */
    code = (*codeslow) >> ((16 - delay) * 2);
    /* initialise the real and imaginary parts of the sum */
    sumi = __xsel16i (accum[delay], 0);
    sumq = __xsel16i (accum[delay], 1);

    for (i=0; i<8; i++) {
      /* get the +1/-1 real and imaginary parts of the spreading code */
      codei = (code & 1) ? -1 : 1;
      codeq = (code & 2) ? -1 : 1;

      /* suppress chips outside the window selected by 'cut' */
      if ((cut < 0) && (i + 15 - delay < -cut)) {
        codei = codeq = 0;
      } else if ((cut > 0) && (15 - delay + i >= cut)) {
        codei = codeq = 0;
      }

      /* complex multiply and add, saturate after each addition */
      sumi += codei * inputi[i] - codeq * inputq[i];
      sumi = __xsat16 (sumi);
      sumq += codei * inputq[i] + codeq * inputi[i];
      sumq = __xsat16 (sumq);

      /* shift the code bits down */
      code = code >> 2;
    }

    /* the results... repack I and Q into one 2x16 accumulator word */
    accum[delay] =
      __builtin_compact_to_i2x16 (__builtin_compose_64 (sumi, sumq));
  }

  /* advance the 128-bit code shift register by 16 chips (32 bits) */
  *codeslow = (((*codeslow) >> 16) & __ULLCONST (0x0000ffffffffffff)) |
               ((*codeshigh) << 48);
  *codeshigh = ((*codeshigh) >> 16) & __ULLCONST (0x0000ffffffffffff);
}

/* XCORRS with accumulator clear: zero the 16 accumulator words, then
   run a normal __raw_xcorrs pass. */
inline static void __raw_xcorrs_clr (__builtin_quad inputs,
                                     __int64 *codeslow,
                                     __int64 *codeshigh,
                                     __int32 cut,
                                     __builtin_quad *accumq) {
  __int32 *words = (__int32 *) accumq;
  __raw_xcorrs_clear (words);
  __raw_xcorrs (inputs, codeslow, codeshigh, cut, accumq);
}

/* XCORRS with code interleave: merge the two 32-bit halves of the high
   code word into 2-bit chips before the correlation pass. */
inline static void __raw_xcorrs_i (__builtin_quad inputs,
                                   __int64 *codeslow,
                                   __int64 *codeshigh,
                                   __int32 cut,
                                   __builtin_quad *accumq) {
  __int64 merged = __raw_interleave (*codeshigh);
  *codeshigh = merged;
  __raw_xcorrs (inputs, codeslow, codeshigh, cut, accumq);
}

/* XCORRS with both accumulator clear and code interleave: zero the 16
   accumulator words, interleave the high code word, then correlate. */
inline static void __raw_xcorrs_i_clr (__builtin_quad inputs,
                                       __int64 *codeslow,
                                       __int64 *codeshigh,
                                       __int32 cut,
                                       __builtin_quad *accumq) {
  __int32 *words = (__int32 *) accumq;
  __int64 merged = __raw_interleave (*codeshigh);
  __raw_xcorrs_clear (words);
  *codeshigh = merged;
  __raw_xcorrs (inputs, codeslow, codeshigh, cut, accumq);
}

/*
  Extended-precision XCORRS: one step of an 8-lag complex
  cross-correlation of 4 complex 16-bit samples against a +1/-1
  spreading code.  Unlike __raw_xcorrs, each accumulator component is a
  full (non-saturated) 32-bit word, so each lag occupies two words.

  inputs    - 4 complex samples packed as 8 signed 16-bit fields (I at
              even positions, Q at odd positions)
  codeslow  - low 64 bits of the code shift register, 2 bits per chip
              (bit 0 selects the I sign, bit 1 the Q sign); shifted
              down by 8 chips on exit
  codeshigh - high 64 bits of the code register; its bottom 8 chips
              refill codeslow on exit
  cut       - when negative, zeroes code chips at relative positions
              below -cut; when positive, zeroes chips at positions
              >= cut (supports partial correlations)
  accumq    - 16 accumulator words, interpreted as 8 (I, Q) 32-bit
              pairs indexed by lag, updated in place
*/
inline static void __raw_xcorrs_ext (__builtin_quad inputs,
                                     __int64 *codeslow,
                                     __int64 *codeshigh,
                                     __int32 cut,
                                     __builtin_quad *accumq) {
  __int64 code;
  __int32 inputi[4], inputq[4];
  __int32 codei, codeq;
  __int32 sumi, sumq;
  __int32 i, delay;
  __int32 *accum;

  accum = (__int32 *) accumq;

  /* get the signed real and imaginary inputs */
  for (i=0; i<4; i++) {
    inputi[i] = __xsel16q (inputs, i*2);
    inputq[i] = __xsel16q (inputs, i*2+1);
  }

  for (delay=7; delay>=0; delay--) {
    /* position the code register so its bottom 2 bits are the first
       chip for this lag */
    code = (*codeslow) >> ((8 - delay) * 2);
    /* initialise the real and imaginary parts of the sum */
    sumi = accum[delay * 2];
    sumq = accum[delay * 2 + 1];

    for (i=0; i<4; i++) {
      /* get the +1/-1 real and imaginary parts of the spreading code */
      codei = (code & 1) ? -1 : 1;
      codeq = (code & 2) ? -1 : 1;

      /* suppress chips outside the window selected by 'cut' */
      if ((cut < 0) && (i + 7 - delay < -cut)) {
        codei = codeq = 0;
      } else if ((cut > 0) && (7 - delay + i >= cut)) {
        codei = codeq = 0;
      }

      /* complex multiply and add */
      sumi += codei * inputi[i] - codeq * inputq[i];
      sumq += codei * inputq[i] + codeq * inputi[i];

      /* shift the code bits down */
      code = code >> 2;
    }

    /* the results... */
    accum[delay*2+1] = sumq;
    accum[delay*2] = sumi;
  }

  /* advance the 128-bit code shift register by 8 chips (16 bits) */
  *codeslow = (((*codeslow) >> 8) & __ULLCONST (0x00ffffffffffffff)) |
               ((*codeshigh) << 56);
  *codeshigh = ((*codeshigh) >> 8) & __ULLCONST (0x00ffffffffffffff);
}

/* Extended XCORRS with accumulator clear: zero the 16 accumulator
   words, then run a normal __raw_xcorrs_ext pass. */
inline static void __raw_xcorrs_clr_ext (__builtin_quad inputs,
                                         __int64 *codeslow,
                                         __int64 *codeshigh,
                                         __int32 cut,
                                         __builtin_quad *accumq) {
  __int32 *words = (__int32 *) accumq;
  __raw_xcorrs_clear (words);
  __raw_xcorrs_ext (inputs, codeslow, codeshigh, cut, accumq);
}

/* Extended XCORRS with code interleave: merge the two 32-bit halves of
   the high code word into 2-bit chips before the correlation pass. */
inline static void __raw_xcorrs_i_ext (__builtin_quad inputs,
                                       __int64 *codeslow,
                                       __int64 *codeshigh,
                                       __int32 cut,
                                       __builtin_quad *accumq) {
  __int64 merged = __raw_interleave (*codeshigh);
  *codeshigh = merged;
  __raw_xcorrs_ext (inputs, codeslow, codeshigh, cut, accumq);
}

/* Extended XCORRS with both accumulator clear and code interleave:
   zero the accumulators, interleave the high code word, correlate. */
inline static void __raw_xcorrs_i_clr_ext (__builtin_quad inputs,
                                           __int64 *codeslow,
                                           __int64 *codeshigh,
                                           __int32 cut,
                                           __builtin_quad *accumq) {
  __int32 *words = (__int32 *) accumq;
  __int64 merged = __raw_interleave (*codeshigh);
  __raw_xcorrs_clear (words);
  *codeshigh = merged;
  __raw_xcorrs_ext (inputs, codeslow, codeshigh, cut, accumq);
}

/* Public XCORRS entry points map onto the reference implementations
   above. */
#define __xcorrs                         __raw_xcorrs
#define __xcorrs_clr                     __raw_xcorrs_clr
#define __xcorrs_ext                     __raw_xcorrs_ext
#define __xcorrs_clr_ext                 __raw_xcorrs_clr_ext
#define __xcorrs_i                       __raw_xcorrs_i
#define __xcorrs_i_clr                   __raw_xcorrs_i_clr
#define __xcorrs_i_ext                   __raw_xcorrs_i_ext
#define __xcorrs_i_clr_ext               __raw_xcorrs_i_clr_ext

#endif /* !defined (__ADSPTS__) ||
          (defined (__ADSPTS101__) && defined (__USE_RAW_XCORRS__)) ||
          defined (__USE_RAW_BUILTINS__) */

#if defined (__cplusplus)
}
#endif

#endif /* __BUILTINS_DEFINED */
