#include <exception>
#include "exectuor.h"
#include "merkle.h"
#include "rand.h"

/*
new variable D:\work\snarkVM\circuit\environment\src\helpers\r1cs.rs
bitxor_assign 0?    1:0   D:\work\snarkVM\circuit\types\boolean\src\xor.rs
bitand_assign             D:\work\snarkVM\circuit\types\boolean\src\and.rs
ternary       true? 1:0   D:\work\snarkVM\circuit\types\boolean\src\ternary.rs
*/

//Exectuor
__device__ Exectuor::Exectuor()
{
	// Link the m_psd2 helper back to this executor instance.
	m_psd2.setExectuor(this);

	// Default 32-byte epoch hash and address; both can be replaced later via setHash().
	uint8_t epoch_hash_bytes[] = { 0x09, 0x91, 0xa7, 0xa4, 0x4d, 0xee, 0xbb, 0xe7, 0x5a, 0x64, 0xc4, 0xe1, 0x85, 0x3a, 0x1e, 0xad, 0x48, 0xd8, 0xa9, 0x98, 0x3c, 0x97, 0xe5, 0xf2, 0x39, 0xf3, 0x7a, 0xc1, 0x3d, 0xbf, 0x8f, 0x12 };
	uint8_t address_bytes[] = { 0x26, 0xab, 0x19, 0x72, 0x41, 0xa7, 0x46, 0x4b, 0xea, 0x5b, 0x9e, 0x40, 0x9b, 0x2a, 0x60, 0xb8, 0x0a, 0x9e, 0x9a, 0xf8, 0x57, 0x3f, 0x43, 0x3f, 0x6f, 0xc3, 0xeb, 0x55, 0xde, 0x4a, 0x92, 0x0c };
	m_epoch_hash = toBytes(reinterpret_cast<char*>(epoch_hash_bytes), sizeof(epoch_hash_bytes));
	m_address = toBytes(reinterpret_cast<char*>(address_bytes), sizeof(address_bytes));
}

__device__ Exectuor::~Exectuor()
{
	// Nothing to release explicitly; members clean up in their own destructors.
}

__device__ void Exectuor::inWitness(bool flag)
{
	// Entering a witness scope (flag == true) pushes a marker: `true` only when
	// this is the outermost scope, `false` for nested ones. Leaving pops it.
	if (!flag)
	{
		m_witness.pop_back();
		return;
	}
	m_witness.push_back(m_witness.size() == 0);
}

__device__ CuVariant Exectuor::getReg(int op)
{
	// Return a copy of register `op`.
	const CuVariant& slot = m_regs[op];
	return slot;
}

__device__ void Exectuor::setReg(int op, const CuVariant& value)
{
	// The stored value must match the program's declared type for this register.
	assert(m_program->regType[op] == variantType(value));
	// Registers at index 150 and above hold constants; lower ones must not.
	const bool is_const_reg = (op >= 150);
	assert(is_const_reg ? (variantMode(value) == Constant) : (variantMode(value) != Constant));
	m_regs[op] = value;
}

__device__ void Exectuor::setHash(const char *epoch_hash, const char *address)
{
	// Both inputs are read as 32 raw bytes.
	m_address = toBytes(address, 32);
	m_epoch_hash = toBytes(epoch_hash, 32);
}

__device__ void Exectuor::setProgram(Program* p)
{
	// Install the program and materialize its constant registers from the
	// packed little-endian byte stream in p->constValue.
	m_program = p;
	const char* cursor = p->constValue;
	for (int i = 0; i < p->constRegSize; i++)
	{
		const int reg_id = CONST_REG_START + i;
		const int reg_type = p->regType[reg_id];
		// Round the type's bit width up to whole bytes.
		const int nbytes = (typeBitSize(reg_type) + 7) / 8;
		BitArray bits = bytes_to_bits_le(toBytes(cursor, nbytes));
		setReg(reg_id, variantFromBitsLe(reg_type, bits));
		cursor += nbytes;
	}
}

__device__ const CuVector<Field>& Exectuor::newVariable()
{
	// Read-only access to the list of allocated private variables.
	return m_newVariable;
}

__device__ int Exectuor::newVariableCount()
{
	// Number of private variables allocated so far.
	const int count = m_newVariable.size();
	return count;
}

__device__ void Exectuor::addNewVariable(Field f)
{
	// Only private-mode fields may become circuit variables.
	assert(f.mode == Private);
	// While inside a witness scope, variable allocation is suppressed.
	if (m_witness.size() == 0)
		m_newVariable.push_back(f);
}

__device__ void Exectuor::addNewVariableList(const BitArray& bits, int size)
{
	// Allocate one private variable per bit for the first `size` bits.
	for (int i = 0; i < size; i++)
		addNewVariable(Field::create(bits[i].mode, bits[i].value));
}

__device__ int Exectuor::rand_input(int countor)
{
	// Seed the PRNG from the epoch hash, the address and the counter, then
	// fill every input register with a random value of its declared type.
	MyRandom r;
	uint64_t sln_id = r.init(m_epoch_hash, m_address, countor);

	for (int reg = 0; reg < m_program->inputRegSize; reg++)
	{
		switch (m_program->regType[reg])
		{
		case boolean: setReg(reg, CuVariant::fromBoolean(r.rand_bool())); break;
		case i8:      setReg(reg, CuVariant::fromInteger(r.rand_i8()));   break;
		case i16:     setReg(reg, CuVariant::fromInteger(r.rand_i16()));  break;
		case i32:     setReg(reg, CuVariant::fromInteger(r.rand_i32()));  break;
		case i64:     setReg(reg, CuVariant::fromInteger(r.rand_i64()));  break;
		case i128:    setReg(reg, CuVariant::fromInteger(r.rand_i128())); break;
		case u8:      setReg(reg, CuVariant::fromInteger(r.rand_u8()));   break;
		case u16:     setReg(reg, CuVariant::fromInteger(r.rand_u16()));  break;
		case u32:     setReg(reg, CuVariant::fromInteger(r.rand_u32()));  break;
		case u64:     setReg(reg, CuVariant::fromInteger(r.rand_u64()));  break;
		case field:   setReg(reg, CuVariant::fromField(r.rand_field()));  break;
		// NOTE(review): u128 is not handled even though i128 is — confirm
		// whether u128 inputs are impossible here or this is an oversight.
		default:
			assert(0);
			break;
		}
	}
	// NOTE(review): sln_id is uint64_t but the return type is int, so the value
	// is truncated — confirm callers only need the low bits.
	return sln_id;
}

__device__ bool Exectuor::run()
{
	// Executes the loaded program. First publishes the public inputs, then
	// runs every instruction in order. Always returns true.

	// The first public value is always the constant one.
	m_newPublic.push_back(Field::constant(1));

	// Expose every input register as public values: non-field registers are
	// decomposed into little-endian bits, field registers are pushed whole.
	for (int i = 0; i < m_program->inputRegSize; i++)
	{
		auto v = getReg(i);
		if (v.typeId() != Type::field)
		{
			auto bits = variantToBitsLe(v);
			// Use a distinct index name: the original shadowed the outer `i`.
			for (int j = 0; j < bits.size(); j++)
			{
				auto& bit = bits[j];
				m_newPublic.push_back(Field::create(bit.mode, bit.value));
			}
		}
		else
		{
			m_newPublic.push_back(v.toField());
		}
	}

	// Execute the program instruction by instruction.
	// (Removed the unused `pre` snapshot of m_newVariable.size().)
	for (int i = 0; i < m_program->instSize; i++)
	{
		exec(&m_program->inst[i]);
		//cuDebug() << i << m_newVariable.size();
	}
	return true;
}

__device__ uint64_t Exectuor::calcHash(bool calc_public)
{
	// Build the Merkle leaves: one little-endian bit decomposition per field,
	// optionally including the public values first.
	CuVector<BitArray> leaves;
	if (calc_public)
	{
		for (int i = 0; i < m_newPublic.size(); i++)
			leaves.push_back(m_newPublic[i].toBitsLe());
	}
	for (int i = 0; i < m_newVariable.size(); i++)
		leaves.push_back(m_newVariable[i].toBitsLe());

	// Pad with 254-bit all-zero leaves up to the next power of 8, as the
	// Merkle computation expects.
	const int padded = checked_next_power_of_n(leaves.size(), 8);
	leaves.resize(padded, BitArray(254, Boolean::constant(false)));

	return calcMerkle(leaves);
}

__device__ Boolean Exectuor::bit_ternary(Boolean condition, Boolean first, Boolean second)
{
	// Constant condition: pick the branch directly.
	if (condition.is_constant())
		return condition.value ? first : second;

	// Constant `first`: (c | second) when first==1, (!c & second) when first==0.
	if (first.is_constant())
	{
		if (first.value)
			return bit_or(condition, second);
		return bit_and(bit_not(condition), second);
	}

	// Constant `second`: (!c | first) when second==1, (c & first) when second==0.
	if (second.is_constant())
	{
		if (second.value)
			return bit_or(bit_not(condition), first);
		return bit_and(condition, first);
	}

	// All variables: witness the selected value as a new private boolean.
	// The constraint built from this is expected to keep `output` in {0, 1},
	// assuming `first` and `second` are well-formed booleans.
	bool witness = condition.value ? first.value : second.value;
	auto output = Boolean::create(Mode::Private, witness);
	addNewVariable(Field::from_boolean(output));
	return output;
}

__device__ Boolean Exectuor::bit_is_equal(Boolean self, Boolean other)
{
	// Equality is the negated inequality; only the witness value is flipped,
	// the result's mode is kept unchanged.
	Boolean inequality = bit_is_not_equal(self, other);
	inequality.value = !inequality.value;
	return inequality;
}

__device__ Boolean Exectuor::bit_is_not_equal(Boolean self, Boolean other)
{
	// Two booleans differ exactly when their XOR is set.
	return bit_xor(self, other);
}

__device__ Boolean Exectuor::bit_and(Boolean self, Boolean other)
{
	// Constant `self`: 1 & x == x, 0 & x == 0 (i.e. `self` itself).
	if (self.is_constant())
		return self.value ? other : self;

	// Constant `other`: symmetric case.
	if (other.is_constant())
		return other.value ? self : other;

	// Both variables: witness the conjunction as a new private boolean.
	// The constraint built from this keeps `output` in {0, 1}, assuming the
	// operands are well-formed booleans.
	Boolean output = Boolean::create(Mode::Private, self.value && other.value);
	addNewVariable(Field::from_boolean(output));
	return output;
}

__device__ Boolean Exectuor::bit_nand(Boolean self, Boolean other)
{
	// Constant `self`: !(1 & x) == !x, !(0 & x) == 1 == !self.
	if (self.is_constant())
		return self.value ? bit_not(other) : bit_not(self);

	// Constant `other`: symmetric case.
	if (other.is_constant())
		return other.value ? bit_not(self) : bit_not(other);

	// Both variables: witness !(self & other) as a new private boolean.
	// The corresponding constraint shape is: self * other = (1 - output),
	// i.e. output is 1 iff self or other is 0.
	Boolean output = Boolean::create(Mode::Private, !(self.value && other.value));
	addNewVariable(Field::from_boolean(output));
	return output;
}

__device__ Boolean Exectuor::bit_or(Boolean self, Boolean other)
{
	// Constant `self`: 1 | x == 1 == self, 0 | x == x.
	if (self.is_constant())
		return self.value ? self : other;

	// Constant `other`: symmetric case.
	if (other.is_constant())
		return other.value ? other : self;

	// Both variables: witness the disjunction as a new private boolean.
	Boolean output = Boolean::create(Mode::Private, self.value || other.value);
	addNewVariable(Field::from_boolean(output));
	return output;
}

__device__ Boolean Exectuor::bit_nor(Boolean self, Boolean other)
{
	// Constant `self`: !(1 | x) == 0 == !self, !(0 | x) == !x.
	if (self.is_constant())
		return self.value ? bit_not(self) : bit_not(other);

	// Constant `other`: symmetric case.
	if (other.is_constant())
		return other.value ? bit_not(other) : bit_not(self);

	// Both variables: witness !(self | other) as a new private boolean.
	Boolean output = Boolean::create(Mode::Private, !self.value && !other.value);
	addNewVariable(Field::from_boolean(output));
	return output;
}

__device__ Boolean Exectuor::bit_xor(Boolean self, Boolean other)
{
	// Constant `self`: 1 ^ x == !x, 0 ^ x == x.
	if (self.is_constant())
		return self.value ? bit_not(other) : other;

	// Constant `other`: symmetric case.
	if (other.is_constant())
		return other.value ? bit_not(self) : self;

	// Both variables: witness self ^ other as a new private boolean.
	Boolean output = Boolean::create(Mode::Private, self.value ^ other.value);
	addNewVariable(Field::from_boolean(output));
	return output;
}

__device__ Boolean Exectuor::bit_not(Boolean self)
{
	// Negate the witness value; a negated non-constant always becomes Private.
	Boolean ret = self;
	ret.value = !ret.value;
	if (!ret.is_constant())
		ret.mode = Private;
	return ret;
}

__device__ Integer Exectuor::integer_abs_checked(const Integer& self)
{
	// Unsigned values are already their own absolute value.
	if (!self.is_signed())
		return self;

	// Signed: when the sign bit is set, select 0 - self (checked subtraction).
	Integer negated = integer_sub_checked(Integer::zero(self.type()), self);
	return integer_ternary(self.msb(), negated, self);
}

__device__ Integer Exectuor::integer_abs_wrapped(const Integer& self)
{
	// Unsigned values are already their own absolute value.
	if (!self.is_signed())
		return self;

	// Signed: when the sign bit is set, select 0 - self (wrapping subtraction,
	// so Integer::MIN maps to itself).
	Integer negated = integer_sub_wrapped(Integer::zero(self.type()), self);
	return integer_ternary(self.msb(), negated, self);
}

__device__ Integer Exectuor::integer_add_checked(const Integer& self, const Integer& other) 
{
	// Checked addition of two same-type integers. Non-constant operands also
	// witness the sum's bit decomposition as private variables.
	// Determine the variable mode.
	if (self.is_constant() && other.is_constant()) {
		// Compute the sum and return the new constant.
		return self + other;
	}
	else if (self.is_signed()) {
		// Instead of adding the bits of `self` and `other` directly, the integers are
		// converted into a field elements, and summed, before converting back to integers.
		// Note: This is safe as the field is larger than the maximum integer type supported.
		auto f_sum = self.toField() + other.toField();
		// Witness the integer bits plus one carry bit.
		addNewVariableList(f_sum.toBitsLe(), self.bitSize() + 1);

		// Extract the integer bits from the field element, ignoring the carry bit as it is not relevant for signed addition.
		Integer sum = toInteger(f_sum, self.type());

		// For signed addition, overflow and underflow conditions are:
		//   - a > 0 && b > 0 && a + b < 0 (Overflow)
		//   - a < 0 && b < 0 && a + b > 0 (Underflow)
		//   - Note: if sign(a) != sign(b) then over/underflow is impossible.
		//   - Note: the result of an overflow and underflow must be negative and positive, respectively.
		Boolean is_same_sign = bit_is_equal(self.msb(),other.msb());
		Boolean is_overflow = bit_and(is_same_sign , bit_is_not_equal(sum.msb(),self.msb()));
		// NOTE(review): `is_overflow` is never consumed. bit_is_equal/bit_and do
		// allocate private variables as a side effect, but no visible constraint
		// enforces non-overflow here — confirm against the reference circuit.

		return sum;
	}
	else {
		// Instead of adding the bits of `self` and `other` directly, witness the integer sum.
		auto sum = self.toField() + other.toField();
		addNewVariableList(sum.toBitsLe(), self.bitSize());

		return toInteger(sum,self.type());
	}
}

__device__ Integer Exectuor::integer_add_wrapped(const Integer& self, const Integer& other) 
{
	// Wrapping addition. The sum is formed in the base field, which is wide
	// enough to hold any supported integer sum without losing information.
	auto sum = self.toField() + other.toField();

	// For non-constant operands, witness the integer bits plus the carry bit;
	// the carry is then dropped by the truncating conversion below.
	if (!(self.is_constant() && other.is_constant()))
		addNewVariableList(sum.toBitsLe(), self.bitSize() + 1);

	// Truncate back to the integer width (wrapping semantics).
	return toInteger(sum, self.type());
}

__device__ Integer Exectuor::integer_and(const Integer& self, const Integer& other) 
{
	// Bitwise AND, computed bit by bit over the little-endian decompositions.
	auto lhs_bits = self.toBitsLe();
	auto rhs_bits = other.toBitsLe();
	const int n = lhs_bits.size();
	BitArray result(n);
	for (int i = 0; i < n; i++)
		result[i] = bit_and(lhs_bits[i], rhs_bits[i]);

	return Integer::fromBitsLe(self.type(), result);
}

__device__ Boolean Exectuor::integer_is_less_than(const Integer& self, const Integer& other){
	// Returns the circuit boolean for `self < other`. Non-constant paths also
	// witness the bits of the comparison expression as private variables.
	// Determine the variable mode.
	if (self.is_constant() && other.is_constant()) {
		// Compute the comparison and return the new constant.
		return Boolean::constant(self < other);
	}
	else if (self.is_signed()) {
		// Compute the less than operation via a sign and overflow check.
		// If sign(a) != sign(b), then a < b, if a is negative and b is positive.
		// If sign(b) == sign(a), then a < b if the carry bit of I::NEG_ONE + a - b + 1 is set.
		Boolean same_sign = bit_is_equal(self.msb(),other.msb());
		Boolean self_is_negative_and_other_is_positive = bit_and(self.msb() , bit_not(other.msb()));
		Field negative_one_plus_difference_plus_one =
			Integer::constant(self.type(), -1).toField() + self.toField() - other.toField()
			+ Field::one();

		// Witness the integer bits plus the carry bit of the expression above.
		auto bits = negative_one_plus_difference_plus_one.toBitsLe();
		addNewVariableList(bits, self.bitSize() + 1);
		// Same sign: a < b iff the carry bit is clear; different sign: a < b
		// iff `a` is the negative operand.
		return bit_ternary(same_sign, bit_not(bits[self.bitSize()]), self_is_negative_and_other_is_positive);
	}
	else {
		// Compute the less than operation via an overflow check.
		// If Integer::MAX + a - b + 1 overflows, then a >= b, otherwise a < b.
		auto max_plus_difference_plus_one =
			self.max().toField() + self.toField() - other.toField()
			+ Field::one();

		auto bits = max_plus_difference_plus_one.toBitsLe();
		addNewVariableList(bits, self.bitSize() + 1);
		// a < b iff the carry bit is clear.
		return bit_not(bits[self.bitSize()]);
	}
}

/// Returns `true` if `self` is greater than `other`.
__device__ Boolean Exectuor::integer_is_greater_than(const Integer& self, const Integer& other) {
	// a > b is exactly b < a.
	Boolean result = integer_is_less_than(other, self);
	return result;
}

/// Returns `true` if `self` is less than or equal to `other`.
__device__ Boolean Exectuor::integer_is_less_than_or_equal(const Integer& self, const Integer& other)
{
	// a <= b is exactly b >= a.
	Boolean result = integer_is_greater_than_or_equal(other, self);
	return result;
}

/// Returns `true` if `self` is greater than or equal to `other`.
__device__ Boolean Exectuor::integer_is_greater_than_or_equal(const Integer& self, const Integer& other) {
	// a >= b is the negation of a < b.
	Boolean less = integer_is_less_than(self, other);
	return bit_not(less);
}

__device__ Integer Exectuor::integer_div_checked(const Integer& self, const Integer& other) 
{
	// Checked division: self / other, with overflow-detection plumbing for the
	// signed Integer::MIN / -1 case.
	if (self.is_constant() && other.is_constant()) {
		// If `other` is a constant and is zero, then halt.
		return self / other;
	}
	else
	{
		// Handle the remaining cases.
		// Note that `other` is either a constant and non-zero, or not a constant.
		if (self.is_signed()) {
			// Ensure that overflow cannot occur in this division.
			// Signed integer division wraps when the dividend is Integer::MIN and the divisor is -1.
			auto min = self.min();
			auto neg_one = Integer::constant(self.type(), -1);
			auto tmp1 = integer_is_equal(self, min);
			auto tmp2 = integer_is_equal(other, neg_one);
			auto overflows = bit_and(tmp1, tmp2);
			// NOTE(review): `overflows` is never consumed; the calls above emit
			// private variables as side effects, but no visible constraint
			// rejects the MIN / -1 case — confirm against the reference circuit.

			// Divide the absolute value of `self` and `other` in the base field.
			// Note that it is safe to use `abs_wrapped`, since the case for console::Integer::MIN is handled above.
			auto unsigned_dividend = integer_abs_wrapped(self).cast_as_dual();
			// Note that `unsigned_divisor` is zero iff `other` is zero.
			auto unsigned_divisor = integer_abs_wrapped(other).cast_as_dual();
			// Note that this call to `div_wrapped` checks that `unsigned_divisor` is not zero.
			auto unsigned_quotient = integer_div_wrapped(unsigned_dividend,unsigned_divisor);
			
			// Note that quotient <= |console::Integer::MIN|, since the dividend <= |console::Integer::MIN| and 0 <= quotient <= dividend.
			auto signed_quotient = Integer::fromBitsLe(self.type(), unsigned_quotient.toBitsLe());
			auto operands_same_sign = bit_is_equal(self.msb(),other.msb());

			// Re-apply the sign: negate the quotient when the operand signs differ.
			return integer_ternary(operands_same_sign, signed_quotient, integer_sub_wrapped(Integer::zero(self.type()), signed_quotient));
		}
		else {
			// Return the quotient of `self` and `other`.
			// Note that this call to `div_wrapped` checks that `unsigned_divisor` is not zero.
			return integer_div_wrapped(self, other);
		}
	}
}

__device__ DivisionRet Exectuor::integer_unsigned_division_via_witness(const Integer& self, const Integer& other)
{
	// Witness-based unsigned division: computes quotient and remainder with
	// native integer ops, then emits the variables/checks tying them back to
	// the operands. Returns { quotient, remainder }.
	// Eject the dividend and divisor, to compute the quotient as a witness.
	int var_bitSize = self.bitSize();
	auto dividend_value = self;
	// Note: This band-aid was added to prevent a panic when the divisor is 0.
	Integer divisor_value;
	if (other == Integer::constant(self.type(),0))
		divisor_value = Integer::constant(self.type(), 1);
	else
		divisor_value = other;

	// Overflow is not possible for unsigned integers so we use wrapping operations.
	auto quotient = dividend_value / divisor_value;
	auto remainder = dividend_value % divisor_value;
	addNewVariableList(quotient.toBitsLe(), var_bitSize);
	addNewVariableList(remainder.toBitsLe(), var_bitSize);

	if (2 * var_bitSize < 253) {
		// Ensure that Euclidean division holds for these values in the base field.
		// NOTE(review): the product is discarded; field_mul is presumably called
		// only for its variable-emission side effects, and unlike the branch
		// below the remainder is never added here — confirm this is intended.
		field_mul(quotient.toField() , other.toField());
	}
	else {
		// Ensure that Euclidean division holds for these values as integers.
		// (Both results are discarded; the calls emit variables/checks as side effects.)
		auto tmp = integer_mul_checked(quotient, other);
		integer_add_checked(tmp, remainder);
	}

	// Ensure that the remainder is less than the divisor.
	// Note that if this check is satisfied and `other` is an unsigned integer, then `other` is not zero.
	// (Result discarded; called for its variable-emission side effects.)
	integer_is_less_than(remainder, other);

	// Return the quotient and remainder of `self` and `other`.
	return { quotient, remainder };
}

__device__ Integer Exectuor::integer_div_wrapped(const Integer& self, const Integer& other)
{
	// Wrapping division: like checked division, but MIN / -1 wraps to MIN
	// instead of being rejected. Signed division recurses on the unsigned duals.
	if (self.is_constant() && other.is_constant()) {
		// If `other` is a constant and is zero, then halt.
		return self / other;
	}
	else
	{
		if(self.is_signed()) {
			// Divide the absolute value of `self` and `other` in the base field.
			auto unsigned_dividend = integer_abs_wrapped(self).cast_as_dual();
			// Note that `unsigned_divisor` is zero iff `other` is zero.
			auto unsigned_divisor = integer_abs_wrapped(other).cast_as_dual();
		
			// Note that this call to `div_wrapped` checks that `unsigned_divisor` is not zero.
			auto unsigned_quotient = integer_div_wrapped(unsigned_dividend,unsigned_divisor);

			//  Note that quotient <= |console::Integer::MIN|, since the dividend <= |console::Integer::MIN| and 0 <= quotient <= dividend.
			auto signed_quotient = Integer::fromBitsLe(self.type(), unsigned_quotient.toBitsLe());
			auto operands_same_sign = bit_is_equal(self.msb(),other.msb());

			// Note that this expression handles the wrapping case, where the dividend is `I::MIN` and the divisor is `-1` and the result should be `I::MIN`.
			return integer_ternary(operands_same_sign, signed_quotient, integer_sub_wrapped(Integer::zero(self.type()), signed_quotient));
		}
		else {
			// Unsigned: delegate to the witness-based division and keep the quotient.
			auto ret = integer_unsigned_division_via_witness(self,other);
			return ret.r0;
		}
	}
}

__device__ Boolean Exectuor::integer_is_equal(const Integer& self, const Integer& other)
{
	// Both constant: fold to a constant boolean.
	if (self.is_constant() && other.is_constant())
		return Boolean::constant(self == other);

	// Otherwise compare as field elements; the field is larger than the
	// widest supported integer, so the mapping is injective.
	return field_is_equal(self.toField(), other.toField());
}

__device__ Boolean Exectuor::integer_is_not_equal(const Integer& self, const Integer& other) 
{
	// Inequality is the negated equality check.
	Boolean equal = integer_is_equal(self, other);
	return bit_not(equal);
}

__device__ Integer Exectuor::integer_modulo(const Integer& self, const Integer& other) 
{
	// Modulo is delegated to the wrapped remainder.
	return integer_rem_wrapped(self, other);
}

__device__ KaratsubaRet Exectuor::integer_karatsuba_multiply(const Integer& self, const Integer& other)
{
	// Karatsuba-style multiplication, used when two full operands do not fit
	// in one field element. Returns { truncated product, upper bits of the
	// witnessed product, z_2 partial product }.
	int var_size = self.bitSize();

	// Split each operand into a low half (x_0/y_0) and a high half (x_1/y_1).
	auto this_bits_le = self.toBitsLe();
	auto that_bits_le = other.toBitsLe();
	auto x_1 = Field::fromBitsLe(this_bits_le.mid((var_size / 2)));
	auto x_0 = Field::fromBitsLe(this_bits_le.mid(0, (var_size / 2)));
	auto y_1 = Field::fromBitsLe(that_bits_le.mid((var_size / 2)));
	auto y_0 = Field::fromBitsLe(that_bits_le.mid(0, (var_size / 2)));

	// Karatsuba partial products:
	//   z_0 = x0*y0, z_2 = x1*y1, z_1 = (x1+x0)*(y1+y0) - z_2 - z_0.
	auto z_0 = field_mul(x_0, y_0);
	auto z_2 = field_mul(x_1, y_1);
	auto z_1 = field_mul((x_1 + x_0), (y_1 + y_0)) - z_2 - z_0;
	// NOTE(review): z_0 and z_1 are not consumed below; the field_mul calls
	// emit private variables as side effects — confirm that is the intent.

	// Witness the low var_size + var_size/2 + 1 bits of the full product.
	auto ret = self.toField() * other.toField();
	auto ret_bits = ret.toBitsLe();
	ret_bits.resize(var_size + var_size / 2 + 1);
	addNewVariableList(ret_bits, var_size + var_size / 2 + 1);

	return { toInteger(ret, self.type()), ret_bits.mid(var_size), z_2 };
}

__device__ Integer Exectuor::integer_mul_and_check(const Integer& self, const Integer& other)
{
	const int field_size_in_bits = 253;
	const int var_size = self.bitSize();

	// Case 1: two full operands fit in one field element
	// (u8..u64, i8..i64) — witness the integer product directly.
	if (2 * var_size < (field_size_in_bits - 1))
	{
		auto product = self * other;
		addNewVariableList(product.toBitsLe(), self.bitSize());
		return product;
	}

	// Case 2: 1.5 operands fit in one field element (u128, i128) —
	// fall back to Karatsuba multiplication.
	if ((var_size + var_size / 2) < (field_size_in_bits - 1))
		return integer_karatsuba_multiply(self, other).r0;

	// Unsupported width.
	assert(0);
	return Integer();
}

__device__ Integer Exectuor::integer_mul_checked(const Integer& self, const Integer& other) 
{
	// Checked multiplication with overflow-detection plumbing for signed types.
	// Determine the variable mode.
	if (self.is_constant() && other.is_constant()) {
		// Compute the product and return the new constant.
		return self * other;
	}
	else if( self.is_signed()) {
		// Compute the product of `abs(self)` and `abs(other)`, while checking for an overflow.
		// Note: it is safe to use `abs_wrapped` as we want `Integer::MIN` to be interpreted as an unsigned number.
		auto abs_ret1 = integer_abs_wrapped(self);
		auto abs_ret2 = integer_abs_wrapped(other);
		auto product = integer_mul_and_check(abs_ret1, abs_ret2);

		// If the product should be positive, then it cannot exceed the signed maximum.
		auto operands_same_sign = bit_is_equal(self.msb(),other.msb());
		auto positive_product_overflows = bit_and(operands_same_sign , product.msb());
		// NOTE(review): `positive_product_overflows` is never consumed — the
		// gadget calls emit variables as side effects; confirm no constraint
		// should consume it.

		// If the product should be negative, then it cannot exceed the absolute value of the signed minimum.
		{
			int var_bitSize = self.bitSize();
			// OR together the low (bitSize - 1) product bits.
			auto lower_product_bits_nonzero = Boolean::constant(false);
			auto product_bits_le = product.toBitsLe();
			for (int i = 0; i < var_bitSize - 1; i++)
			{
				lower_product_bits_nonzero = bit_or(lower_product_bits_nonzero, product_bits_le[i]);
			}

			auto negative_product_lt_or_eq_signed_min =
				bit_or(bit_not(product.msb()) , bit_and(product.msb() , bit_not(lower_product_bits_nonzero)));
			// NOTE(review): the result below is discarded; the call exists for
			// its variable-emission side effects.
			bit_and(bit_not(operands_same_sign), bit_not(negative_product_lt_or_eq_signed_min));
		}

		// Note that the relevant overflow cases are checked independently above.
		// Return the product of `self` and `other` with the appropriate sign.
		return integer_ternary(operands_same_sign, product, integer_sub_wrapped(Integer::zero(self.type()), product));
	}
	else {
		// Compute the product of `self` and `other`, while checking for an overflow.
		return integer_mul_and_check(self, other);
	}
}

__device__ Integer Exectuor::integer_mul_wrapped(const Integer& self, const Integer& other) 
{
	// Wrapping multiplication via a Babbage-style half-word decomposition.
	// Determine the variable mode.
	if (self.is_constant() && other.is_constant()) {
		// Compute the product and return the new constant.
		return self * other;
	}
	else {
		// Perform multiplication by decomposing it into operations on its upper and lower bits.
		// See this page for reference: https://en.wikipedia.org/wiki/Karatsuba_algorithm.
		// We follow the naming convention given in the `Basic Step` section of the cited page.
		// Note that currently here we perform Babbage multiplication, not Karatsuba multiplication;
		// however, since we do not need to calculate z2 here,
		// Babbage involves three multiplication, same as Karatsuba.
		// For integers with size less than 128, this algorithm saves approximately 0.5 * var_bitSize
		// constraints compared to a field multiplication.
		auto self_bits_le = self.toBitsLe();
		auto other_bits_le = other.toBitsLe();
		int bit_size = self.bitSize();
		// Low and high halves of each operand.
		Field x_1 = Field::fromBitsLe(self_bits_le.mid(bit_size / 2));
		Field x_0 = Field::fromBitsLe(self_bits_le.mid(0,bit_size / 2));
		Field y_1 = Field::fromBitsLe(other_bits_le.mid(bit_size / 2));
		Field y_0 = Field::fromBitsLe(other_bits_le.mid(0, bit_size / 2));

		// z_0 = x0*y0; z_1 = x1*y0 + x0*y1 (z_2 is not needed for wrapping).
		Field z_0 = field_mul(x_0 , y_0);

		Field tmp1 = field_mul(x_1, y_0);
		Field tmp2 = field_mul(x_0, y_1);
		Field z_1 = field_add(tmp1 ,tmp2 );

		// b_m = 2^(bit_size/2), built from its bit pattern.
		BitArray b_m_bits = BitArray(bit_size/2,Boolean::constant(false));
		b_m_bits.push_back(Boolean::constant(true));
		auto b_m = Field::fromBitsLe(b_m_bits);
		// product (mod 2^bit_size after truncation) = z_0 + z_1 * 2^(bit_size/2).
		auto z_0_plus_scaled_z_1 = z_0 + (z_1 * b_m);

		// Witness the low 1.5*bit_size+1 bits of the combined product.
		addNewVariableList(z_0_plus_scaled_z_1.toBitsLe(), bit_size + bit_size / 2 + 1);
		
		// Truncate back to the integer width (wrapping semantics).
		return toInteger(z_0_plus_scaled_z_1,self.type());
	}
}

__device__ Integer Exectuor::integer_neg(const Integer& self) 
{
	// Two's-complement negation: -x == ~x + 1 (checked addition).
	auto one = Integer::constant(self.type(), 1);
	return integer_add_checked(one, integer_not(self));
}

__device__ Integer Exectuor::integer_not(const Integer& self)
{
	// Bitwise complement.
	Integer flipped = ~self;
	return flipped;
}

__device__ Integer Exectuor::integer_or(const Integer& self, const Integer& other) 
{
	// Bitwise OR, computed bit by bit over the little-endian decompositions.
	auto lhs_bits = self.toBitsLe();
	auto rhs_bits = other.toBitsLe();
	const int n = lhs_bits.size();
	BitArray result(n);
	for (int i = 0; i < n; i++)
		result[i] = bit_or(lhs_bits[i], rhs_bits[i]);

	return Integer::fromBitsLe(self.type(), result);
}

__device__ MulRet Exectuor::integer_mul_with_flags(const Integer& self, const Integer& that)
{
	// Multiplies `self` by `that` and also returns an overflow flag.
	// Returns { product, overflow_flag }.
	int var_bitSize = self.bitSize();
	int field_size = 253;
	// Case 1 - 2 integers fit in 1 field element (u8, u16, u32, u64, i8, i16, i32, i64).
	if (2 * var_bitSize < (field_size - 1)) {
		// Instead of multiplying the bits of `self` and `other`, witness the integer product.
		auto product = self * that;
		addNewVariableList(product.toBitsLe(), var_bitSize);

		// Check that the computed product is not equal to witnessed product, in the base field.
		// Note: The multiplication is safe as the field twice as large as the maximum integer type supported.
		auto computed_product = field_mul(self.toField() , that.toField());
		auto witnessed_product = product.toField();
		// Overflow iff the full field product differs from the truncated product.
		auto flag = field_is_not_equal(computed_product,witnessed_product);

		// Return the product of `self` and `other` and the overflow flag.
		return { product, flag };
	}
	// Case 2 - 1.5 integers fit in 1 field element (u128, i128).
	else if ((var_bitSize + var_bitSize / 2) < (field_size - 1)) {
		// Use Karatsuba multiplication to compute the product of `self` and `other` and the carry bits.
		auto kara_ret = integer_karatsuba_multiply(self, that);
		auto product = kara_ret.r0;
		auto z_1_upper_bits = kara_ret.r1;
		auto z2 = kara_ret.r2;

		// Reconstruct the upper bits of z_1 in the field.
		Field z_1_upper_field = Field::fromBitsLe(z_1_upper_bits);
		// Compute whether the sum of z_1_field and z_2 is zero.
		Field z_1_upper_field_plus_z_2 = field_add(z_1_upper_field , z2);
		// Overflow iff any high-order partial-product contribution is nonzero.
		auto flag = field_is_not_equal(z_1_upper_field_plus_z_2, Field::zero());

		// Return the product of `self` and `other` and the overflow flag.
		return { product, flag };
	}
	else {
		// Unsupported width.
		assert(0);
		return MulRet();
	}
}

__device__ Integer Exectuor::integer_pow_checked(const Integer& self, const Integer& other) 
{
	// Checked exponentiation via square-and-multiply over the exponent bits
	// (MSB first), with overflow-detection plumbing on the signed path.
	int var_bitSize = self.bitSize();
	// Determine the variable mode.
	if (self.is_constant() && other.is_constant()) {
		// Constant folding is not implemented.
		assert(0);
		return Integer();
	}
	else {
		Integer result = Integer::constant(self.type(), 1);

		// TODO (@pranav) In each step, we check that we have not overflowed,
		//  yet we know that in the first step, we do not need to check and
		//  in general we do not need to check for overflow until we have found
		//  the second bit that has been set. Optimize.
		auto other_bits_le = other.toBitsLe();
		for (int i = other_bits_le.size() - 1; i >= 0; i--)
		{
			auto bit = other_bits_le[i];
			result = integer_mul_checked(result,result);

			Integer result_times_self;
			if (self.is_signed()) 
			{
				// Multiply the absolute value of `self` and `other` in the base field.
				// Note: it is safe to use `abs_wrapped` since we want `Integer::MIN` to be interpreted as an unsigned number.
				auto abs_ret1 = integer_abs_wrapped(result);
				auto abs_ret2 = integer_abs_wrapped(self);
				auto mul_ret = integer_mul_with_flags(abs_ret1, abs_ret2);
				auto product = mul_ret.r0;
				auto overflow = mul_ret.r1;

				// If the product should be positive, then it cannot exceed the signed maximum.
				auto operands_same_sign = bit_is_equal(result.msb(),self.msb());
				auto positive_product_overflows = bit_and(operands_same_sign , product.msb());

				// If the product should be negative, then it cannot exceed the absolute value of the signed minimum.
				// BUGFIX: previously the loop accumulated into the uninitialized
				// `negative_product_underflows` instead of
				// `lower_product_bits_nonzero`, whose accumulation was then
				// discarded. Now mirrors integer_mul_checked, including the use
				// of bit_not() instead of operator!.
				auto lower_product_bits_nonzero = Boolean::constant(false);
				auto product_bits = product.toBitsLe().mid(0, var_bitSize);
				for (int j = 0; j < product_bits.size(); j++)
				{
					lower_product_bits_nonzero = bit_or(lower_product_bits_nonzero, product_bits[j]);
				}
				auto negative_product_lt_or_eq_signed_min = bit_or(
					bit_not(product.msb()) , bit_and(product.msb() , bit_not(lower_product_bits_nonzero)));
				auto negative_product_underflows = bit_and(bit_not(operands_same_sign), bit_not(negative_product_lt_or_eq_signed_min));
				
				overflow = bit_or(overflow , positive_product_overflows);
				// NOTE(review): the two results below are discarded; the calls
				// presumably exist for their variable-emission side effects —
				// confirm against the reference circuit.
				bit_or(overflow , negative_product_underflows);
				bit_and(overflow, bit);

				// Return the product of `self` and `other` with the appropriate sign.
				result_times_self = integer_ternary(operands_same_sign, product, integer_add_wrapped(integer_not(product), Integer::constant(self.type(), 1)));
			}
			else {
				auto mul_ret = integer_mul_with_flags(result, self);
				auto product = mul_ret.r0;
				auto overflow = mul_ret.r1;

				// For unsigned multiplication, check that the overflow flag is not set.
				// (Result discarded; side effects only.)
				bit_and(overflow, bit);

				// Return the product of `self` and `other`.
				result_times_self = product;
			}

			// Multiply into the accumulator only when this exponent bit is set.
			result = integer_ternary(bit, result_times_self, result);
		}
		return result;
	}
}

__device__ Integer Exectuor::integer_pow_wrapped(const Integer& self, const Integer& other) 
{
	// Constant folding is not implemented for pow.
	if (self.is_constant() && other.is_constant()) {
		assert(0);
		return Integer();
	}

	// Square-and-multiply over the exponent bits, most significant first,
	// using wrapping multiplication throughout.
	auto result = Integer::constant(self.type(), 1);
	auto exponent_bits = other.toBitsLe();
	for (int i = exponent_bits.size() - 1; i >= 0; i--)
	{
		result = integer_mul_wrapped(result, result);
		result = integer_ternary(exponent_bits[i], integer_mul_wrapped(result, self), result);
	}
	return result;
}

__device__ Integer Exectuor::integer_rem_checked(const Integer& self, const Integer& other) 
{
	// Checked remainder of `self` divided by `other` (rounding toward zero).
	// NOTE(review): the signed non-constant path is unfinished — it computes the
	// unsigned remainder but returns a default-constructed Integer (see the
	// commented-out Rust tail below). Confirm before relying on signed inputs.
	if (self.is_constant() && other.is_constant()) {
		// Both operands constant: fold the remainder directly on the values.
		return self % other;
	}
	else
	{
		// Handle the remaining cases.
		// Note that `other` is either a constant and non-zero, or not a constant.
		if (self.is_signed()) {
			// Ensure that overflow cannot occur when computing the associated division operations.
			// Signed integer division overflows when the dividend is Integer::MIN and the divisor is -1.
			auto min = self.min();
			auto neg_one = Integer::constant(self.type(), - 1);
			// NOTE(review): `overflows` is computed but never enforced or used here.
			auto overflows = bit_and(integer_is_equal(self,min) , integer_is_equal(other,neg_one));

			// Divide the absolute value of `self` and `other` in the base field.
			auto unsigned_dividend = integer_abs_wrapped(self).cast_as_dual();
			// Note that `unsigned_divisor` is zero iff `other` is zero.
			auto unsigned_divisor = integer_abs_wrapped(other).cast_as_dual();
			// Note that this call to `rem_wrapped` checks that `unsigned_divisor` is not zero.
			auto unsigned_remainder = integer_rem_wrapped(unsigned_dividend,unsigned_divisor);

			/*
			Integer signed_remainder = Self{ bits_le: unsigned_remainder.bits_le, phantom : Default::default() };

			// The remainder takes on the same sign as `self` because the division operation rounds towards zero.
			return integer_ternary(bit_not(self.msb()), signed_remainder, &Self::zero().sub_wrapped(&signed_remainder));
			*/
			return Integer();
		}
		else {
			// Return the remainder of `self` and `other`.
			// Note that this call to `rem_wrapped` checks that `unsigned_divisor` is not zero.
			return integer_rem_wrapped(self, other);
		}
	}
}

__device__ Integer Exectuor::integer_rem_wrapped(const Integer& self, const Integer& other) 
{
	// Wrapping remainder of `self` divided by `other`. Division rounds toward
	// zero, so the remainder takes the sign of `self`. A non-constant divisor
	// is checked for zero inside `integer_unsigned_division_via_witness`.
	if(self.is_constant() && other.is_constant()) {
		// Both operands constant: fold the remainder directly on the values.
		return self % other;
	}
	else
	{
		if (self.is_signed()) {
			// Divide the absolute value of `self` and `other` in the base field.
			auto unsigned_dividend = integer_abs_wrapped(self).cast_as_dual();
			// Note that `unsigned_divisor` is zero iff `other` is zero.
			auto unsigned_divisor = integer_abs_wrapped(other).cast_as_dual();

			// Note that this call to `rem_wrapped` checks that `unsigned_divisor` is not zero.
			// (Recursive call taken on the unsigned dual type, which falls into the
			// `else` branch below.)
			auto unsigned_remainder = integer_rem_wrapped(unsigned_dividend, unsigned_divisor);
			
			// Reinterpret the unsigned remainder's bits as the signed type.
			auto signed_remainder = Integer::fromBitsLe(self.type(), unsigned_remainder.toBitsLe());

			// The remainder takes on the same sign as `self` because the division operation rounds towards zero.
			return integer_ternary(bit_not(self.msb()), signed_remainder, integer_sub_wrapped(Integer::zero(self.type()), signed_remainder));
		}
		else {
			// Unsigned case: the witness-based division returns (quotient r0, remainder r1).
			auto ret = integer_unsigned_division_via_witness(self, other);
			return ret.r1;
		}
	}
}

__device__ Integer Exectuor::integer_shl_checked(const Integer& self, const Integer& rhs) 
{
	// Checked left shift: computes `self << rhs` as multiplication by 2^rhs,
	// either directly in the base field (when the widened result still fits
	// below the field modulus) or via checked pow/mul on integers.
	int var_bitSize = self.bitSize();
	// Data-bit capacity of the base field.
	int field_size = 253;

	// Retrieve the index for the first upper bit from the RHS that we mask.
	auto first_upper_bit_index = log2f(var_bitSize);
	// Initialize a constant `two`.
	Integer two = Integer::constant(self.type(), 2);
	if( self.is_signed()) 
	{
		if (3 * var_bitSize < field_size) {
			// Enforce that the upper bits of `rhs` are all zero.
			// NOTE(review): that enforcement is elided in this port — confirm.

			// Sign-extend `self` to 2 * var_bitSize.
			auto bits_le = self.toBitsLe();
			bits_le.resize(2 * var_bitSize, self.msb());

			// Calculate the result directly in the field.
			// Since 2^{rhs} < Integer::MAX and 3 * var_bitSize is less than E::BaseField::size in data bits,
			// we know that the operation will not overflow the field modulus.
			auto result = Field::fromBitsLe(bits_le);
			auto rhs_bits_le = rhs.toBitsLe();
			for (int i = 0; i < first_upper_bit_index; i++) {
				// In each iteration, multiply the result by 2^(1<<i), if the bit is set.
				// Note that instantiating the field from a u128 is safe since it is larger than all eligible integer types.
				auto bit = rhs_bits_le[i];
				auto constant = Field::constant((uint256_t)pow(2,1 << i));
				auto product = field_mul(result , constant);
				result = field_ternary(bit, product, result);
			}
			
			// Extract the bits of the result, including the carry bits, and
			// register them as new circuit variables.
			bits_le = result.toBitsLe();
			addNewVariableList(bits_le,3 * var_bitSize);

			// Split the bits into the lower and upper bits.
			auto lower_bits_le = bits_le.mid(0,var_bitSize);
			// Initialize the integer from the lower bits.
			auto ret = Integer::fromBitsLe(self.type(), lower_bits_le);
			// Ensure that the sign of the first var_bitSize upper bits match the sign of the result.
			// NOTE(review): that sign enforcement is elided in this port — confirm.
			
			// Return the result.
			return ret;
		}
		else {
			// Compute 2 ^ `rhs` as unsigned integer of the size var_bitSize.
			// This is necessary to avoid a spurious overflow when `rhs` is var_bitSize - 1.
			// For example, 2i8 ^ 7i8 overflows, however -1i8 << 7i8 ==> -1i8 * 2i8 ^ 7i8 ==> -128i8, which is a valid i8 value.
			auto unsigned_two = two.cast_as_dual();
			// Note that `pow_checked` is used to enforce that `rhs` < var_bitSize.
			auto unsigned_factor = integer_pow_checked(unsigned_two,rhs);
			// For all values of `rhs` such that `rhs` < var_bitSize,
			//  - if `rhs` == var_bitSize - 1, `signed_factor` == I::MIN,
			//  - otherwise, `signed_factor` is the same as `unsigned_factor`.
			auto signed_factor = Integer::fromBitsLe(self.type() , unsigned_factor.toBitsLe());

			// If `signed_factor` is I::MIN, then negate `self` in order to balance the sign of I::MIN.
			auto signed_factor_is_min = integer_is_equal(signed_factor,self.min());
			auto lhs = integer_ternary(signed_factor_is_min, integer_sub_wrapped(Integer::zero(self.type()),self), self);

			// Compute `lhs` * `factor`, which is equivalent to `lhs` * 2 ^ `rhs`.
			return integer_mul_checked(lhs, signed_factor);
		}
	}
	else
	{
		if (2 * var_bitSize < field_size) {
			// Enforce that the upper bits of `rhs` are all zero.
			// NOTE(review): that enforcement is elided in this port — confirm.

			// Calculate the result directly in the field.
			// Since 2^{rhs} < Integer::MAX and 2 * var_bitSize is less than E::BaseField::size in data bits,
			// we know that the operation will not overflow Integer::MAX or the field modulus.
			auto result = self.toField();
			auto rhs_bits_le = rhs.toBitsLe();
			for (int i = 0; i < rhs_bits_le.size(); i++) {
				// In each iteration, multiply the result by 2^(1<<i), if the bit is set.
				// Note that instantiating the field from a u128 is safe since it is larger than all eligible integer types.
				auto bit = rhs_bits_le[i];
				auto constant = Field::constant((uint256_t)pow(2, 1 << i));
				auto product = field_mul(result, constant);
				result = field_ternary(bit, product, result);
			}
			// Extract the bits of the result, including the carry bits, and
			// register them as new circuit variables.
			auto bits_le = result.toBitsLe();
			addNewVariableList(bits_le,2 * var_bitSize);
			// Split off the lower `var_bitSize` bits of the result.
			// FIX: previously this took `bits_le.mid(var_bitSize)` — the upper/carry
			// bits — despite the `lower_bits_le` name; the signed branch above and
			// `integer_shl_wrapped` both take `mid(0, var_bitSize)`.
			auto lower_bits_le = bits_le.mid(0, var_bitSize);
			// Ensure that the carry bits are all zero.
			// NOTE(review): that enforcement is elided in this port — confirm.
			
			// Initialize the integer from the lower bits
			return Integer::fromBitsLe(self.type(), lower_bits_le);
		}
		else {
			// Compute `lhs` * 2 ^ `rhs`.
			// Note that `pow_checked` enforces `rhs` < var_bitSize.
			return integer_mul_checked(self, integer_pow_checked(two, rhs));
		}
	}
}

__device__ Integer Exectuor::integer_shl_wrapped(const Integer& self, const Integer& rhs)
{
	// Wrapping left shift: computes `self << (rhs mod var_bitSize)` by
	// exponentiation-and-multiplication, discarding bits shifted past the top.
	int var_bitSize = self.bitSize();
	// Data-bit capacity of the base field.
	int field_size = 253;

	// Determine the variable mode.
	if (self.is_constant() && rhs.is_constant()) {
		// Note: Casting `rhs` to a `u32` is safe since `Magnitude`s can only be `u8`, `u16`, or `u32`.
		return self.wrapping_shl(rhs);
	}
	else {
		// Retrieve the index for the first upper bit from the RHS that we mask.
		auto first_upper_bit_index = log2f(var_bitSize);

		// Perform the left shift operation by exponentiation and multiplication.
		// By masking the upper bits, we have that rhs < var_bitSize.
		// Therefore, 2^{rhs} < Integer::MAX.

		// Zero-extend `rhs` by `8`.
		// NOTE(review): `bits_le` below is never used afterwards — apparent dead
		// code (the "use U8 for the exponent" path it prepared is not implemented).
		auto rhs_bits_le = rhs.toBitsLe();
		auto bits_le = rhs_bits_le.mid(0,first_upper_bit_index);
		bits_le.append(BitArray(8,Boolean::constant(false)));

		// Use U8 for the exponent as it costs fewer constraints.

		if (rhs.is_constant()) {
			// If the shift amount is a constant, then we can manually shift in bits and truncate the result.
			return self.wrapping_shl(rhs);
		}
		else if (2 * var_bitSize < field_size) {
			// Calculate the result directly in the field.
			// Since 2^{rhs} < Integer::MAX and 2 * var_bitSize is less than E::BaseField::size in data bits,
			// we know that the operation will not overflow Integer::MAX or the field modulus.
			auto result = self.toField();
			auto rhs_bits_le = rhs.toBitsLe();
			// Only the lower `first_upper_bit_index` bits of `rhs` participate,
			// which masks the shift amount to rhs mod var_bitSize.
			for (int i = 0; i < first_upper_bit_index; i++) {
				// In each iteration, multiply the result by 2^(1<<i), if the bit is set.
				// Note that instantiating the field from a u128 is safe since it is larger than all eligible integer types.
				auto bit = rhs_bits_le[i];
				auto constant = Field::constant((uint256_t)pow(2, 1 << i));
				auto product = field_mul(result, constant);
				result = field_ternary(bit, product, result);
			}
			// Extract the bits of the result, including the carry bits, and
			// register them as new circuit variables.
			auto bits_le = result.toBitsLe();
			addNewVariableList(bits_le,2 * var_bitSize);
			// Initialize the integer, ignoring the carry bits.
			return Integer::fromBitsLe(self.type(), bits_le.mid(0, var_bitSize));
		}
		else {
			// Calculate the value of the shift directly in the field.
			// Since 2^{rhs} < Integer::MAX, we know that the operation will not overflow Integer::MAX or the field modulus.
			auto two = Field::one() + Field::one();
			auto shift_in_field = Field::one();
			auto rhs_bits_le = rhs.toBitsLe();
			// Square-and-multiply over the masked shift bits, MSB first:
			// yields shift_in_field = 2^(rhs mod var_bitSize).
			for (int i = first_upper_bit_index - 1; i >= 0; i--) {
				auto bit = rhs_bits_le[i];
				shift_in_field = field_square(shift_in_field);
				shift_in_field = field_ternary(bit, field_mul(shift_in_field , two), shift_in_field);
			}
			// TODO (@pranav) Avoid initializing the integer.
			auto shift_in_field_bits = shift_in_field.toBitsLe();
			addNewVariableList(shift_in_field_bits,var_bitSize);
			auto shift_as_multiplicand = Integer::fromBitsLe(self.type(), shift_in_field_bits.mid(0, var_bitSize));
			return integer_mul_wrapped(self, shift_as_multiplicand);
		}
	}
}

__device__ Integer Exectuor::integer_shr_checked(const Integer& self, const Integer& rhs)
{
	// Checked right shift: computes `self >> rhs`.
	// Determine the variable mode.
	if (self.is_constant() && rhs.is_constant()) {
		// This cast is safe since `Magnitude`s can only be `u8`, `u16`, or `u32`.
		return self.wrapping_shr(rhs);
	}
	else {
		// Determine the index where the first upper bit of the RHS must be zero.
		// There is at least one trailing zero, as var_bitSize = 8, 16, 32, 64, or 128.

		// NOTE(review): the "checked" semantics would enforce that the upper bits
		// of `rhs` are all zero (i.e. rhs < bit size). That enforcement is elided
		// here, so this currently behaves like the wrapping variant — confirm.

		// Perform a wrapping shift right.
		return integer_shr_wrapped(self, rhs);
	}
}

__device__ Integer Exectuor::integer_shr_wrapped(const Integer& self, const Integer& rhs)
{
	// Wrapping right shift: computes `self >> (rhs mod var_bitSize)`.
	// Signed inputs use an arithmetic shift (sign-extending); unsigned inputs a
	// logical shift. Implemented via field arithmetic or witness division.
	int var_bitSize = self.bitSize();
	// Data-bit capacity of the base field.
	int field_size = 253;

	// Determine the variable mode.
	if (self.is_constant() && rhs.is_constant()) {
		// Note: Casting `rhs` to `u32` is safe since `Magnitude`s can only be `u8`, `u16`, or `u32`.
		return self.wrapping_shr(rhs);
	}
	else {
		// Retrieve the index for the first upper bit from the RHS that we mask.
		auto first_upper_bit_index = log2f(var_bitSize);

		// Perform the right shift operation by exponentiation and multiplication.
		// By masking the upper bits, we have that rhs < var_bitSize.
		// Therefore, 2^{rhs} < Integer::MAX.
		
		if (rhs.is_constant()) {
			// If the shift amount is a constant, then we can manually shift in bits and truncate the result.
			// FIX: the shift amount must come from the lower bits of `rhs`
			// (previously read from `self.toBitsLe()`, which shifted by a value
			// derived from the operand being shifted).
			BitArray lower_rhs_bits = rhs.toBitsLe().mid(0, first_upper_bit_index);
			// Zero-pad the masked shift bits up to a u8's width.
			lower_rhs_bits.resize(8, Boolean::constant(false));

			uint32_t shift_amount = (int64_t)Integer::fromBitsLe(Type::u8, lower_rhs_bits).value();

			BitArray bits_le = self.toBitsLe();

			if (self.is_signed()) {
				// Sign-extend `self` by `shift_amount`.
				bits_le.append(BitArray(shift_amount, self.msb()));
			}
			else
			{
				// Zero-extend `self` by `shift_amount`.
				bits_le.append(BitArray(shift_amount, Boolean::constant(false)));
			}

			// Drop the lowest `shift_amount` bits by reversing, truncating to
			// `var_bitSize`, and reversing back.
			bits_le.reverse();
			bits_le.resize(var_bitSize);
			bits_le.reverse();

			return Integer::fromBitsLe(self.type(), bits_le);
		}
		else if (2 * var_bitSize < field_size) {
			if (self.is_signed()) {
				// Initialize the msb of `self` as a field element.
				auto msb_field = Field::from_boolean(self.msb());

				// The signed right-shift is implemented as an unsigned right-shift followed by a sign-extension.
				// Initialize the result from the reversed bits of `self`.
				auto result = Field::fromBitsBe(self.toBitsLe());

				// Calculate the result directly in the field.
				// Since 2^{rhs} < Integer::MAX and 2 * var_bitSize is less than E::BaseField::size in data bits,
				// we know that the operation will not overflow the field modulus.
				auto rhs_bits_le = rhs.toBitsLe();
				for (int i = 0; i < first_upper_bit_index; i++) {
					// In each iteration, multiply the result by 2^(1<<i), if the bit is set.
					// Note that instantiating the field from a u128 is safe since it is larger than all eligible integer types.
					auto bit = rhs_bits_le[i];
					auto constant = Field::constant((uint256_t)pow(2, 1 << i));
					auto product = field_mul(result , constant);

					// If `self` is negative, mask the value with 2^{1<<i} - 1.
					// For example, in the first, second, and third iterations, the mask is 0b1, 0b11, and 0b111, respectively.
					// This serves to appropriately sign-extend the result.
					auto mask = Field::constant((uint256_t)pow(2, 1 << i) - 1);
					auto masked = field_add(product, field_mul(mask , msb_field));

					result = field_ternary(bit, masked, result);
				}

				// Extract the bits of the result, including the carry bits, and
				// register them as new circuit variables.
				auto bits_le = result.toBitsLe();
				addNewVariableList(bits_le,2 * var_bitSize);
				bits_le = bits_le.mid(0, var_bitSize);

				// Reverse the bits (the computation above worked on reversed bits).
				bits_le.reverse();

				// Initialize the integer, ignoring the carry bits.
				return Integer::fromBitsLe(self.type(), bits_le);
			}
			else {
				// The unsigned right-shift is implemented as a left-shift over the reversed bits of `self`.
				// Initialize the result from the reversed bits of `self`.
				auto result = Field::fromBitsBe(self.toBitsLe());
				// Calculate the result directly in the field.
				// Since 2^{rhs} < Integer::MAX and 2 * var_bitSize is less than E::BaseField::size in data bits,
				// we know that the operation will not overflow the field modulus.
				auto rhs_bits_le = rhs.toBitsLe();
				for (int i = 0; i < first_upper_bit_index; i++) {
					// In each iteration, multiply the result by 2^(1<<i), if the bit is set.
					// Note that instantiating the field from a u128 is safe since it is larger than all eligible integer types.
					auto bit = rhs_bits_le[i];
					auto constant = Field::constant((uint256_t)pow(2, 1 << i));
					auto product = field_mul(result, constant);
					result = field_ternary(bit, product, result);
				}
				
				// Extract the bits of the result, including the carry bits, and
				// register them as new circuit variables.
				auto bits_le = result.toBitsLe();
				addNewVariableList(bits_le, 2 * var_bitSize);
				bits_le = bits_le.mid(0,var_bitSize);

				// Reverse the bits (the computation above worked on reversed bits).
				bits_le.reverse();
				// Initialize the integer, ignoring the carry bits.
				return Integer::fromBitsLe(self.type(), bits_le);
			}
		}
		else {
			// Calculate the value of the shift directly in the field.
			// Since 2^{rhs} < Integer::MAX, we know that the operation will not overflow Integer::MAX or the field modulus.
			auto two = Field::one() + Field::one();
			// Note that `shift_in_field` is always greater than zero and does not wrap around the field modulus.
			auto shift_in_field = Field::one();
			auto rhs_bits_le = rhs.toBitsLe();
			// Square-and-multiply (MSB first) over the masked shift bits:
			// shift_in_field = 2^(rhs mod var_bitSize).
			for(int i = first_upper_bit_index - 1; i >= 0; i--) {
				auto bit = rhs_bits_le[i];
				shift_in_field = field_square(shift_in_field);
				shift_in_field = field_ternary(bit, field_mul(shift_in_field , two), shift_in_field);
			}

			// TODO (@pranav) Avoid initializing the integer.
			auto shift_in_field_bits = shift_in_field.toBitsLe();
			addNewVariableList(shift_in_field_bits, var_bitSize);
			// Truncate to `var_bitSize` bits, matching `integer_shl_wrapped`;
			// the upper bits are zero since shift_in_field <= 2^(var_bitSize - 1).
			auto shift_as_divisor = Integer::fromBitsLe(self.type(), shift_in_field_bits.mid(0, var_bitSize));

			if (self.is_signed()) {
				// Divide the absolute value of `self` and `shift` (as a divisor) in the base field.
				auto unsigned_divided = integer_abs_wrapped(self).cast_as_dual();
				// Note that `unsigned_divisor` is greater than zero since `shift_in_field` is greater than zero.
				auto unsigned_divisor = shift_as_divisor.cast_as_dual();

				// Compute the quotient and remainder using wrapped, unsigned division.
				// Note that we do not invoke `div_wrapped` since we need the quotient AND the remainder.
				auto div_ret = integer_unsigned_division_via_witness(unsigned_divided, unsigned_divisor);
				auto unsigned_quotient = div_ret.r0;
				auto unsigned_remainder = div_ret.r1;
					
				// Note that quotient <= |console::Integer::MIN|, since the dividend <= |console::Integer::MIN| and 0 <= quotient <= dividend.
				auto quotient = Integer::fromBitsLe(self.type(), unsigned_quotient.toBitsLe());
				// Two's-complement negation: -q == ~q + 1.
				auto negated_quotient = integer_add_wrapped(integer_not(quotient),Integer::constant(self.type(),1));
				
				auto tmp1 = field_is_equal(unsigned_remainder.toField(), Field::zero());
				auto tmp2 = integer_sub_wrapped(negated_quotient, Integer::constant(self.type(), 1));

				// Arithmetic shift uses a different rounding mode than division:
				// round toward negative infinity when the remainder is nonzero.
				auto rounded_negated_quotient = integer_ternary(
					tmp1,
					negated_quotient,
					tmp2);

				return integer_ternary(self.msb(), rounded_negated_quotient, quotient);
			}
			else {
				// Unsigned logical shift right is a plain wrapping division by 2^rhs.
				return integer_div_wrapped(self, shift_as_divisor);
			}
		}
	}
}

__device__ Integer Exectuor::integer_sub_checked(const Integer& self, const Integer& other) 
{
	// Checked subtraction: computes `self - other`.
	// Determine the variable mode.
	if (self.is_constant() && other.is_constant()) {
		// Compute the difference and return the new constant.
		return self - other;
	}
	else {
		// Instead of subtracting the bits of `self` and `other` directly, the integers are
		// converted into a field elements, and subtracted, before converting back to integers.
		// Note: This is safe as the field is larger than the maximum integer type supported.
		// Two's-complement identity: self - other == self + ~other + 1.
		auto difference = self.toField() + integer_not(other).toField() + Field::one();
		// Register the (bitSize + 1) bits of the difference (value + carry) as new variables.
		addNewVariableList(difference.toBitsLe(), self.bitSize() + 1);

		// Check for underflow.
		// NOTE(review): `is_underflow` is computed but never enforced, and the
		// unsigned carry-bit check is absent — the "checked" assertion appears
		// to be elided in this port; confirm.
		if (self.is_signed()) {			
			auto is_different_signs = bit_is_not_equal(self.msb(),other.msb());
			auto is_underflow = bit_and(is_different_signs ,bit_is_equal(difference.msb(),other.msb()));
		}

		// Return the difference of `self` and `other`.
		return toInteger(difference,self.type());
	}
}

__device__ Integer Exectuor::integer_sub_wrapped(const Integer& self, const Integer& other) {
	// Wrapping subtraction: returns `self - other` modulo 2^bitSize.
	if (self.is_constant() && other.is_constant()) {
		// Both operands constant: fold the difference directly.
		return self - other;
	}

	// Compute the difference in the base field via the two's-complement identity
	// self + ~other + 1; the field is wide enough to hold the (bitSize + 1)-bit
	// intermediate without wrapping.
	Field diff = self.toField() + integer_not(other).toField() + Field::one();

	// Register the bits of the difference — value plus one carry bit — as new
	// circuit variables.
	auto diff_bits = diff.toBitsLe();
	addNewVariableList(diff_bits, self.bitSize() + 1);

	// Truncate back down to the integer's width, discarding the carry.
	return toInteger(diff, self.type());
}

__device__ Integer Exectuor::integer_ternary(Boolean condition, const Integer& self, const Integer& other)
{
	// Select between two integers: returns `self` when `condition` holds,
	// otherwise `other`.
	if (condition.is_constant()) {
		// A constant condition picks one branch outright — no constraints needed.
		return condition.value ? self : other;
	}

	// Variable condition: select each bit individually rather than via
	// `from_bits_le`, since the mode of every resulting bit depends on the
	// modes and values of the condition and both operand bits.
	BitArray picked = self.toBitsLe();
	BitArray alternative = other.toBitsLe();
	int width = picked.size();
	for (int k = 0; k < width; k++)
	{
		picked[k] = bit_ternary(condition, picked[k], alternative[k]);
	}
	return Integer::fromBitsLe(self.type(), picked);
}

__device__ Integer Exectuor::integer_xor(const Integer& self, const Integer& other) 
{
	// Returns the bitwise XOR of `self` and `other` as a new integer of the
	// same type, XOR-ing bit pairs from least- to most-significant.
	auto lhs_bits = self.toBitsLe();
	auto rhs_bits = other.toBitsLe();
	int width = lhs_bits.size();
	BitArray xored(width);
	for (int k = 0; k < width; k++)
	{
		xored[k] = bit_xor(lhs_bits[k], rhs_bits[k]);
	}
	return Integer::fromBitsLe(self.type(), xored);
}

__device__ Field Exectuor::field_add(const Field& self, const Field& other)
{
	// Field addition is a linear operation, so it introduces no new variables.
	Field sum = self + other;
	return sum;
}

__device__ Boolean Exectuor::field_is_less_than(const Field& self, const Field& other)
{
	// Returns `self < other`, comparing the little-endian bit decompositions.
	// The scan runs from least- to most-significant bit, maintaining
	// `is_less_than` = "self < other over the bits seen so far", so the
	// most-significant differing bit decides the final result.
	// NOTE(review): the bit arrays are captured before `to_unique_bits_le` is
	// called on the variable operands — confirm that call only adds range
	// constraints and does not alter the bit values.
	BitArray self_bits = self.toBitsLe();
	BitArray other_bits = other.toBitsLe();
	Boolean is_less_than = Boolean::constant(false);

	// Case 1: Constant < Constant
	if(self.is_constant() && other.is_constant()) {
		// Fold directly on the underlying values.
		return Boolean::constant(self.b < other.b);
	}
	// Case 2: Constant < Variable
	else if(self.is_constant()) {
		// See the `else` case below for the truth table and description of the logic.
		to_unique_bits_le(other);
		for (int i = 0; i < self_bits.size(); i++)
		{
			auto bit_this = self_bits[i];
			auto bit_that = other_bits[i];
			if (bit_this.value)
			{
				// this = 1: less-than persists only if that = 1 (equal bits)
				// and the lower bits already compared less.
				is_less_than = bit_and(bit_that, is_less_than);
			}
			else
			{
				// this = 0: that = 1 decides less-than outright; otherwise
				// carry the lower-bit result forward.
				is_less_than = bit_or(bit_that, is_less_than);
			}
		}
		return is_less_than;
	}
	// Case 3: Variable < Constant
	else if (other.is_constant()) {
		// See the `else` case below for the truth table and description of the logic.
		to_unique_bits_le(self);
		for (int i = 0; i < self_bits.size(); i++)
		{
			auto bit_this = self_bits[i];
			auto bit_that = other_bits[i];
			if (bit_that.value)
			{
				// that = 1: this = 0 decides less-than; on equal bits the lower
				// result carries (note !this | less ≡ !this | (this & less)).
				is_less_than = bit_or(bit_not(bit_this), is_less_than);
			}
			else
			{
				// that = 0: less-than persists only when this = 0 as well.
				is_less_than = bit_and(bit_not(bit_this), is_less_than);
			}
		}
		return is_less_than;
	}
	// Case 4: Variable < Variable
	else {
		to_unique_bits_le(self);
		to_unique_bits_le(other);
		for (int i = 0; i < self_bits.size(); i++)
		{
			auto bit_this = self_bits[i];
			auto bit_that = other_bits[i];
			// When the bits differ, `that` decides; otherwise keep the
			// lower-bit comparison result.
			is_less_than = bit_ternary(bit_xor(bit_this, bit_that), bit_that, is_less_than);
		}
		return is_less_than;
	}
}

__device__ Boolean Exectuor::field_is_greater_than(const Field& self, const Field& other)
{
	// `self > other` is equivalent to `other < self`.
	Boolean greater = field_is_less_than(other, self);
	return greater;
}

__device__ Boolean Exectuor::field_is_less_than_or_equal(const Field& self, const Field& other)
{
	// `self <= other` is equivalent to `other >= self`.
	Boolean lte = field_is_greater_than_or_equal(other, self);
	return lte;
}

__device__ Boolean Exectuor::field_is_greater_than_or_equal(const Field& self, const Field& other)
{
	// `self >= other` is the negation of `self < other`.
	Boolean less = field_is_less_than(self, other);
	return bit_not(less);
}

__device__ Field Exectuor::field_div(const Field& self, const Field& other)
{
	// Division is multiplication by the (witnessed) multiplicative inverse.
	Field inv = field_inverse(other);
	return field_mul(self, inv);
}

__device__ Field Exectuor::field_div_unchecked(const Field& self, const Field& other)
{
	// Unchecked field division: computes `self / other` with a single quotient
	// witness instead of a full inverse computation.
	// NOTE(review): the constraint `quotient * other == self` is commented out
	// below, so the quotient witness is currently unconstrained — confirm.
	if (self.is_constant() && other.is_constant()) {
		// Fold the constant case directly.
		return self * other.inverse();
	}
	else
	{
		// Otherwise, we can perform division with 1 constraint by using a `quotient` witness,
		// and ensuring that `quotient * other == self`.

		// Construct the quotient as a witness (witness-mode bracket suppresses
		// constraint emission for the division itself).
		Field quotient;
		inWitness(true);
		
		// Note: This band-aid was added to prevent a panic when `other` is zero.
		if (other.b == 0) 
		{ 
			quotient = field_div(self, Field::one());
		}
		else 
		{
			quotient = field_div(self , other);
		};
		inWitness(false);
		// Register the quotient as a new circuit variable.
		addNewVariable(quotient);

		// Ensure the quotient is correct by enforcing:
		// `quotient * other == self`.
		//E::enforce(|| (&quotient, other, self));

		// Return the quotient.
		return quotient;
	}
}

__device__ Field Exectuor::field_double(const Field& self)
{
	// Doubling a field element is adding it to itself.
	Field doubled = field_add(self, self);
	return doubled;
}

__device__ Boolean Exectuor::field_is_equal(const Field& self, const Field& other)
{
	// Equality is the negation of the inequality gadget.
	Boolean neq = field_is_not_equal(self, other);
	return bit_not(neq);
}

__device__ Boolean Exectuor::field_is_not_equal(const Field& self, const Field& other)
{
	// Returns a Boolean that is `true` iff `self != other`.
	// For non-constant operands, two witness variables are registered in a
	// fixed order (the is-neq indicator, then the multiplier); the enforcing
	// constraints are emitted elsewhere, so preserve this ordering.

	// Initialize a (console) boolean that is `true` if `this` and `that` are not equivalent.
	bool is_neq_ejected = self.b != other.b;

	if (self.is_constant() && other.is_constant()) 
	{
		// Both constants: fold directly.
		return Boolean::constant(is_neq_ejected);
	}
	else
	{
		// Witness 1: the inequality indicator (1 if different, 0 if equal).
		if (is_neq_ejected)
			addNewVariable(Field::create(Private,1));
		else
			addNewVariable(Field::create(Private,0));
			

		// Compute `self` - `other`.
		auto delta = self - other;

		// Assign the expected multiplier as a witness.
		//
		// Note: the inverse of `delta` is not guaranteed to exist, and if it does not,
		// we pick 1 as the multiplier, as its value is irrelevant to satisfy the constraints.
		if(delta.b == 0)
			addNewVariable(Field::create(Private, 1));
		else
			addNewVariable(delta.inverse());

		return Boolean::create(Private, is_neq_ejected);
	}
}

__device__ Field Exectuor::field_inverse(const Field& self)
{
	// Computes the multiplicative inverse of `self`. A constant inverse is
	// returned as-is; a non-constant one becomes a private witness and is
	// registered as a new circuit variable.
	Field inv = self.inverse();
	if (inv.is_constant())
		return inv;

	inv.mode = Private;
	addNewVariable(inv);
	return inv;
}

__device__ Field Exectuor::field_mul(const Field& self, const Field& other)
{
	// Field multiplication. When at least one operand is constant the product
	// is a linear combination and needs no new variable; a variable-times-
	// variable product registers one new witness variable.
	Field product = self * other;
	if (!self.is_constant() && !other.is_constant())
	{
		addNewVariable(product);
	}
	return product;
}

__device__ Field Exectuor::field_neg(const Field& self) 
{
	// Additive negation: (MODULUS - b) mod MODULUS. This is a pure value
	// rewrite — the operand's mode is preserved and no constraints are added.
	Field negated = self;
	negated.b = (*P::MODULUS - negated.b) % *P::MODULUS;
	return negated;
}

__device__ Field Exectuor::field_pow(const Field& self, const Field& exponent)
{
	// Raises `self` to `exponent` via square-and-multiply over the exponent's
	// big-endian bits. A constant exponent branches on ejected bit values;
	// a variable exponent selects with ternaries (constraint order matters).

	// Initialize the output.
	Field output = Field::one();

	auto bits_be = exponent.toBitsBe();
	// If the exponent is a constant, eject its bits to determine whether to multiply in each iteration.
	if (exponent.is_constant()) {
		for (int i = 0; i < bits_be.size(); i++) {
			auto bit = bits_be[i];
			// Square the output.
			output = field_square(output);
			// If `bit` is `true, set the output to `output * self`.
			if (bit.value) {
				output = field_mul(output,self);
			}
		}
	}
	// If the exponent is a variable, use a ternary to select whether to multiply in each iteration.
	else {
		// Range-check the exponent so its bit decomposition is unique.
		// NOTE(review): `bits_be` was captured before this call — confirm
		// `to_unique_bits_le` only adds constraints and does not alter bits.
		to_unique_bits_le(exponent);
		for (int i = 0; i < bits_be.size(); i++) {
			auto bit = bits_be[i];
			// Square the output.
			output = field_square(output);
			// If `bit` is `true, set the output to `output * self`.
			output = field_ternary(bit, field_mul(output , self), output);
		}
	}

	return output;
}

__device__ Field Exectuor::field_square(const Field& self)
{
	// Squaring is multiplying the element by itself.
	Field sq = field_mul(self, self);
	return sq;
}

__device__ Field Exectuor::field_square_root(const Field& self)
{
	// NOTE(review): unimplemented stub — always returns a default-constructed
	// Field. The commented block below is the reference Rust implementation
	// this port is meant to mirror (witness the root, enforce root^2 == self,
	// and force uniqueness via root <= (MODULUS - 1) / 2).
	/*
	let square_root : Field<E> = witness!(| self | match self.square_root() {
		Ok(square_root) = > square_root,
			_ = > console::Field::zero(),
	});

	// Ensure `square_root` * `square_root` == `self`.
	E::enforce(|| (&square_root, &square_root, self));

	// Define the MODULUS_MINUS_ONE_DIV_TWO as a constant.
	let modulus_minus_one_div_two = match E::BaseField::from_bigint(E::BaseField::modulus_minus_one_div_two()) {
		Some(modulus_minus_one_div_two) = > Field::constant(console::Field::new(modulus_minus_one_div_two)),
			None = > E::halt("Failed to initialize MODULUS_MINUS_ONE_DIV_TWO as a constant"),
	};

	// Ensure that `square_root` is less than or equal to (MODULUS - 1) / 2.
	// This ensures that the resulting square root is unique.
	let is_less_than_or_equal = square_root.is_less_than_or_equal(&modulus_minus_one_div_two);
	E::assert(is_less_than_or_equal);

	square_root
	*/
	return Field();
}

__device__ Field Exectuor::field_sub(const Field& self, const Field& other)
{
	// Subtraction expressed as addition of the additive inverse.
	Field negated = field_neg(other);
	return field_add(self, negated);
}
__device__ Field Exectuor::field_ternary(Boolean condition, const Field& first, const Field& second)
{
	// Selects `first` when `condition` holds, otherwise `second`.
	// For non-constant conditions the chosen value is materialized as a
	// private witness; the enforcing constraint is emitted elsewhere.

	// Constant `condition`
	if (condition.is_constant()) {
		// The condition picks a branch outright; no constraints needed.
		return condition.value ? first : second;
	}
	// Constant `first` and `second`
	else if(first.is_constant() && second.is_constant()) {
		// Multiply each constant by the (variable) condition indicator / its
		// negation, then return the branch matching the ejected condition value.
		// NOTE(review): only the selected product is returned — confirm this
		// matches the intended linear-combination semantics of the reference.
		auto f_not_condition = Field::from_boolean(bit_not(condition));
		auto f_condition = Field::from_boolean(condition);
		auto ret1 = field_mul(f_condition , first);
		auto ret2 = field_mul(f_not_condition, second);
		return condition.value ? ret1 : ret2;
	}
	// Variables
	else {
		// Initialize the witness with the ejected value of the selected branch.
		auto ret = condition.value ? first : second;
		ret.mode = Private;
		// Register the witness when at least one branch is non-constant.
		if (!first.is_constant() || !second.is_constant())
		{
			addNewVariable(ret);
		}
		return ret;
	}
}

__device__ CuVariant Exectuor::field_cassLossy(const Field& self, int type_id)
{
	// Lossy cast of a field element to the literal type `type_id`.
	// The field is range-checked first so its bit decomposition is unique.
	// Only integer targets are supported; anything else is unreachable.
	to_unique_bits_le(self);

	if (!typeIsInteger(type_id))
	{
		// Unsupported target type.
		assert(0);
		return CuVariant();
	}

	return CuVariant::fromInteger(Integer::fromField(type_id, self));
}

__device__ Group Exectuor::group_ternary(Boolean condition, const Group& first, const Group& second)
{
	// Select between two group points coordinate-wise. The x-coordinate
	// selection is performed before the y-coordinate one, so any constraints
	// are emitted in that order.
	Group selected;
	selected.x = field_ternary(condition, first.x, second.x);
	selected.y = field_ternary(condition, first.y, second.y);
	return selected;
}

__device__ Group Exectuor::group_add(const Group& self, const Group& other)
{
	// Adds two points on the twisted Edwards curve (parameters EDWARDS_A /
	// EDWARDS_D) using the unified addition formulas:
	//   x3 = (v0 + v1) / (1 + d*v0*v1),  y3 = (U + a*v0 - v1) / (1 - d*v0*v1)
	// with v0 = x1*y2, v1 = x2*y1, U = (-a*x1 + y1)*(x2 + y2).
	// Statement order is significant: each field_* call may emit witness
	// variables in a fixed sequence.

	// If `self` is constant *and* `self` is zero, then return `other`.
	if (self.is_constant() && self.is_zero()) {
		return other;
	}
	// If `other` is constant *and* `other` is zero, then return `self`.
	else if (other.is_constant() && other.is_zero()) {
		return self;
	}
	// Otherwise, compute the sum of `self` and `other`.
	else 
	{
		// This swap reduces the number of constants by one.
		Group g_this, g_that;
		if (other.is_constant())
		{
			g_this = self;
			g_that = other;
		}
		else
		{
			g_this = other;
			g_that = self;
		}

		auto a = Field::constant(*P::EDWARDS_A);
		auto d = Field::constant(*P::EDWARDS_D);
		// Scratch registers reused across the intermediate computations below.
		Field r0, r1;

		// Compute U = (-A * x1 + y1) * (x2 + y2)
		//auto u1 = field_mul(g_this.x,field_neg(a)) + g_this.y;
		r0 = field_neg(a);
		r1 = field_mul(g_this.x, r0);
		auto u1 = r1 + g_this.y;

		auto u2 = g_that.x + g_that.y;
		auto u = field_mul(u1 , u2);

		// Compute v0 = x1 * y2
		auto v0 = field_mul(g_this.x , g_that.y);

		// Compute v1 = x2 * y1
		auto v1 = field_mul(g_that.x , g_this.y);

		// Compute v2 = d * v0 * v1
		//auto v2 = field_mul(field_mul(v0 , v1) , d);
		r0 = field_mul(v0, v1);
		auto v2 = field_mul(r0, d);

		// The divisions below are computed in witness mode (no constraints
		// emitted); the results are then registered as witness variables.
		inWitness(true);
		// Assign x3 = (v0 + v1) / (v2 + 1).
		//Field x3 = field_div((v0 + v1) , (v2 + Field::one()));
		r0 = (v0 + v1);
		r1 = (v2 + Field::one());
		Field x3 = field_div(r0, r1);
		// Assign y3 = (U + a * v0 - v1) / (1 - v2).
		//Field y3 = field_div((u + field_mul(v0 , a) - v1) , (Field::one() - v2));
		r0 = field_mul(v0, a);
		r1 = u + r0;
		r0 = r1 - v1;
		r1 = (Field::one() - v2);
		Field y3 = field_div(r0, r1);
		inWitness(false);
		if(!x3.is_constant())
			addNewVariable(x3);
		if(!y3.is_constant())
			addNewVariable(y3);

		/*
		// Ensure x3 is well-formed.
		// x3 * (v2 + 1) = v0 + v1
		auto v2_plus_one = v2 + Field::one();
		auto v0_plus_v1 = v0 + v1;

		// Ensure y3 is well-formed.
		// y3 * (1 - v2) = u + (a * v0) - v1
		auto one_minus_v2 = Field::one() - v2;
		auto a_v0 = v0 * a;
		auto u_plus_a_v0_minus_v1 = u + a_v0 - v1;
		*/

		Group ret;
		ret.x = x3;
		ret.y = y3;
		return ret;
	}
}

__device__ void Exectuor::to_unique_bits_le(Field in)
{
	// Registers the little-endian bit decomposition of a non-constant field
	// element as circuit variables and computes the comparison chain showing
	// the bits encode a value <= MODULUS - 1 (i.e. the decomposition is
	// canonical/unique). `in` is taken by value: mutating its mode here does
	// not affect the caller's copy.
	// NOTE(review): `rest_is_less` is computed but never asserted/enforced in
	// this function — confirm enforcement happens elsewhere.
	if (in.is_constant())
		return;

	in.mode = Private;
	auto bits = in.toBitsLe();
	// Witness every bit of the decomposition.
	addNewVariableList(bits,bits.size());

	// Compare the bit decomposition against MODULUS - 1, LSB first.
	Field f = Field::constant(*P::MODULUS - 1);
	auto modulus_minus_one = f.toBitsLe();

	auto rest_is_less = Boolean::constant(false);
	for (int i = 0; i < modulus_minus_one.size(); i++)
	{
		auto f_this = modulus_minus_one[i];
		auto f_that = bits[i];

		if (f_this.value)
		{
			// Modulus bit = 1: "less so far" persists only when the value's
			// bit is also 1 (equal) and the lower bits compared less.
			f_that = bit_and(f_that, rest_is_less);
		}
		else 
		{ 
			// Modulus bit = 0: a set value bit would decide "not less"; the
			// chain keeps value-bit OR carry of the lower-bit result.
			f_that = bit_or(f_that, rest_is_less);
		}
		rest_is_less = f_that;
	}
}

// Executes a `cast.lossy` instruction: converts the input register value to
// the output register's declared type, truncating where necessary.
// Only field sources are implemented; boolean and integer sources previously
// fell through silently and wrote an empty CuVariant (which setReg's type
// assert would reject) — they now trap explicitly, matching the instTodo
// convention used elsewhere in this file.
__device__ void Exectuor::instCastLossy(const Instruction* inst)
{
	auto a = getReg(inst->inOp[0]);

	int out_type = m_program->regType[inst->outOp];
	CuVariant out;
	if (a.typeId() == Type::boolean)
	{
		// TODO: boolean-source lossy casts are not implemented yet.
		assert(0);
	}
	else if (typeIsInteger(a.typeId()))
	{
		// TODO: integer-source lossy casts are not implemented yet.
		assert(0);
	}
	else if (a.typeId() == Type::field)
	{
		out = field_cassLossy(a.toField(), out_type);
	}
	else
	{
		// Unsupported source type.
		assert(0);
	}
	setReg(inst->outOp, out);
}

// Serializes a register value into the canonical little-endian bit layout
// consumed by the hash instructions:
//   [2 constant-false bits] [type tag (u8)] [bit width (u16)] [value bits].
__device__ BitArray Exectuor::inputToBitsLe(const CuVariant& v)
{
	BitArray encoded;

	// Two leading constant-false bits.
	encoded.push_back(Boolean::constant(false));
	encoded.push_back(Boolean::constant(false));

	int type_id = variantType(v);

	// Type tag and bit width of the literal, encoded as constants.
	Integer tag = Integer::constant(Type::u8, (int64_t)toRustType(type_id));
	Integer width = Integer::constant(Type::u16, (int64_t)typeBitSize(type_id));
	BitArray value_bits = variantToBitsLe(v);

	encoded.append(tag.toBitsLe());
	encoded.append(width.toBitsLe());
	encoded.append(value_bits);

	return encoded;
}

// Packs the bit encoding of a value into field elements, 252 bits per
// element, after appending a single constant-true terminator bit.
__device__ CuVector<Field> Exectuor::inputToFields(const CuVariant& v)
{
	BitArray bits = inputToBitsLe(v);
	bits.push_back(Boolean::constant(true));

	// Chunk the bit stream into 252-bit field elements.
	CuVector<Field> fields;
	int total = bits.size();
	for (int offset = 0; offset < total; offset += 252)
		fields.push_back(Field::fromBitsLe(bits.mid(offset, 252)));
	return fields;
}

// Dispatches a single instruction to its handler. Unknown opcodes trap.
// Replaces the long if/else equality chain with a switch and removes the
// unused local `pre_variable_size` (it was computed but never read).
__device__ void Exectuor::exec(const Instruction* inst)
{
	// Every input register must have been written before it is read.
	for (int i = 0; i < inst->inOpSize; i++)
	{
		assert(m_regs[inst->inOp[i]].typeId() != Type::none);
	}

	switch (inst->type)
	{
	case Inst_add:          instAdd(inst); break;
	case Inst_addW:         instAddW(inst); break;
	case Inst_div:          instDiv(inst); break;
	case Inst_divW:         instDivW(inst); break;
	case Inst_mul:          instMul(inst); break;
	case Inst_mulW:         instMulW(inst); break;
	case Inst_sub:          instSub(inst); break;
	case Inst_subW:         instSubW(inst); break;
	case Inst_lt:           instLt(inst); break;
	case Inst_lte:          instLte(inst); break;
	case Inst_gt:           instGt(inst); break;
	case Inst_gte:          instGte(inst); break;
	case Inst_nor:          instNor(inst); break;
	case Inst_isEq:         instEq(inst); break;
	case Inst_isNeq:        instNotEq(inst); break;
	case Inst_mod:          instMod(inst); break;
	case Inst_pow:          instPow(inst); break;
	case Inst_powW:         instPowW(inst); break;
	case Inst_shl:          instShl(inst); break;
	case Inst_shlW:         instShlW(inst); break;
	case Inst_shr:          instShr(inst); break;
	case Inst_shrW:         instShrW(inst); break;
	case Inst_xor:          instXor(inst); break;
	case Inst_or:           instOr(inst); break;
	case Inst_and:          instAnd(inst); break;
	case Inst_nand:         instNand(inst); break;
	case Inst_rem:          instRem(inst); break;
	case Inst_remW:         instRemW(inst); break;
	case Inst_abs:          instAbs(inst); break;
	case Inst_absW:         instAbsW(inst); break;
	case Inst_inv:          instInv(inst); break;
	case Inst_neg:          instNeg(inst); break;
	case Inst_square:       instSquare(inst); break;
	case Inst_double:       instDouble(inst); break;
	case Inst_castLossy:    instCastLossy(inst); break;
	case Inst_not:          instNot(inst); break;
	case Inst_ternary:      instTernary(inst); break;
	case Inst_hashBhp256:   instBhp(inst, 256); break;
	case Inst_hashBhp512:   instBhp(inst, 512); break;
	case Inst_hashBhp768:   instBhp(inst, 768); break;
	case Inst_hashBhp1024:  instBhp(inst, 1024); break;
	// Keccak hashes are not implemented yet.
	case Inst_hashKeccak256:
	case Inst_hashKeccak384:
	case Inst_hashKeccak512:
		instTodo(inst);
		break;
	case Inst_hashPed64:    instPed(inst, 64); break;
	case Inst_hashPed128:   instPed(inst, 128); break;
	case Inst_hashPsd2:     instPsd(inst, 2); break;
	case Inst_hashPsd4:     instPsd(inst, 4); break;
	case Inst_hashPsd8:     instPsd(inst, 8); break;
	// SHA3 hashes are not implemented yet.
	case Inst_hashSha3_256:
	case Inst_hashSha3_384:
	case Inst_hashSha3_512:
		instTodo(inst);
		break;
	default:
		// Unknown opcode.
		assert(0);
		break;
	}
}

// Fallback handler for opcodes that are recognized but not implemented yet
// (Keccak and SHA3 hashes); always traps.
__device__ void Exectuor::instTodo(const Instruction* inst)
{
	assert(0);
}

// Executes a Poseidon hash instruction (hash.psd2/psd4/psd8).
// NOTE(review): the `psd` rate argument (2/4/8) is currently ignored —
// m_psd2 handles every variant and the output count is hard-coded to 1;
// confirm whether psd4/psd8 need dedicated sponge instances.
__device__ void Exectuor::instPsd(const Instruction* inst, int psd)
{
	auto a = getReg(inst->inOp[0]);
	int out_reg = inst->outOp;

	// Encode the input literal as field elements (type tag + size + payload bits).
	CuVector<Field> input = inputToFields(a);
	if (a.typeId() == Type::field)
	{
		// Field inputs additionally constrain their bit decomposition to be canonical.
		to_unique_bits_le(variantToField(a));
	}

	CuVector<Field> output = m_psd2.hash_many(input, 1);

	// Convert the first hash output to the declared output register type.
	Field f = output[0];
	int reg_type = m_program->regType[out_reg];
	CuVariant ret = field_cassLossy(f, reg_type);
	setReg(out_reg, ret);
}

// Executes a BHP hash instruction (hash.bhp256/512/768/1024).
// NOTE(review): the hasher call is commented out — `output` is left
// default-constructed, so this handler currently writes a placeholder
// value; confirm before relying on BHP results. The `bhp` width argument
// is also unused here.
__device__ void Exectuor::instBhp(const Instruction* inst, int bhp)
{
	auto a = getReg(inst->inOp[0]);
	int out_reg = inst->outOp;

	BitArray input = inputToBitsLe(a);

	Group output;// = m_bhp256.hash(input);

	// Convert the x-coordinate of the hash output to the output register type.
	Field f = output.x;
	int reg_type = m_program->regType[out_reg];
	CuVariant ret = field_cassLossy(f, reg_type);
	setReg(out_reg, ret);
}

// Executes a Pedersen hash instruction (hash.ped64/ped128).
// NOTE(review): the hasher call is commented out — `f` is left
// default-constructed, so this handler currently writes a placeholder
// value; confirm before relying on Pedersen results. The `ped` width
// argument is also unused here.
__device__ void Exectuor::instPed(const Instruction* inst, int ped)
{
	auto a = getReg(inst->inOp[0]);
	int out_reg = inst->outOp;

	BitArray input = inputToBitsLe(a);

	Field f;// = m_ped64.hash(input);

	// Convert the hash output to the declared output register type.
	int reg_type = m_program->regType[out_reg];
	CuVariant ret = field_cassLossy(f, reg_type);
	setReg(out_reg, ret);
}

// add: checked integer addition, or field addition. Other types trap.
__device__ void Exectuor::instAdd(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_add_checked(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromField(field_add(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// add.w: wrapping integer addition. Non-integer types trap.
__device__ void Exectuor::instAddW(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_add_wrapped(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// sub: checked integer subtraction, or field subtraction. Other types trap.
__device__ void Exectuor::instSub(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_sub_checked(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromField(field_sub(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// sub.w: wrapping integer subtraction. Non-integer types trap.
__device__ void Exectuor::instSubW(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_sub_wrapped(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// mul: checked integer multiplication, or field multiplication. Other types trap.
__device__ void Exectuor::instMul(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_mul_checked(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromField(field_mul(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// mul.w: wrapping integer multiplication. Non-integer types trap.
__device__ void Exectuor::instMulW(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_mul_wrapped(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// div: checked integer division, or field division. Other types trap.
__device__ void Exectuor::instDiv(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_div_checked(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromField(field_div(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// div.w: wrapping integer division, or unchecked field division. Other types trap.
__device__ void Exectuor::instDivW(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_div_wrapped(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromField(field_div_unchecked(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// is.eq: equality comparison for boolean, integer, or field operands.
// Adds the trailing trap that every other comparison handler (instGt/instLt/
// instGte/instLte) has — previously an unsupported type silently wrote an
// empty CuVariant to the output register.
__device__ void Exectuor::instEq(const Instruction* inst)
{
	auto a = getReg(inst->inOp[0]);
	auto b = getReg(inst->inOp[1]);

	CuVariant out;
	if (a.typeId() == Type::boolean)
	{
		out = CuVariant::fromBoolean(bit_is_equal(a.toBoolean(), b.toBoolean()));
	}
	else if (typeIsInteger(a.typeId()))
	{
		out = CuVariant::fromBoolean(integer_is_equal(a.toInteger(), b.toInteger()));
	}
	else if (a.typeId() == Type::field)
	{
		out = CuVariant::fromBoolean(field_is_equal(a.toField(), b.toField()));
	}
	else
	{
		// Unsupported operand type.
		assert(0);
	}

	setReg(inst->outOp, out);
}

// is.neq: inequality comparison for boolean, integer, or field operands.
// Adds the trailing trap that every other comparison handler has —
// previously an unsupported type silently wrote an empty CuVariant.
__device__ void Exectuor::instNotEq(const Instruction* inst)
{
	auto a = getReg(inst->inOp[0]);
	auto b = getReg(inst->inOp[1]);

	CuVariant out;
	if (a.typeId() == Type::boolean)
	{
		out = CuVariant::fromBoolean(bit_is_not_equal(a.toBoolean(), b.toBoolean()));
	}
	else if (typeIsInteger(a.typeId()))
	{
		out = CuVariant::fromBoolean(integer_is_not_equal(a.toInteger(), b.toInteger()));
	}
	else if (a.typeId() == Type::field)
	{
		out = CuVariant::fromBoolean(field_is_not_equal(a.toField(), b.toField()));
	}
	else
	{
		// Unsupported operand type.
		assert(0);
	}

	setReg(inst->outOp, out);
}

// gt: greater-than comparison for integer or field operands. Other types trap.
__device__ void Exectuor::instGt(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromBoolean(integer_is_greater_than(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromBoolean(field_is_greater_than(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// lt: less-than comparison for integer or field operands. Other types trap.
__device__ void Exectuor::instLt(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromBoolean(integer_is_less_than(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromBoolean(field_is_less_than(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// gte: greater-than-or-equal comparison for integer or field operands. Other types trap.
__device__ void Exectuor::instGte(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromBoolean(integer_is_greater_than_or_equal(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromBoolean(field_is_greater_than_or_equal(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// lte: less-than-or-equal comparison for integer or field operands. Other types trap.
__device__ void Exectuor::instLte(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromBoolean(integer_is_less_than_or_equal(lhs.toInteger(), rhs.toInteger()));
	else if (lhs.typeId() == Type::field)
		result = CuVariant::fromBoolean(field_is_less_than_or_equal(lhs.toField(), rhs.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// ternary: out = cond ? first : second, for boolean, integer, or field operands.
// The final branch previously assumed any remaining type was a field and
// called field_ternary unconditionally; the field case is now explicit and
// any other type traps, consistent with the rest of the handlers.
__device__ void Exectuor::instTernary(const Instruction* inst)
{
	auto a = getReg(inst->inOp[0]).toBoolean();
	auto b = getReg(inst->inOp[1]);
	auto c = getReg(inst->inOp[2]);

	CuVariant out;
	if (b.typeId() == Type::boolean)
	{
		out = CuVariant::fromBoolean(bit_ternary(a, b.toBoolean(), c.toBoolean()));
	}
	else if (typeIsInteger(b.typeId()))
	{
		out = CuVariant::fromInteger(integer_ternary(a, b.toInteger(), c.toInteger()));
	}
	else if (b.typeId() == Type::field)
	{
		out = CuVariant::fromField(field_ternary(a, b.toField(), c.toField()));
	}
	else
	{
		// Unsupported operand type.
		assert(0);
	}
	setReg(inst->outOp, out);
}

// pow: checked integer exponentiation, or field exponentiation. Other types trap.
__device__ void Exectuor::instPow(const Instruction* inst)
{
	auto base = getReg(inst->inOp[0]);
	auto exponent = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(base.typeId()))
		result = CuVariant::fromInteger(integer_pow_checked(base.toInteger(), exponent.toInteger()));
	else if (base.typeId() == Type::field)
		result = CuVariant::fromField(field_pow(base.toField(), exponent.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// pow.w: wrapping integer exponentiation. Non-integer types trap.
__device__ void Exectuor::instPowW(const Instruction* inst)
{
	auto base = getReg(inst->inOp[0]);
	auto exponent = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(base.typeId()))
		result = CuVariant::fromInteger(integer_pow_wrapped(base.toInteger(), exponent.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// shl: checked integer left shift. Non-integer types trap.
__device__ void Exectuor::instShl(const Instruction* inst)
{
	auto value = getReg(inst->inOp[0]);
	auto amount = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(value.typeId()))
		result = CuVariant::fromInteger(integer_shl_checked(value.toInteger(), amount.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// shl.w: wrapping integer left shift. Non-integer types trap.
__device__ void Exectuor::instShlW(const Instruction* inst)
{
	auto value = getReg(inst->inOp[0]);
	auto amount = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(value.typeId()))
		result = CuVariant::fromInteger(integer_shl_wrapped(value.toInteger(), amount.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// shr: checked integer right shift. Non-integer types trap.
__device__ void Exectuor::instShr(const Instruction* inst)
{
	auto value = getReg(inst->inOp[0]);
	auto amount = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(value.typeId()))
		result = CuVariant::fromInteger(integer_shr_checked(value.toInteger(), amount.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// shr.w: wrapping integer right shift. Non-integer types trap.
__device__ void Exectuor::instShrW(const Instruction* inst)
{
	auto value = getReg(inst->inOp[0]);
	auto amount = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(value.typeId()))
		result = CuVariant::fromInteger(integer_shr_wrapped(value.toInteger(), amount.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// rem: checked integer remainder. Non-integer types trap.
__device__ void Exectuor::instRem(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_rem_checked(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// rem.w: wrapping integer remainder. Non-integer types trap.
__device__ void Exectuor::instRemW(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_rem_wrapped(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// abs: checked integer absolute value. Non-integer types trap.
__device__ void Exectuor::instAbs(const Instruction* inst)
{
	auto val = getReg(inst->inOp[0]);

	CuVariant result;
	if (typeIsInteger(val.typeId()))
		result = CuVariant::fromInteger(integer_abs_checked(val.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// abs.w: wrapping integer absolute value. Non-integer types trap.
__device__ void Exectuor::instAbsW(const Instruction* inst)
{
	auto val = getReg(inst->inOp[0]);

	CuVariant result;
	if (typeIsInteger(val.typeId()))
		result = CuVariant::fromInteger(integer_abs_wrapped(val.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// mod: integer modulo. Non-integer types trap.
__device__ void Exectuor::instMod(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_modulo(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// xor: boolean or integer bitwise XOR. Other types trap.
__device__ void Exectuor::instXor(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (lhs.typeId() == Type::boolean)
		result = CuVariant::fromBoolean(bit_xor(lhs.toBoolean(), rhs.toBoolean()));
	else if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_xor(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// or: boolean or integer bitwise OR. Other types trap.
__device__ void Exectuor::instOr(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (lhs.typeId() == Type::boolean)
		result = CuVariant::fromBoolean(bit_or(lhs.toBoolean(), rhs.toBoolean()));
	else if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_or(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// nor: boolean NOR. Non-boolean types trap.
__device__ void Exectuor::instNor(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (lhs.typeId() == Type::boolean)
		result = CuVariant::fromBoolean(bit_nor(lhs.toBoolean(), rhs.toBoolean()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// and: boolean or integer bitwise AND. Other types trap.
__device__ void Exectuor::instAnd(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (lhs.typeId() == Type::boolean)
		result = CuVariant::fromBoolean(bit_and(lhs.toBoolean(), rhs.toBoolean()));
	else if (typeIsInteger(lhs.typeId()))
		result = CuVariant::fromInteger(integer_and(lhs.toInteger(), rhs.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// nand: boolean NAND. Non-boolean types trap.
__device__ void Exectuor::instNand(const Instruction* inst)
{
	auto lhs = getReg(inst->inOp[0]);
	auto rhs = getReg(inst->inOp[1]);

	CuVariant result;
	if (lhs.typeId() == Type::boolean)
		result = CuVariant::fromBoolean(bit_nand(lhs.toBoolean(), rhs.toBoolean()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// inv: field multiplicative inverse. Non-field types trap.
__device__ void Exectuor::instInv(const Instruction* inst)
{
	auto val = getReg(inst->inOp[0]);

	CuVariant result;
	if (val.typeId() == Type::field)
		result = CuVariant::fromField(field_inverse(val.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// neg: integer or field negation. Other types trap.
__device__ void Exectuor::instNeg(const Instruction* inst)
{
	auto val = getReg(inst->inOp[0]);

	CuVariant result;
	if (typeIsInteger(val.typeId()))
		result = CuVariant::fromInteger(integer_neg(val.toInteger()));
	else if (val.typeId() == Type::field)
		result = CuVariant::fromField(field_neg(val.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// double: out = 2 * a for field operands.
// Fixes a bug where the untouched input register value `a` was written back
// to the output register instead of the computed `out`, and traps on
// unsupported types in line with instSquare/instInv.
__device__ void Exectuor::instDouble(const Instruction* inst)
{
	auto a = getReg(inst->inOp[0]);
	CuVariant out;
	if (a.typeId() == Type::field)
	{
		out = CuVariant::fromField(field_double(a.toField()));
	}
	else
	{
		assert(0);
	}
	setReg(inst->outOp, out);
}

// not: boolean negation or integer bitwise complement. Other types trap.
__device__ void Exectuor::instNot(const Instruction* inst)
{
	auto val = getReg(inst->inOp[0]);

	CuVariant result;
	if (val.typeId() == Type::boolean)
		result = CuVariant::fromBoolean(bit_not(val.toBoolean()));
	else if (typeIsInteger(val.typeId()))
		result = CuVariant::fromInteger(integer_not(val.toInteger()));
	else
		assert(0);

	setReg(inst->outOp, result);
}

// square: out = a * a for field operands. Other types trap.
__device__ void Exectuor::instSquare(const Instruction* inst)
{
	auto val = getReg(inst->inOp[0]);

	CuVariant result;
	if (val.typeId() == Type::field)
		result = CuVariant::fromField(field_square(val.toField()));
	else
		assert(0);

	setReg(inst->outOp, result);
}