/*
Copyright (C) 2018-2019 de4dot@gmail.com

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

mod enums;
pub(crate) mod handlers_table;
#[cfg(feature = "op_code_info")]
mod instruction_fmt;
mod mem_op;
#[cfg(feature = "op_code_info")]
mod mnemonic_str_tbl;
#[cfg(feature = "op_code_info")]
mod op_code;
mod op_code_data;
#[cfg(feature = "op_code_info")]
mod op_code_fmt;
mod op_code_handler;
#[cfg(feature = "op_code_info")]
pub(crate) mod op_code_tbl;
#[cfg(feature = "op_code_info")]
mod op_kind_tables;
mod ops;
mod ops_tables;
#[cfg(test)]
pub(crate) mod tests;

pub use self::enums::*;
use self::handlers_table::*;
pub use self::mem_op::*;
#[cfg(feature = "op_code_info")]
pub use self::op_code::*;
use self::op_code_handler::OpCodeHandler;
use super::iced_constants::IcedConstants;
use super::*;
#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::{i16, i32, i8, mem, u32};

// GENERATOR-BEGIN: ImmSizes
// ⚠️This was generated by GENERATOR!🦹‍♂️
// Number of immediate bytes emitted for each `ImmSize` variant (presumably
// indexed by `ImmSize as usize` — the order matches the variant comments).
// The `RipRelSize*` entries count the encoded displacement bytes only, not
// the size of the branch target named after the underscore.
#[cfg_attr(feature = "cargo-fmt", rustfmt::skip)]
static IMM_SIZES: [u32; 19] = [
	0,// None
	1,// Size1
	2,// Size2
	4,// Size4
	8,// Size8
	3,// Size2_1
	2,// Size1_1
	4,// Size2_2
	6,// Size4_2
	1,// RipRelSize1_Target16
	1,// RipRelSize1_Target32
	1,// RipRelSize1_Target64
	2,// RipRelSize2_Target16
	2,// RipRelSize2_Target32
	2,// RipRelSize2_Target64
	4,// RipRelSize4_Target32
	4,// RipRelSize4_Target64
	1,// SizeIbReg
	1,// Size1OpCode
];
// GENERATOR-END: ImmSizes

/// Encodes instructions decoded by the decoder or instructions created by other code.
/// See also [`BlockEncoder`] which can encode any number of instructions.
///
/// [`BlockEncoder`]: struct.BlockEncoder.html
///
/// ```
/// use iced_x86::*;
///
/// // xchg ah,[rdx+rsi+16h]
/// let bytes = b"\x86\x64\x32\x16";
/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
/// decoder.set_ip(0x1234_5678);
/// let instr = decoder.decode();
///
/// let mut encoder = Encoder::new(64);
/// match encoder.encode(&instr, 0x5555_5555) {
///     Ok(len) => assert_eq!(4, len),
///     Err(err) => panic!("{}", err),
/// }
/// // We're done, take ownership of the buffer
/// let buffer = encoder.take_buffer();
/// assert_eq!(vec![0x86, 0x64, 0x32, 0x16], buffer);
/// ```
#[allow(missing_debug_implementations)]
pub struct Encoder {
	// RIP of the next byte to write; advanced as bytes are emitted and used in
	// encode() to compute the instruction length
	current_rip: u64,
	// Output buffer holding the encoded bytes
	buffer: Vec<u8>,
	// Maps a `Code` value to its handler. Copied from the lazy_static so
	// lookups don't go through the lazy_static machinery every time.
	handler_table: &'static [&'static OpCodeHandler],
	// Handler of the instruction currently being encoded
	handler: &'static OpCodeHandler,
	// First error recorded while encoding; empty string means "no error"
	error_message: String,
	// 16, 32 or 64 (validated in with_capacity())
	bitness: u32,
	// Low 32 bits of the instruction's RIP
	eip: u32,
	// Address where the displacement is/will be written — assumption, the
	// writer code isn't visible in this chunk; TODO confirm
	displ_addr: u32,
	// Address where the immediate is/will be written — same caveat as above
	imm_addr: u32,
	pub(self) immediate: u32,
	// high 32 bits if it's a 64-bit immediate
	// high 32 bits if it's an IP relative immediate (jcc,call target)
	// high 32 bits if it's a 64-bit absolute address
	pub(self) immediate_hi: u32,
	displ: u32,
	// high 32 bits if it's an IP relative mem displ (target)
	displ_hi: u32,
	// 1- or 2-byte opcode of the current instruction (see encode())
	pub(self) op_code: u32,
	// Scratch values written by handlers (VEX/EVEX W/L defaults) — presumably
	// user-configurable encoder options; verify against the option setters
	pub(self) internal_vex_wig_lig: u32,
	pub(self) internal_vex_lig: u32,
	pub(self) internal_evex_wig: u32,
	pub(self) internal_evex_lig: u32,
	pub(self) prevent_vex2: u32,
	// Pre-computed in with_capacity(): P66/P67 prefix flags needed to select a
	// 16/32-bit operand/address size in the current bitness
	opsize16_flags: u32,
	opsize32_flags: u32,
	adrsize16_flags: u32,
	adrsize32_flags: u32,
	// ***************************
	// These fields must be 64-bit aligned.
	// They are cleared in encode() and should be close so the compiler can optimize clearing them.
	pub(self) encoder_flags: u32, // EncoderFlags
	displ_size: DisplSize,
	pub(self) imm_size: ImmSize,
	mod_rm: u8,
	sib: u8,
	// ***************************
}

impl Encoder {
	/// Error message used when an `Encodable::Only1632` instruction is encoded in 64-bit mode
	pub(super) const ERROR_ONLY_1632_BIT_MODE: &'static str = "The instruction can only be used in 16/32-bit mode";
	/// Error message used when an `Encodable::Only64` instruction is encoded in 16/32-bit mode
	pub(super) const ERROR_ONLY_64_BIT_MODE: &'static str = "The instruction can only be used in 64-bit mode";

	/// Creates an encoder
	///
	/// # Panics
	///
	/// Panics if `bitness` is not one of 16, 32, 64.
	///
	/// # Arguments
	///
	/// * `bitness`: 16, 32 or 64
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn new(bitness: u32) -> Self {
		// Same as with_capacity() but without pre-allocating the output buffer
		Self::with_capacity(bitness, 0)
	}

	/// Creates an encoder with an initial buffer capacity
	///
	/// # Panics
	///
	/// Panics if `bitness` is not one of 16, 32, 64.
	///
	/// # Arguments
	///
	/// * `bitness`: 16, 32 or 64
	/// * `capacity`: Initial capacity of the `u8` buffer
	#[cfg_attr(has_must_use, must_use)]
	#[cfg_attr(feature = "cargo-clippy", allow(clippy::missing_inline_in_public_items))]
	pub fn with_capacity(bitness: u32, capacity: usize) -> Self {
		if bitness != 16 && bitness != 32 && bitness != 64 {
			// Give the caller a diagnosable message instead of a bare panic!()
			panic!("Invalid bitness: {}, expected 16, 32 or 64", bitness);
		}

		// Pre-compute which prefix flag (if any) is needed to select a 16-bit or
		// 32-bit operand/address size given the default size of this bitness.
		let opsize16_flags = if bitness != 16 { EncoderFlags::P66 } else { 0 };
		let opsize32_flags = if bitness == 16 { EncoderFlags::P66 } else { 0 };
		let adrsize16_flags = if bitness != 16 { EncoderFlags::P67 } else { 0 };
		let adrsize32_flags = if bitness != 32 { EncoderFlags::P67 } else { 0 };

		Self {
			current_rip: 0,
			// SAFETY-ish: index 0 is always valid — the handler table has one
			// entry per `Code` value and `Code` has at least one variant.
			handler: unsafe { *HANDLERS_TABLE.get_unchecked(0) },
			// Store it in an instance field since it's a lazy_static
			handler_table: HANDLERS_TABLE.as_slice(),
			buffer: if capacity == 0 { Vec::new() } else { Vec::with_capacity(capacity) },
			error_message: String::new(),
			bitness,
			eip: 0,
			displ_addr: 0,
			imm_addr: 0,
			immediate: 0,
			immediate_hi: 0,
			displ: 0,
			displ_hi: 0,
			op_code: 0,
			internal_vex_wig_lig: 0,
			internal_vex_lig: 0,
			internal_evex_wig: 0,
			internal_evex_lig: 0,
			prevent_vex2: 0,
			opsize16_flags,
			opsize32_flags,
			adrsize16_flags,
			adrsize32_flags,
			encoder_flags: 0,
			displ_size: DisplSize::default(),
			imm_size: ImmSize::default(),
			mod_rm: 0,
			sib: 0,
		}
	}

	/// Encodes an instruction and returns the size of the encoded instruction
	///
	/// # Errors
	///
	/// Returns an error message on failure.
	///
	/// # Arguments
	///
	/// * `instruction`: Instruction to encode
	/// * `rip`: `RIP` of the encoded instruction
	///
	/// # Examples
	///
	/// ```
	/// use iced_x86::*;
	///
	/// // je short $+4
	/// let bytes = b"\x75\x02";
	/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
	/// decoder.set_ip(0x1234_5678);
	/// let instr = decoder.decode();
	///
	/// let mut encoder = Encoder::new(64);
	/// // Use a different IP (orig rip + 0x10)
	/// match encoder.encode(&instr, 0x1234_5688) {
	///     Ok(len) => assert_eq!(2, len),
	///     Err(err) => panic!("{}", err),
	/// }
	/// // We're done, take ownership of the buffer
	/// let buffer = encoder.take_buffer();
	/// assert_eq!(vec![0x75, 0xF2], buffer);
	/// ```
	#[cfg_attr(feature = "cargo-clippy", allow(clippy::missing_inline_in_public_items))]
	pub fn encode(&mut self, instruction: &Instruction, rip: u64) -> Result<usize, String> {
		self.current_rip = rip;
		self.eip = rip as u32;

		// Clear per-instruction state. These fields are adjacent in the struct
		// so the compiler can clear them with very few stores.
		self.encoder_flags = EncoderFlags::NONE;
		self.displ_size = DisplSize::None;
		self.imm_size = ImmSize::None;
		self.mod_rm = 0;
		// sib doesn't need to be initialized, but the compiler generates better
		// code since it can clear 8 bytes with one instruction, but 7 bytes
		// requires 3 instructions.
		self.sib = 0;

		// SAFETY-ish: handler_table has one entry per `Code` value, so any
		// `Code` converts to a valid index — NOTE(review): relies on the
		// generated HANDLERS_TABLE length; confirmed elsewhere, not visible here.
		let handler = unsafe { *self.handler_table.get_unchecked(instruction.code() as usize) };
		self.handler = handler;
		self.op_code = handler.op_code;
		let group_index = handler.group_index;
		if group_index >= 0 {
			// Group opcode: the opcode extension goes in the modrm.reg field
			self.encoder_flags |= EncoderFlags::MOD_RM;
			self.mod_rm = (group_index as u8) << 3;
		}

		// Reject instructions that can't be encoded in the current bitness
		match handler.encodable {
			Encodable::Any => {}

			Encodable::Only1632 => {
				if self.bitness == 64 {
					self.set_error_message_str(Self::ERROR_ONLY_1632_BIT_MODE);
				}
			}

			Encodable::Only64 => {
				if self.bitness != 64 {
					self.set_error_message_str(Self::ERROR_ONLY_64_BIT_MODE);
				}
			}
		}

		// Add the 66h prefix / REX.W as needed for the instruction's operand size
		match handler.op_size {
			OperandSize::None => {}
			OperandSize::Size16 => self.encoder_flags |= self.opsize16_flags,
			OperandSize::Size32 => self.encoder_flags |= self.opsize32_flags,
			OperandSize::Size64 => self.encoder_flags |= EncoderFlags::W,
		}

		// Add the 67h prefix as needed for the instruction's address size
		match handler.addr_size {
			AddressSize::None | AddressSize::Size64 => {}
			AddressSize::Size16 => self.encoder_flags |= self.adrsize16_flags,
			AddressSize::Size32 => self.encoder_flags |= self.adrsize32_flags,
		}

		if (handler.flags & OpCodeHandlerFlags::DECLARE_DATA) == 0 {
			// Normal instruction: encode operands, prefixes, opcode, modrm/sib, imm
			let ops = &*handler.operands;
			if instruction.op_count() as usize != ops.len() {
				self.set_error_message(format!("Expected {} operand(s) but the instruction has {} operand(s)", ops.len(), instruction.op_count()));
			}
			for i in 0..ops.len() {
				// SAFETY-ish: i < ops.len() by the loop bound
				let op = unsafe { *ops.get_unchecked(i) };
				op.encode(self, instruction, i as u32);
			}

			// FWAIT (9Bh) is written before the other prefixes
			if (handler.flags & OpCodeHandlerFlags::FWAIT) != 0 {
				self.write_byte_internal(0x9B);
			}

			self.write_prefixes(instruction);

			// Lets the handler write its encoding-specific bytes (e.g. VEX/EVEX)
			(handler.encode)(handler, self, instruction);

			// 1-byte or 2-byte (0Fxx) opcode
			let op_code = self.op_code;
			if op_code <= 0x0000_00FF {
				self.write_byte_internal(op_code);
			} else {
				debug_assert!(op_code <= 0x0000_FFFF);
				self.write_byte_internal(op_code >> 8);
				self.write_byte_internal(op_code);
			}

			if (self.encoder_flags & (EncoderFlags::MOD_RM | EncoderFlags::DISPL)) != 0 {
				self.write_mod_rm();
			}

			if self.imm_size != ImmSize::None {
				self.write_immediate();
			}
		} else {
			// Declare-data pseudo instruction: the handler writes everything
			(handler.encode)(handler, self, instruction);
		}

		// current_rip was advanced by every write, so the difference is the length
		let instr_len = (self.current_rip as usize).wrapping_sub(rip as usize);
		if instr_len > IcedConstants::MAX_INSTRUCTION_LENGTH && (handler.flags & OpCodeHandlerFlags::DECLARE_DATA) == 0 {
			self.set_error_message(format!("Instruction length > {} bytes", IcedConstants::MAX_INSTRUCTION_LENGTH));
		}
		if !self.error_message.is_empty() {
			// Hand the error to the caller and reset it for the next encode() call
			Err(mem::replace(&mut self.error_message, String::new()))
		} else {
			Ok(instr_len)
		}
	}

	#[inline]
	pub(super) fn set_error_message(&mut self, message: String) {
		// Only the first error recorded during an encode() call is kept
		if !self.error_message.is_empty() {
			return;
		}
		self.error_message = message;
	}

	#[inline]
	pub(super) fn set_error_message_str(&mut self, message: &str) {
		// Only the first error recorded during an encode() call is kept
		if !self.error_message.is_empty() {
			return;
		}
		self.error_message.push_str(message);
	}

	/// Returns `true` if the operand's kind is the expected one; otherwise
	/// records an error message and returns `false`.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub(super) fn verify_op_kind(&mut self, operand: u32, expected: OpKind, actual: OpKind) -> bool {
		if expected == actual {
			return true;
		}
		// Debug builds can show variant names; release builds only have the numbers
		let message = if cfg!(debug_assertions) {
			format!("Operand {}: Expected: {:?}, actual: {:?}", operand, expected, actual)
		} else {
			format!("Operand {}: Expected: OpKind value {}, actual: OpKind value {}", operand, expected as u32, actual as u32)
		};
		self.set_error_message(message);
		false
	}

	/// Returns `true` if the operand's register is the expected one; otherwise
	/// records an error message and returns `false`.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub(super) fn verify_register(&mut self, operand: u32, expected: Register, actual: Register) -> bool {
		if expected == actual {
			return true;
		}
		// Debug builds can show variant names; release builds only have the numbers
		let message = if cfg!(debug_assertions) {
			format!("Operand {}: Expected: {:?}, actual: {:?}", operand, expected, actual)
		} else {
			format!("Operand {}: Expected: Register value {}, actual: Register value {}", operand, expected as u32, actual as u32)
		};
		self.set_error_message(message);
		false
	}

	/// Returns `true` if `register` is in `[reg_lo, reg_hi]`; otherwise records
	/// an error message and returns `false`. In 16/32-bit mode the upper bound
	/// is clamped to the first 8 registers since the extended regs need REX.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub(super) fn verify_register_range(&mut self, operand: u32, register: Register, reg_lo: Register, mut reg_hi: Register) -> bool {
		if self.bitness != 64 && reg_hi as u32 > (reg_lo as u32).wrapping_add(7) {
			// SAFETY — NOTE(review): assumes `Register` values are contiguous so
			// reg_lo+7 is a valid variant; confirm against the Register enum.
			reg_hi = unsafe { mem::transmute((reg_lo as u8).wrapping_add(7)) };
		}
		if reg_lo <= register && register <= reg_hi {
			true
		} else {
			if cfg!(debug_assertions) {
				self.set_error_message(format!(
					"Operand {}: Register {:?} is not between {:?} and {:?} (inclusive)",
					operand, register, reg_lo, reg_hi
				));
			} else {
				self.set_error_message(format!(
					"Operand {}: Register {} is not between {} and {} (inclusive)",
					operand, register as u32, reg_lo as u32, reg_hi as u32
				));
			}
			false
		}
	}

	/// Adds a near branch operand encoded as an IP-relative immediate of
	/// `imm_size` (1, 2 or 4) bytes, storing the branch target in
	/// `immediate`/`immediate_hi` for later fixup when the immediate is written.
	pub(super) fn add_branch(&mut self, op_kind: OpKind, imm_size: u32, instruction: &Instruction, operand: u32) {
		if !self.verify_op_kind(operand, op_kind, instruction.op_kind(operand)) {
			return;
		}

		let target;
		match imm_size {
			1 => match op_kind {
				OpKind::NearBranch16 => {
					// 16-bit target needs a 66h prefix if it's not the default op size
					self.encoder_flags |= self.opsize16_flags;
					self.imm_size = ImmSize::RipRelSize1_Target16;
					self.immediate = instruction.near_branch16() as u32;
				}

				OpKind::NearBranch32 => {
					self.encoder_flags |= self.opsize32_flags;
					self.imm_size = ImmSize::RipRelSize1_Target32;
					self.immediate = instruction.near_branch32();
				}

				OpKind::NearBranch64 => {
					self.imm_size = ImmSize::RipRelSize1_Target64;
					target = instruction.near_branch64();
					// Split the 64-bit target across the two 32-bit fields
					self.immediate = target as u32;
					self.immediate_hi = (target >> 32) as u32;
				}

				_ => unreachable!(),
			},

			2 => match op_kind {
				OpKind::NearBranch16 => {
					self.encoder_flags |= self.opsize16_flags;
					self.imm_size = ImmSize::RipRelSize2_Target16;
					self.immediate = instruction.near_branch16() as u32;
				}

				_ => unreachable!(),
			},

			4 => match op_kind {
				OpKind::NearBranch32 => {
					self.encoder_flags |= self.opsize32_flags;
					self.imm_size = ImmSize::RipRelSize4_Target32;
					self.immediate = instruction.near_branch32();
				}

				OpKind::NearBranch64 => {
					self.imm_size = ImmSize::RipRelSize4_Target64;
					target = instruction.near_branch64();
					self.immediate = target as u32;
					self.immediate_hi = (target >> 32) as u32;
				}

				_ => unreachable!(),
			},
			_ => unreachable!(),
		}
	}

	/// Adds a near branch operand whose target size follows the current bitness
	/// (64-bit target in 64-bit mode, else 32-bit target), with an `imm_size`
	/// (2 or 4) byte IP-relative immediate.
	pub(super) fn add_branch_x(&mut self, imm_size: u32, instruction: &Instruction, operand: u32) {
		if self.bitness == 64 {
			if !self.verify_op_kind(operand, OpKind::NearBranch64, instruction.op_kind(operand)) {
				return;
			}

			let target = instruction.near_branch64();
			match imm_size {
				2 => {
					// 2-byte branch displ in 64-bit mode requires a 66h prefix
					self.encoder_flags |= EncoderFlags::P66;
					self.imm_size = ImmSize::RipRelSize2_Target64;
					self.immediate = target as u32;
					self.immediate_hi = (target >> 32) as u32;
				}

				4 => {
					self.imm_size = ImmSize::RipRelSize4_Target64;
					self.immediate = target as u32;
					self.immediate_hi = (target >> 32) as u32;
				}

				_ => unreachable!(),
			}
		} else {
			if !self.verify_op_kind(operand, OpKind::NearBranch32, instruction.op_kind(operand)) {
				return;
			}

			match imm_size {
				2 => {
					const_assert_eq!(0x80, EncoderFlags::P66);
					// bitness is 16 or 32 here; (bitness & 0x20) << 2 == P66 (0x80)
					// only when bitness == 32, i.e. 66h is needed for a 16-bit displ
					self.encoder_flags |= (self.bitness & 0x20) << 2;
					self.imm_size = ImmSize::RipRelSize2_Target32;
					self.immediate = instruction.near_branch32();
				}

				4 => {
					const_assert_eq!(0x80, EncoderFlags::P66);
					// (bitness & 0x10) << 3 == P66 (0x80) only when bitness == 16,
					// i.e. 66h is needed for a 32-bit displ in 16-bit mode
					self.encoder_flags |= (self.bitness & 0x10) << 3;
					self.imm_size = ImmSize::RipRelSize4_Target32;
					self.immediate = instruction.near_branch32();
				}

				_ => unreachable!(),
			}
		}
	}

	/// Adds a near branch operand encoded as a plain (non IP-relative)
	/// `displ_size` (2 or 4) byte immediate.
	pub(super) fn add_branch_disp(&mut self, displ_size: u32, instruction: &Instruction, operand: u32) {
		debug_assert!(displ_size == 2 || displ_size == 4);
		// The immediate is stored before the operand kind is validated; on a
		// mismatch the error is recorded and the caller fails the whole encode.
		let op_kind = match displ_size {
			2 => {
				self.imm_size = ImmSize::Size2;
				self.immediate = instruction.near_branch16() as u32;
				OpKind::NearBranch16
			}

			4 => {
				self.imm_size = ImmSize::Size4;
				self.immediate = instruction.near_branch32();
				OpKind::NearBranch32
			}

			_ => unreachable!(),
		};
		if !self.verify_op_kind(operand, op_kind, instruction.op_kind(operand)) {
			return;
		}
	}

	/// Adds a far branch operand: a `size` (2 or 4) byte offset followed by a
	/// 2-byte segment selector (stored in `immediate_hi`).
	pub(super) fn add_far_branch(&mut self, instruction: &Instruction, operand: u32, size: u32) {
		match size {
			2 => {
				if !self.verify_op_kind(operand, OpKind::FarBranch16, instruction.op_kind(operand)) {
					return;
				}
				self.imm_size = ImmSize::Size2_2;
				self.immediate = instruction.far_branch16() as u32;
			}

			_ => {
				debug_assert_eq!(4, size);
				if !self.verify_op_kind(operand, OpKind::FarBranch32, instruction.op_kind(operand)) {
					return;
				}
				self.imm_size = ImmSize::Size4_2;
				self.immediate = instruction.far_branch32();
			}
		}
		// The selector is written after the offset in both cases
		self.immediate_hi = instruction.far_branch_selector() as u32;
		// 66h prefix if the offset size doesn't match the current operand size
		if self.bitness != size.wrapping_mul(8) {
			self.encoder_flags |= EncoderFlags::P66;
		}
	}

	/// Adds the 67h prefix if the register size (`reg_size` bytes: 2, 4 or 8)
	/// is valid for the current bitness but isn't its default address size;
	/// records an error if the size can't be used at all.
	pub(super) fn set_addr_size(&mut self, reg_size: u32) {
		debug_assert!(reg_size == 2 || reg_size == 4 || reg_size == 8);
		if self.bitness == 64 {
			// 64-bit mode: only 32-bit (needs 67h) and 64-bit addressing exist
			if reg_size == 2 {
				self.set_error_message(format!("Invalid register size: {}, must be 32-bit or 64-bit", reg_size.wrapping_mul(8)));
			} else if reg_size == 4 {
				self.encoder_flags |= EncoderFlags::P67;
			}
		} else if reg_size == 8 {
			// 16/32-bit mode: 64-bit addressing is impossible
			self.set_error_message(format!("Invalid register size: {}, must be 16-bit or 32-bit", reg_size.wrapping_mul(8)));
		} else if (self.bitness == 16) == (reg_size == 4) {
			// 67h is needed exactly when the size is the non-default one:
			// 32-bit regs in 16-bit mode, or 16-bit regs in 32-bit mode
			self.encoder_flags |= EncoderFlags::P67;
		}
	}

	/// Adds an absolute (offset-only, moffs-style) memory operand: either a
	/// 64-bit address (`OpKind::Memory64`) or a 16/32-bit displacement with no
	/// base/index registers (`OpKind::Memory`).
	pub(super) fn add_abs_mem(&mut self, instruction: &Instruction, operand: u32) {
		self.encoder_flags |= EncoderFlags::DISPL;
		let op_kind = instruction.op_kind(operand);
		if op_kind == OpKind::Memory64 {
			if self.bitness != 64 {
				self.set_error_message(format!("Operand {}: 64-bit abs address is only available in 64-bit mode", operand));
				return;
			}
			self.displ_size = DisplSize::Size8;
			// Split the 64-bit address across the two 32-bit displ fields
			let addr = instruction.memory_address64();
			self.displ = addr as u32;
			self.displ_hi = (addr >> 32) as u32;
		} else if op_kind == OpKind::Memory {
			if instruction.memory_base() != Register::None || instruction.memory_index() != Register::None {
				self.set_error_message(format!("Operand {}: Absolute addresses can't have base and/or index regs", operand));
				return;
			}
			let displ_size = instruction.memory_displ_size();
			if displ_size == 2 {
				if self.bitness == 64 {
					self.set_error_message(format!("Operand {}: 16-bit abs addresses can't be used in 64-bit mode", operand));
					return;
				}
				// 67h prefix to get 16-bit addressing in 32-bit mode
				if self.bitness == 32 {
					self.encoder_flags |= EncoderFlags::P67;
				}
				self.displ_size = DisplSize::Size2;
				self.displ = instruction.memory_displacement();
			} else if displ_size == 4 {
				self.encoder_flags |= self.adrsize32_flags;
				self.displ_size = DisplSize::Size4;
				self.displ = instruction.memory_displacement();
			} else {
				self.set_error_message(format!(
					"Operand {}: Instruction.memory_displ_size() must be initialized to 2 (16-bit) or 4 (32-bit)",
					operand
				));
			}
		} else {
			if cfg!(debug_assertions) {
				self.set_error_message(format!("Operand {}: Expected OpKind::Memory or OpKind::Memory64, actual: {:?}", operand, op_kind));
			} else {
				self.set_error_message(format!(
					"Operand {}: Expected OpKind::Memory or OpKind::Memory64, actual: OpKind value {}",
					operand, op_kind as u32
				));
			}
		}
	}

	/// Adds a register operand encoded in the modrm.reg field, setting the
	/// REX.R / EVEX.R' flags as needed for extended registers.
	#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
	pub(super) fn add_mod_rm_register(&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register) {
		if !self.verify_op_kind(operand, OpKind::Register, instruction.op_kind(operand)) {
			return;
		}
		let reg = instruction.op_register(operand);
		if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
			return;
		}
		let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
		if reg_lo == Register::AL {
			// 8-bit regs: SPL..DIL need a REX prefix and sit after AH..BH in the
			// enum, so skip those 4; AH..BH must NOT be used together with REX
			if reg >= Register::SPL {
				reg_num -= 4;
				self.encoder_flags |= EncoderFlags::REX;
			} else if reg >= Register::AH {
				self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
			}
		}
		debug_assert!(reg_num <= 31);
		// Low 3 bits go in modrm.reg
		self.mod_rm |= ((reg_num & 7) << 3) as u8;
		self.encoder_flags |= EncoderFlags::MOD_RM;
		const_assert_eq!(4, EncoderFlags::R);
		// Bit 3 of the register number -> REX.R flag (bit 2 of encoder_flags)
		self.encoder_flags |= (reg_num & 8) >> 1;
		const_assert_eq!(0x200, EncoderFlags::R2);
		// Bit 4 of the register number -> R2 flag (EVEX.R', bit 9 of encoder_flags)
		self.encoder_flags |= (reg_num & 0x10) << (9 - 4);
	}

	/// Adds a register operand encoded in the low 3 bits of the opcode byte
	/// (e.g. `push reg`), setting REX.B for extended registers.
	pub(super) fn add_reg(&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register) {
		if !self.verify_op_kind(operand, OpKind::Register, instruction.op_kind(operand)) {
			return;
		}
		let reg = instruction.op_register(operand);
		if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
			return;
		}
		let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
		if reg_lo == Register::AL {
			// 8-bit regs: SPL..DIL need a REX prefix and sit after AH..BH in the
			// enum, so skip those 4; AH..BH must NOT be used together with REX
			if reg >= Register::SPL {
				reg_num -= 4;
				self.encoder_flags |= EncoderFlags::REX;
			} else if reg >= Register::AH {
				self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
			}
		}
		debug_assert!(reg_num <= 15);
		// Low 3 bits go in the opcode byte itself
		self.op_code |= reg_num & 7;
		const_assert_eq!(1, EncoderFlags::B);
		debug_assert!(reg_num <= 15);
		// Bit 3 of the register number -> REX.B flag (bit 0 of encoder_flags)
		self.encoder_flags |= reg_num >> 3; // reg_num <= 15, so no need to mask out anything
	}

	/// Adds a reg-or-mem (r/m) operand without VSIB support; convenience
	/// wrapper around `add_reg_or_mem_full()` with no VSIB index register range.
	#[inline]
	pub(super) fn add_reg_or_mem(
		&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register, allow_mem_op: bool, allow_reg_op: bool,
	) {
		self.add_reg_or_mem_full(instruction, operand, reg_lo, reg_hi, Register::None, Register::None, allow_mem_op, allow_reg_op);
	}

	/// Adds a reg-or-mem (r/m) operand, encoded in modrm.rm (+ optional SIB).
	/// `vsib_index_reg_lo/hi` restrict the index register range for VSIB
	/// operands; `Register::None` means a normal (non-VSIB) memory operand.
	#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
	pub(super) fn add_reg_or_mem_full(
		&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register, vsib_index_reg_lo: Register,
		vsib_index_reg_hi: Register, allow_mem_op: bool, allow_reg_op: bool,
	) {
		let op_kind = instruction.op_kind(operand);
		self.encoder_flags |= EncoderFlags::MOD_RM;
		if op_kind == OpKind::Register {
			if !allow_reg_op {
				self.set_error_message(format!("Operand {}: register operand is not allowed", operand));
				return;
			}
			let reg = instruction.op_register(operand);
			if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
				return;
			}
			let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
			if reg_lo == Register::AL {
				// 8-bit regs: both R8L.. and SPL.. sit after AH..BH in the enum,
				// so skip those 4 names; SPL..DIL additionally force a REX prefix
				// and AH..BH must NOT be used together with REX
				if reg >= Register::R8L {
					reg_num -= 4;
				} else if reg >= Register::SPL {
					reg_num -= 4;
					self.encoder_flags |= EncoderFlags::REX;
				} else if reg >= Register::AH {
					self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
				}
			}
			// mod=11b (register direct), low 3 bits in modrm.rm
			self.mod_rm |= (reg_num & 7) as u8;
			self.mod_rm |= 0xC0;
			const_assert_eq!(1, EncoderFlags::B);
			const_assert_eq!(2, EncoderFlags::X);
			// Bits 3..4 of the register number -> REX.B / X flags
			self.encoder_flags |= (reg_num >> 3) & 3;
			debug_assert!(reg_num <= 31);
		} else if op_kind == OpKind::Memory {
			if !allow_mem_op {
				self.set_error_message(format!("Operand {}: memory operand is not allowed", operand));
				return;
			}
			if instruction.memory_size().is_broadcast() {
				self.encoder_flags |= EncoderFlags::BROADCAST;
			}

			// If the instruction doesn't say, assume the code size of this encoder
			let mut code_size = instruction.code_size();
			if code_size == CodeSize::Unknown {
				code_size = if self.bitness == 64 {
					CodeSize::Code64
				} else if self.bitness == 32 {
					CodeSize::Code32
				} else {
					debug_assert_eq!(16, self.bitness);
					CodeSize::Code16
				};
			}
			let addr_size = super::instruction_internal::get_address_size_in_bytes(
				instruction.memory_base(),
				instruction.memory_index(),
				instruction.memory_displ_size(),
				code_size,
			)
			.wrapping_mul(8);
			if addr_size != self.bitness {
				self.encoder_flags |= EncoderFlags::P67;
			}
			// Some instructions require the register operand's size to match the
			// memory addressing size (flag set by the handler)
			if (self.encoder_flags & EncoderFlags::REG_IS_MEMORY) != 0 {
				let reg_size = Encoder::get_register_op_size(instruction);
				if reg_size != addr_size {
					self.set_error_message(format!("Operand {}: Register operand size must equal memory addressing mode (16/32/64)", operand));
					return;
				}
			}
			if addr_size == 16 {
				if vsib_index_reg_lo != Register::None {
					self.set_error_message(format!(
						"Operand {}: VSIB operands can't use 16-bit addressing. It must be 32-bit or 64-bit addressing",
						operand
					));
					return;
				}
				self.add_mem_op16(instruction, operand);
			} else {
				self.add_mem_op(instruction, operand, addr_size, vsib_index_reg_lo, vsib_index_reg_hi);
			}
		} else {
			if cfg!(debug_assertions) {
				self.set_error_message(format!("Operand {}: Expected a register or memory operand, but op_kind is {:?}", operand, op_kind));
			} else {
				self.set_error_message(format!("Operand {}: Expected a register or memory operand, but op_kind is {}", operand, op_kind as u32));
			}
		}
	}

	/// Returns the size in bits (16/32/64) of the first operand's GPR, or 0 if
	/// it isn't a register operand or isn't a GPR.
	#[cfg_attr(has_must_use, must_use)]
	fn get_register_op_size(instruction: &Instruction) -> u32 {
		debug_assert_eq!(OpKind::Register, instruction.op0_kind());
		// Release builds return 0 instead of asserting
		if instruction.op0_kind() != OpKind::Register {
			return 0;
		}
		let reg = instruction.op0_register();
		if reg.is_gpr64() {
			64
		} else if reg.is_gpr32() {
			32
		} else if reg.is_gpr16() {
			16
		} else {
			0
		}
	}

	/// Tries to shrink `displ` to a 1-byte displacement. A handler may supply
	/// its own compression routine (e.g. scaled disp8); otherwise a plain
	/// signed-byte range check is used. Returns `None` if it doesn't fit.
	#[cfg_attr(has_must_use, must_use)]
	fn try_convert_to_disp8n(&mut self, instruction: &Instruction, displ: i32) -> Option<i8> {
		match self.handler.try_convert_to_disp8n {
			Some(convert) => convert(self.handler, self, instruction, displ),
			None => {
				if displ >= i8::MIN as i32 && displ <= i8::MAX as i32 {
					Some(displ as i8)
				} else {
					None
				}
			}
		}
	}

	/// Encodes a 16-bit addressing mode memory operand using the fixed modrm.rm
	/// table: 0=[bx+si] 1=[bx+di] 2=[bp+si] 3=[bp+di] 4=[si] 5=[di] 6=[bp]/disp16 7=[bx]
	fn add_mem_op16(&mut self, instruction: &Instruction, operand: u32) {
		if self.bitness == 64 {
			self.set_error_message(format!("Operand {}: 16-bit addressing can't be used by 64-bit code", operand));
			return;
		}
		let base = instruction.memory_base();
		let index = instruction.memory_index();
		let mut displ_size = instruction.memory_displ_size();
		if base == Register::BX && index == Register::SI {
			// Nothing
		} else if base == Register::BX && index == Register::DI {
			self.mod_rm |= 1;
		} else if base == Register::BP && index == Register::SI {
			self.mod_rm |= 2;
		} else if base == Register::BP && index == Register::DI {
			self.mod_rm |= 3;
		} else if base == Register::SI && index == Register::None {
			self.mod_rm |= 4;
		} else if base == Register::DI && index == Register::None {
			self.mod_rm |= 5;
		} else if base == Register::BP && index == Register::None {
			self.mod_rm |= 6;
		} else if base == Register::BX && index == Register::None {
			self.mod_rm |= 7;
		} else if base == Register::None && index == Register::None {
			// Offset-only: rm=6 with mod=00b means disp16
			self.mod_rm |= 6;
			self.displ_size = DisplSize::Size2;
			self.displ = instruction.memory_displacement();
		} else {
			if cfg!(debug_assertions) {
				self.set_error_message(format!("Operand {}: Invalid 16-bit base + index registers: base={:?}, index={:?}", operand, base, index));
			} else {
				self.set_error_message(format!(
					"Operand {}: Invalid 16-bit base + index registers: base={}, index={}",
					operand, base as u32, index as u32
				));
			}
			return;
		}

		if base != Register::None || index != Register::None {
			self.displ = instruction.memory_displacement();
			// [bp] => [bp+00]
			// (rm=6 with mod=00b is disp16, so plain [bp] needs a zero disp8)
			if displ_size == 0 && base == Register::BP && index == Register::None {
				displ_size = 1;
				self.displ = 0;
			}
			if displ_size == 1 {
				// Temp needed if rustc < 1.36.0 (2015 edition)
				let tmp_displ = self.displ;
				// 16-bit addressing: sign-extend the 16-bit displ before the check
				if let Some(compressed_value) = self.try_convert_to_disp8n(instruction, tmp_displ as i16 as i32) {
					self.displ = compressed_value as u8 as u32;
				} else {
					// Doesn't fit in a byte; fall back to a 16-bit displacement
					displ_size = 2;
				}
			}
			if displ_size == 0 {
				// Nothing
			} else if displ_size == 1 {
				// mod=01b: disp8 follows
				self.mod_rm |= 0x40;
				self.displ_size = DisplSize::Size1;
			} else if displ_size == 2 {
				// mod=10b: disp16 follows
				self.mod_rm |= 0x80;
				self.displ_size = DisplSize::Size2;
			} else {
				self.set_error_message(format!("Operand {}: Invalid displacement size: {}, must be 0, 1, or 2", operand, displ_size));
				return;
			}
		}
	}

	/// Encodes a 32/64-bit addressing mode memory operand: modrm.rm, optional
	/// SIB byte, displacement, RIP-relative addressing and VSIB index ranges.
	fn add_mem_op(&mut self, instruction: &Instruction, operand: u32, addr_size: u32, vsib_index_reg_lo: Register, vsib_index_reg_hi: Register) {
		debug_assert!(addr_size == 32 || addr_size == 64);
		if self.bitness != 64 && addr_size == 64 {
			self.set_error_message(format!("Operand {}: 64-bit addressing can only be used in 64-bit mode", operand));
			return;
		}

		let base = instruction.memory_base();
		let index = instruction.memory_index();
		let mut displ_size = instruction.memory_displ_size();
		self.displ = instruction.memory_displacement();

		// Valid base register range for this address size; the index range is
		// the same unless this is a VSIB operand (vector index registers).
		let base_lo;
		let base_hi;
		let index_lo;
		let index_hi;
		if addr_size == 64 {
			base_lo = Register::RAX;
			base_hi = Register::R15;
		} else {
			debug_assert_eq!(32, addr_size);
			base_lo = Register::EAX;
			base_hi = Register::R15D;
		}
		if vsib_index_reg_lo != Register::None {
			index_lo = vsib_index_reg_lo;
			index_hi = vsib_index_reg_hi;
		} else {
			index_lo = base_lo;
			index_hi = base_hi;
		}
		if base != Register::None && base != Register::RIP && base != Register::EIP && !self.verify_register_range(operand, base, base_lo, base_hi) {
			return;
		}
		if index != Register::None && !self.verify_register_range(operand, index, index_lo, index_hi) {
			return;
		}

		if displ_size != 0 && displ_size != 1 && displ_size != 4 && displ_size != 8 {
			self.set_error_message(format!("Operand {}: Invalid displ size: {}, must be 0, 1, 4, 8", operand, displ_size));
			return;
		}
		if base == Register::RIP || base == Register::EIP {
			// RIP/EIP relative: rm=101b with mod=00b, disp32 holds the target
			if index != Register::None {
				self.set_error_message(format!("Operand {}: RIP relative addressing can't use an index register", operand));
				return;
			}
			if self.bitness != 64 {
				self.set_error_message(format!("Operand {}: RIP/EIP relative addressing is only available in 64-bit mode", operand));
				return;
			}
			self.mod_rm |= 5;
			if base == Register::RIP {
				self.displ_size = DisplSize::RipRelSize4_Target64;
				// displ is relative to the next instruction; store the absolute target
				let target = instruction.next_ip().wrapping_add(self.displ as i32 as u64);
				self.displ = target as u32;
				self.displ_hi = (target >> 32) as u32;
			} else {
				self.displ_size = DisplSize::RipRelSize4_Target32;
				self.displ = instruction.next_ip32().wrapping_add(self.displ);
			}
			return;
		}
		let scale = super::instruction_internal::internal_get_memory_index_scale(instruction);
		if base == Register::None && index == Register::None {
			// Offset-only address
			if vsib_index_reg_lo != Register::None {
				self.set_error_message(format!("Operand {}: VSIB addressing can't use an offset-only address", operand));
				return;
			}
			if self.bitness == 64 || scale != 0 {
				// 64-bit mode needs a SIB byte (rm=101b would be RIP relative)
				self.mod_rm |= 4;
				self.displ_size = DisplSize::Size4;
				self.encoder_flags |= EncoderFlags::SIB;
				self.sib = (0x25 | (scale << 6)) as u8;
				return;
			} else {
				self.mod_rm |= 5;
				self.displ_size = DisplSize::Size4;
				return;
			}
		}

		let base_num = if base == Register::None { -1 } else { (base as i32).wrapping_sub(base_lo as i32) };
		let index_num = if index == Register::None { -1 } else { (index as i32).wrapping_sub(index_lo as i32) };

		// [ebp] => [ebp+00]
		// (rm=101b with mod=00b means disp32, so *BP/R13 bases need a zero disp8)
		if displ_size == 0 && index == Register::None && (base_num & 7) == 5 {
			displ_size = 1;
			self.displ = 0;
		}

		if displ_size == 1 {
			// Temp needed if rustc < 1.36.0 (2015 edition)
			let tmp_displ = self.displ;
			// Use the full 32-bit displacement here. (`as i16 as i32`, as used by
			// the 16-bit path in add_mem_op16(), would truncate 32/64-bit
			// addressing displacements before the disp8 check.)
			if let Some(compressed_value) = self.try_convert_to_disp8n(instruction, tmp_displ as i32) {
				self.displ = compressed_value as u8 as u32;
			} else {
				// Doesn't fit in a byte; widen to the full displacement size
				displ_size = addr_size / 8;
			}
		}

		if base == Register::None {
			// Tested earlier in the method
			debug_assert!(index != Register::None);
			self.displ_size = DisplSize::Size4;
		} else if displ_size == 1 {
			// mod=01b: disp8 follows
			self.mod_rm |= 0x40;
			self.displ_size = DisplSize::Size1;
		} else if displ_size == addr_size / 8 {
			// mod=10b: disp32 follows (sign-extended in 64-bit addressing)
			self.mod_rm |= 0x80;
			self.displ_size = DisplSize::Size4;
		} else if displ_size != 0 {
			self.set_error_message_str("Invalid memory_displ_size() value");
		}

		if index == Register::None && (base_num & 7) != 4 && scale == 0 {
			// No SIB byte needed: base goes directly in modrm.rm
			// Tested earlier in the method
			debug_assert!(base != Register::None);
			self.mod_rm |= (base_num & 7) as u8;
		} else {
			// SIB byte: rm=100b, SIB = scale | index<<3 | base
			self.encoder_flags |= EncoderFlags::SIB;
			self.sib = (scale << 6) as u8;
			self.mod_rm |= 4;
			if index == Register::RSP || index == Register::ESP {
				self.set_error_message(format!("Operand {}: ESP/RSP can't be used as an index register", operand));
				return;
			}
			if base_num < 0 {
				// No base: SIB base=101b with mod=00b means disp32
				self.sib |= 5;
			} else {
				self.sib |= (base_num & 7) as u8;
			}
			if index_num < 0 {
				// No index: SIB index=100b
				self.sib |= 0x20;
			} else {
				self.sib |= ((index_num & 7) << 3) as u8;
			}
		}

		if base_num >= 0 {
			const_assert_eq!(1, EncoderFlags::B);
			debug_assert!(base_num <= 15); // No '& 1' required below
			// Bit 3 of the base register number -> REX.B flag
			self.encoder_flags |= (base_num as u32) >> 3;
		}
		if index_num >= 0 {
			const_assert_eq!(2, EncoderFlags::X);
			// Bit 3 -> REX.X flag; bit 4 (VSIB regs 16..31) -> V' via VVVVV
			self.encoder_flags |= ((index_num as u32) >> 2) & 2;
			self.encoder_flags |= ((index_num as u32) & 0x10) << EncoderFlags::VVVVV_SHIFT;
			debug_assert!(index_num <= 31);
		}
	}

	/// Emits all legacy prefixes selected for `instruction` in the canonical
	/// order: segment override, LOCK (F0), operand size (66), address size (67),
	/// then F3 (REPE/XRELEASE) and F2 (REPNE/XACQUIRE).
	fn write_prefixes(&mut self, instruction: &Instruction) {
		debug_assert!((self.handler.flags & OpCodeHandlerFlags::DECLARE_DATA) == 0);
		// Segment override prefix byte (ES/CS/SS/DS/FS/GS), if one was requested
		let seg = instruction.segment_prefix();
		if seg != Register::None {
			static SEGMENT_OVERRIDES: [u8; 6] = [0x26, 0x2E, 0x36, 0x3E, 0x64, 0x65];
			let index = (seg as usize).wrapping_sub(Register::ES as usize);
			debug_assert!(index < SEGMENT_OVERRIDES.len());
			// SAFETY: the debug_assert above documents the invariant; `seg` is one of
			// the six segment registers, so `index` is in 0..6.
			let prefix_byte = unsafe { *SEGMENT_OVERRIDES.get_unchecked(index) };
			self.write_byte_internal(prefix_byte as u32);
		}
		// LOCK prefix: either forced by the handler or requested on the instruction
		if (self.encoder_flags & EncoderFlags::PF0) != 0 || instruction.has_lock_prefix() {
			self.write_byte_internal(0xF0);
		}
		// Operand-size prefix
		if (self.encoder_flags & EncoderFlags::P66) != 0 {
			self.write_byte_internal(0x66);
		}
		// Address-size prefix
		if (self.encoder_flags & EncoderFlags::P67) != 0 {
			self.write_byte_internal(0x67);
		}
		// F3 doubles as REPE and XRELEASE; F2 doubles as REPNE and XACQUIRE
		if super::instruction_internal::internal_has_repe_prefix_has_xrelease_prefix(instruction) {
			self.write_byte_internal(0xF3);
		}
		if super::instruction_internal::internal_has_repne_prefix_has_xacquire_prefix(instruction) {
			self.write_byte_internal(0xF2);
		}
	}

	/// Writes the ModR/M byte, the optional SIB byte and any memory displacement
	/// bytes (little-endian) to the output buffer. Also records the buffer
	/// position of the displacement so `get_constant_offsets()` can report it.
	fn write_mod_rm(&mut self) {
		debug_assert!((self.handler.flags & OpCodeHandlerFlags::DECLARE_DATA) == 0);
		// Caller must have prepared a ModR/M byte and/or a displacement
		debug_assert!((self.encoder_flags & (EncoderFlags::MOD_RM | EncoderFlags::DISPL)) != 0);
		// Temp needed if rustc < 1.36.0 (2015 edition)
		let mut tmp;
		let tmp2;
		if (self.encoder_flags & EncoderFlags::MOD_RM) != 0 {
			tmp = self.mod_rm as u32;
			self.write_byte_internal(tmp);
			// The SIB byte, when present, immediately follows ModR/M
			if (self.encoder_flags & EncoderFlags::SIB) != 0 {
				tmp = self.sib as u32;
				self.write_byte_internal(tmp);
			}
		}

		let mut diff4;
		// Remember where the displacement starts (used by get_constant_offsets())
		self.displ_addr = self.current_rip as u32;
		match self.displ_size {
			DisplSize::None => {}
			// 8-bit displacement
			DisplSize::Size1 => {
				tmp = self.displ;
				self.write_byte_internal(tmp)
			}

			// 16-bit displacement, little-endian
			DisplSize::Size2 => {
				diff4 = self.displ;
				self.write_byte_internal(diff4);
				self.write_byte_internal(diff4 >> 8);
			}

			// 32-bit displacement, little-endian
			DisplSize::Size4 => {
				diff4 = self.displ;
				self.write_byte_internal(diff4);
				self.write_byte_internal(diff4 >> 8);
				self.write_byte_internal(diff4 >> 16);
				self.write_byte_internal(diff4 >> 24);
			}

			// 64-bit displacement: low half in `displ`, high half in `displ_hi`
			DisplSize::Size8 => {
				diff4 = self.displ;
				self.write_byte_internal(diff4);
				self.write_byte_internal(diff4 >> 8);
				self.write_byte_internal(diff4 >> 16);
				self.write_byte_internal(diff4 >> 24);
				diff4 = self.displ_hi;
				self.write_byte_internal(diff4);
				self.write_byte_internal(diff4 >> 8);
				self.write_byte_internal(diff4 >> 16);
				self.write_byte_internal(diff4 >> 24);
			}

			// EIP-relative: `displ` holds the 32-bit target address; convert it to
			// a rel32 against the next instruction pointer (4 displ bytes plus any
			// immediate bytes still to come). Wrap-around is the intended behavior.
			DisplSize::RipRelSize4_Target32 => {
				let eip = (self.current_rip as u32).wrapping_add(4).wrapping_add(unsafe { *IMM_SIZES.get_unchecked(self.imm_size as usize) });
				diff4 = self.displ.wrapping_sub(eip);
				self.write_byte_internal(diff4);
				self.write_byte_internal(diff4 >> 8);
				self.write_byte_internal(diff4 >> 16);
				self.write_byte_internal(diff4 >> 24);
			}

			// RIP-relative: 64-bit target in `displ_hi:displ`. The rel32 must fit
			// in an i32; otherwise an error is recorded (the 4 bytes are still
			// written so buffer length stays consistent; encode() reports the error).
			DisplSize::RipRelSize4_Target64 => {
				let rip = self.current_rip.wrapping_add(4).wrapping_add(unsafe { *IMM_SIZES.get_unchecked(self.imm_size as usize) } as u64);
				let diff8 = ((((self.displ_hi as u64) << 32) | self.displ as u64).wrapping_sub(rip)) as i64;
				if diff8 < i32::MIN as i64 || diff8 > i32::MAX as i64 {
					tmp2 = ((self.displ_hi as u64) << 32) | self.displ as u64;
					self.set_error_message(format!(
						"RIP relative distance is too far away: next_ip: 0x{:016X} target: 0x{:08X}, diff = {}, diff must fit in an i32",
						rip, tmp2, diff8
					));
				}
				diff4 = diff8 as u32;
				self.write_byte_internal(diff4);
				self.write_byte_internal(diff4 >> 8);
				self.write_byte_internal(diff4 >> 16);
				self.write_byte_internal(diff4 >> 24);
			}
		}
	}

	/// Writes the immediate operand bytes (little-endian) and/or the branch
	/// displacement to the output buffer. For the `RipRel*` variants, the stored
	/// value is the branch *target*; the relative displacement is computed here
	/// against the next instruction pointer. Also records the buffer position of
	/// the immediate so `get_constant_offsets()` can report it.
	fn write_immediate(&mut self) {
		debug_assert!((self.handler.flags & OpCodeHandlerFlags::DECLARE_DATA) == 0);
		let ip;
		let eip;
		let rip;
		let diff2;
		let diff4;
		let diff8;
		let mut value;
		// Temp needed if rustc < 1.36.0 (2015 edition)
		let mut tmp;
		let tmp2;
		// Remember where the immediate starts (used by get_constant_offsets())
		self.imm_addr = self.current_rip as u32;
		match self.imm_size {
			ImmSize::None => {}

			// Single byte immediate
			ImmSize::Size1 | ImmSize::SizeIbReg | ImmSize::Size1OpCode => {
				tmp = self.immediate;
				self.write_byte_internal(tmp);
			}

			// 16-bit immediate
			ImmSize::Size2 => {
				value = self.immediate;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
			}

			// 32-bit immediate
			ImmSize::Size4 => {
				value = self.immediate;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				self.write_byte_internal(value >> 16);
				self.write_byte_internal(value >> 24);
			}

			// 64-bit immediate: low half in `immediate`, high half in `immediate_hi`
			ImmSize::Size8 => {
				value = self.immediate;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				self.write_byte_internal(value >> 16);
				self.write_byte_internal(value >> 24);
				value = self.immediate_hi;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				self.write_byte_internal(value >> 16);
				self.write_byte_internal(value >> 24);
			}

			// 16-bit immediate followed by an 8-bit immediate (second value in `immediate_hi`)
			ImmSize::Size2_1 => {
				value = self.immediate;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				tmp = self.immediate_hi;
				self.write_byte_internal(tmp);
			}

			// Two 8-bit immediates
			ImmSize::Size1_1 => {
				tmp = self.immediate;
				self.write_byte_internal(tmp);
				tmp = self.immediate_hi;
				self.write_byte_internal(tmp);
			}

			// Two 16-bit values (second in `immediate_hi`)
			ImmSize::Size2_2 => {
				value = self.immediate;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				value = self.immediate_hi;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
			}

			// 32-bit value followed by a 16-bit value (second in `immediate_hi`)
			ImmSize::Size4_2 => {
				value = self.immediate;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				self.write_byte_internal(value >> 16);
				self.write_byte_internal(value >> 24);
				value = self.immediate_hi;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
			}

			// rel8 branch, 16-bit target: diff must fit in an i8, else record an
			// error. The byte is written regardless so buffer length stays consistent.
			ImmSize::RipRelSize1_Target16 => {
				ip = (self.current_rip as u32).wrapping_add(1) as u16;
				diff2 = (self.immediate as i16).wrapping_sub(ip as i16);
				if diff2 < i8::MIN as i16 || diff2 > i8::MAX as i16 {
					tmp = self.immediate;
					self.set_error_message(format!(
						"Branch distance is too far away: next_ip: 0x{:04X} target: 0x{:04X}, diff = {}, diff must fit in an i8",
						ip, tmp as u16, diff2
					));
				}
				self.write_byte_internal(diff2 as u32);
			}

			// rel8 branch, 32-bit target
			ImmSize::RipRelSize1_Target32 => {
				eip = (self.current_rip as u32).wrapping_add(1);
				diff4 = self.immediate.wrapping_sub(eip) as i32;
				if diff4 < i8::MIN as i32 || diff4 > i8::MAX as i32 {
					tmp = self.immediate;
					self.set_error_message(format!(
						"Branch distance is too far away: next_ip: 0x{:08X} target: 0x{:08X}, diff = {}, diff must fit in an i8",
						eip, tmp, diff4
					));
				}
				self.write_byte_internal(diff4 as u32);
			}

			// rel8 branch, 64-bit target (`immediate_hi:immediate`)
			ImmSize::RipRelSize1_Target64 => {
				rip = self.current_rip.wrapping_add(1);
				diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
				if diff8 < i8::MIN as i64 || diff8 > i8::MAX as i64 {
					tmp2 = ((self.immediate_hi as u64) << 32) | (self.immediate as u64);
					self.set_error_message(format!(
						"Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i8",
						rip, tmp2, diff8
					));
				}
				self.write_byte_internal(diff8 as u32);
			}

			// rel16 branch, 16-bit target: a 16-bit diff always fits, no range check
			ImmSize::RipRelSize2_Target16 => {
				eip = (self.current_rip as u32).wrapping_add(2);
				value = self.immediate.wrapping_sub(eip);
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
			}

			// rel16 branch, 32-bit target: diff must fit in an i16
			ImmSize::RipRelSize2_Target32 => {
				eip = (self.current_rip as u32).wrapping_add(2);
				diff4 = self.immediate.wrapping_sub(eip) as i32;
				if diff4 < i16::MIN as i32 || diff4 > i16::MAX as i32 {
					tmp = self.immediate;
					self.set_error_message(format!(
						"Branch distance is too far away: next_ip: 0x{:08X} target: 0x{:08X}, diff = {}, diff must fit in an i16",
						eip, tmp, diff4
					));
				}
				value = diff4 as u32;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
			}

			// rel16 branch, 64-bit target: diff must fit in an i16
			ImmSize::RipRelSize2_Target64 => {
				rip = self.current_rip.wrapping_add(2);
				diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
				if diff8 < i16::MIN as i64 || diff8 > i16::MAX as i64 {
					tmp2 = ((self.immediate_hi as u64) << 32) | (self.immediate as u64);
					self.set_error_message(format!(
						"Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i16",
						rip, tmp2, diff8
					));
				}
				value = diff8 as u32;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
			}

			// rel32 branch, 32-bit target: a 32-bit diff always fits, no range check
			ImmSize::RipRelSize4_Target32 => {
				eip = (self.current_rip as u32).wrapping_add(4);
				value = self.immediate.wrapping_sub(eip);
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				self.write_byte_internal(value >> 16);
				self.write_byte_internal(value >> 24);
			}

			// rel32 branch, 64-bit target: diff must fit in an i32
			ImmSize::RipRelSize4_Target64 => {
				rip = self.current_rip.wrapping_add(4);
				diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
				if diff8 < i32::MIN as i64 || diff8 > i32::MAX as i64 {
					tmp2 = ((self.immediate_hi as u64) << 32) | (self.immediate as u64);
					self.set_error_message(format!(
						"Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i32",
						rip, tmp2, diff8
					));
				}
				value = diff8 as u32;
				self.write_byte_internal(value);
				self.write_byte_internal(value >> 8);
				self.write_byte_internal(value >> 16);
				self.write_byte_internal(value >> 24);
			}
		}
	}

	/// Writes a byte to the output buffer
	///
	/// # Arguments
	///
	/// `value`: Value to write
	///
	/// # Examples
	///
	/// ```
	/// use iced_x86::*;
	///
	/// let mut encoder = Encoder::new(64);
	/// let instr = Instruction::with_reg_reg(Code::Add_r64_rm64, Register::R8, Register::RBP);
	/// encoder.write_u8(0x90);
	/// match encoder.encode(&instr, 0x5555_5555) {
	///     Ok(len) => assert_eq!(3, len),
	///     Err(err) => panic!("{}", err),
	/// }
	/// encoder.write_u8(0xCC);
	/// // We're done, take ownership of the buffer
	/// let buffer = encoder.take_buffer();
	/// assert_eq!(vec![0x90, 0x4C, 0x03, 0xC5, 0xCC], buffer);
	/// ```
	#[inline]
	pub fn write_u8(&mut self, value: u8) {
		// Lossless widening; the internal writer stores only the low byte anyway
		self.write_byte_internal(u32::from(value));
	}

	/// Appends the low byte of `value` to the output buffer and advances the
	/// internal instruction pointer by one.
	#[inline]
	pub(super) fn write_byte_internal(&mut self, value: u32) {
		// The two updates are independent; `current_rip` wraps on overflow by design
		self.current_rip = self.current_rip.wrapping_add(1);
		self.buffer.push(value as u8);
	}

	// Current write position in the output buffer, i.e. the number of bytes
	// emitted so far. Used by the block encoder.
	#[cfg(all(feature = "encoder", feature = "block_encoder"))]
	#[inline]
	pub(super) fn position(&self) -> usize {
		self.buffer.len()
	}

	/// Returns the buffer and initializes the internal buffer to an empty vector. Should be called when
	/// you've encoded all instructions and need the raw instruction bytes. See also [`set_buffer()`].
	///
	/// [`set_buffer()`]: #method.set_buffer
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn take_buffer(&mut self) -> Vec<u8> {
		// Swap in a fresh, unallocated Vec and hand the encoded bytes to the caller
		let encoded = mem::replace(&mut self.buffer, Vec::new());
		encoded
	}

	/// Overwrites the buffer with a new vector. The old buffer is dropped. See also [`take_buffer()`].
	///
	/// [`take_buffer()`]: #method.take_buffer
	#[inline]
	pub fn set_buffer(&mut self, buffer: Vec<u8>) {
		self.buffer = buffer;
	}

	// Discards all emitted bytes while keeping the allocation for reuse.
	// Used by the block encoder between encode attempts.
	#[cfg(all(feature = "encoder", feature = "block_encoder"))]
	#[inline]
	pub(super) fn clear_buffer(&mut self) {
		// truncate(0) == clear(): drops the elements, keeps the capacity
		self.buffer.truncate(0)
	}

	/// Gets the offsets of the constants (memory displacement and immediate) in the encoded instruction.
	/// The caller can use this information to add relocations if needed.
	///
	/// All offsets are relative to the start of the most recently encoded
	/// instruction (`self.eip`); `displ_addr`/`imm_addr` were recorded while the
	/// bytes were written. Sizes/offsets of 0 mean "not present".
	#[cfg_attr(has_must_use, must_use)]
	#[cfg_attr(feature = "cargo-clippy", allow(clippy::missing_inline_in_public_items))]
	pub fn get_constant_offsets(&self) -> ConstantOffsets {
		let mut co = ConstantOffsets::default();

		match self.displ_size {
			DisplSize::None => {}

			DisplSize::Size1 => {
				co.displacement_size = 1;
				co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
			}

			DisplSize::Size2 => {
				co.displacement_size = 2;
				co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
			}

			// Both RIP/EIP-relative forms encode 4 displacement bytes
			DisplSize::Size4 | DisplSize::RipRelSize4_Target32 | DisplSize::RipRelSize4_Target64 => {
				co.displacement_size = 4;
				co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
			}

			DisplSize::Size8 => {
				co.displacement_size = 8;
				co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
			}
		}

		match self.imm_size {
			// SizeIbReg/Size1OpCode bytes are not relocatable constants, so they're skipped
			ImmSize::None | ImmSize::SizeIbReg | ImmSize::Size1OpCode => {}

			ImmSize::Size1 | ImmSize::RipRelSize1_Target16 | ImmSize::RipRelSize1_Target32 | ImmSize::RipRelSize1_Target64 => {
				co.immediate_size = 1;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
			}

			// Two immediates: the second one starts right after the first
			ImmSize::Size1_1 => {
				co.immediate_size = 1;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
				co.immediate_size2 = 1;
				co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(1) as u8;
			}

			ImmSize::Size2 | ImmSize::RipRelSize2_Target16 | ImmSize::RipRelSize2_Target32 | ImmSize::RipRelSize2_Target64 => {
				co.immediate_size = 2;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
			}

			ImmSize::Size2_1 => {
				co.immediate_size = 2;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
				co.immediate_size2 = 1;
				co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(2) as u8;
			}

			ImmSize::Size2_2 => {
				co.immediate_size = 2;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
				co.immediate_size2 = 2;
				co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(2) as u8;
			}

			ImmSize::Size4 | ImmSize::RipRelSize4_Target32 | ImmSize::RipRelSize4_Target64 => {
				co.immediate_size = 4;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
			}

			ImmSize::Size4_2 => {
				co.immediate_size = 4;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
				co.immediate_size2 = 2;
				co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(4) as u8;
			}

			ImmSize::Size8 => {
				co.immediate_size = 8;
				co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
			}
		}

		co
	}

	/// Disables 2-byte VEX encoding and encodes all VEX instructions with the 3-byte VEX encoding
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn prevent_vex2(&self) -> bool {
		// Stored as a u32 flag: zero = allowed, non-zero = 3-byte VEX forced
		let flag = self.prevent_vex2;
		flag != 0
	}

	/// Disables 2-byte VEX encoding and encodes all VEX instructions with the 3-byte VEX encoding
	///
	/// # Arguments
	///
	/// * `new_value`: new value
	#[inline]
	pub fn set_prevent_vex2(&mut self, new_value: bool) {
		// All-ones when enabled (same value as u32::MAX), zero when disabled
		self.prevent_vex2 = if new_value { !0 } else { 0 };
	}

	/// Value of the `VEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn vex_wig(&self) -> u32 {
		// W lives in bit 7 of the combined WIG/LIG field
		(self.internal_vex_wig_lig & 0x80) >> 7
	}

	/// Value of the `VEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
	///
	/// # Arguments
	///
	/// * `new_value`: new value (0 or 1)
	#[inline]
	pub fn set_vex_wig(&mut self, new_value: u32) {
		// Replace bit 7 (W) in the combined field without disturbing the L bit
		let w_bit = (new_value & 1) << 7;
		self.internal_vex_wig_lig = (self.internal_vex_wig_lig & !0x80) | w_bit;
	}

	/// Value of the `VEX.L` bit to use if it's an instruction that ignores the bit. Default is 0.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn vex_lig(&self) -> u32 {
		// L lives in bit 2 of the combined WIG/LIG field
		(self.internal_vex_wig_lig & 4) >> 2
	}

	/// Value of the `VEX.L` bit to use if it's an instruction that ignores the bit. Default is 0.
	///
	/// # Arguments
	///
	/// * `new_value`: new value (0 or 1)
	#[inline]
	pub fn set_vex_lig(&mut self, new_value: u32) {
		let l_bit = (new_value & 1) << 2;
		// Keep both the combined WIG/LIG field and the LIG-only copy in sync
		self.internal_vex_wig_lig = (self.internal_vex_wig_lig & !4) | l_bit;
		self.internal_vex_lig = l_bit;
	}

	/// Value of the `EVEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn evex_wig(&self) -> u32 {
		// Stored pre-shifted into bit 7; shift it back down for the caller
		let raw = self.internal_evex_wig;
		raw >> 7
	}

	/// Value of the `EVEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
	///
	/// # Arguments
	///
	/// * `new_value`: new value (0 or 1)
	#[inline]
	pub fn set_evex_wig(&mut self, new_value: u32) {
		// Keep only the low bit, stored pre-shifted into bit position 7
		let w_bit = new_value & 1;
		self.internal_evex_wig = w_bit << 7;
	}

	/// Value of the `EVEX.L'L` bits to use if it's an instruction that ignores the bits. Default is 0.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn evex_lig(&self) -> u32 {
		// Stored pre-shifted into bits 5-6; shift back down for the caller
		let raw = self.internal_evex_lig;
		raw >> 5
	}

	/// Value of the `EVEX.L'L` bits to use if it's an instruction that ignores the bits. Default is 0.
	///
	/// # Arguments
	///
	/// * `new_value`: new value (0 or 3)
	#[inline]
	pub fn set_evex_lig(&mut self, new_value: u32) {
		// Keep only the two L'L bits, stored pre-shifted into bit positions 5-6
		let ll_bits = new_value & 3;
		self.internal_evex_lig = ll_bits << 5;
	}

	/// Gets the bitness (16, 32 or 64)
	///
	/// This is the value the encoder was created with and never changes.
	#[cfg_attr(has_must_use, must_use)]
	#[inline]
	pub fn bitness(&self) -> u32 {
		self.bitness
	}
}
