static sljit_s32 emit_mov_byte()

in pcre/sljit/sljitNativeX86_common.c [874:1017]


static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;
	sljit_s32 dst_r;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	sljit_s32 work_r;
#endif

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
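	/* Perform the extension on the full 64-bit register width. */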
	compiler->mode32 = 0;
#endif

	if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
		return SLJIT_SUCCESS; /* Empty instruction. */

	if (src & SLJIT_IMM) {
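		/* Immediate source: a register destination gets a full-width mov,
		   a memory destination an 8-bit immediate store. */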
		if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
			return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
#else
			inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
			FAIL_IF(!inst);
			*inst = MOV_rm_i32;
			return SLJIT_SUCCESS;
#endif
		}
		inst = emit_x86_instruction(compiler, 1 | EX86_BYTE_ARG | EX86_NO_REXW, SLJIT_IMM, srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm8_i8;
		return SLJIT_SUCCESS;
	}

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
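	/* dst_r receives the extended value; TMP_REG1 is used when dst is a memory operand. */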

	if ((dst & SLJIT_MEM) && FAST_IS_REG(src)) {
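		/* Register source stored directly to memory. On x86_32 only registers
		   with reg_map < 4 have 8-bit forms, so others go through TMP_REG1. */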
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		if (reg_map[src] >= 4) {
			SLJIT_ASSERT(dst_r == TMP_REG1);
			EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
		} else
			dst_r = src;
#else
		dst_r = src;
#endif
	}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	else if (FAST_IS_REG(src) && reg_map[src] >= 4) {
		/* src, dst are registers. */
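		/* src has no 8-bit form on x86_32: use movzx/movsx when dst is
		   byte addressable, otherwise emulate the extension with shifts
		   (sign) or an AND mask (zero). */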
		SLJIT_ASSERT(SLOW_IS_REG(dst));
		if (reg_map[dst] < 4) {
			if (dst != src)
				EMIT_MOV(compiler, dst, 0, src, 0);
			inst = emit_x86_instruction(compiler, 2, dst, 0, dst, 0);
			FAIL_IF(!inst);
			*inst++ = GROUP_0F;
			*inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8;
		}
		else {
			if (dst != src)
				EMIT_MOV(compiler, dst, 0, src, 0);
			if (sign) {
				/* shl reg, 24 */
				inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0);
				FAIL_IF(!inst);
				*inst |= SHL;
				/* sar reg, 24 */
				inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0);
				FAIL_IF(!inst);
				*inst |= SAR;
			}
			else {
				inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 0xff, dst, 0);
				FAIL_IF(!inst);
				*(inst + 1) |= AND;
			}
		}
		return SLJIT_SUCCESS;
	}
#endif
	else {
		/* src is a memory operand, or a register (with reg_map[src] < 4 on x86_32). */
		inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_0F;
		*inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8;
	}

	if (dst & SLJIT_MEM) {
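		/* Store the low byte of dst_r to the memory destination. */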
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		if (dst_r == TMP_REG1) {
			/* Find an unused register whose reg_map value is less than 4 (i.e. one with an 8-bit form). */
			if ((dst & REG_MASK) == SLJIT_R0) {
				if ((dst & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_R1))
					work_r = SLJIT_R2;
				else
					work_r = SLJIT_R1;
			}
			else {
				if ((dst & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_R0))
					work_r = SLJIT_R0;
				else if ((dst & REG_MASK) == SLJIT_R1)
					work_r = SLJIT_R2;
				else
					work_r = SLJIT_R1;
			}
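			/* Exchange work_r with TMP_REG1 so the byte store can use a
			   byte addressable register, then restore both afterwards. */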

			if (work_r == SLJIT_R0) {
				ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
			}
			else {
				inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
				FAIL_IF(!inst);
				*inst = XCHG_r_rm;
			}

			inst = emit_x86_instruction(compiler, 1, work_r, 0, dst, dstw);
			FAIL_IF(!inst);
			*inst = MOV_rm8_r8;

			if (work_r == SLJIT_R0) {
				ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
			}
			else {
				inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
				FAIL_IF(!inst);
				*inst = XCHG_r_rm;
			}
		}
		else {
			inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
			FAIL_IF(!inst);
			*inst = MOV_rm8_r8;
		}
#else
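		/* On x86_64 a REX prefix (EX86_REX) makes every register byte addressable,
		   so dst_r can be stored directly. */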
		inst = emit_x86_instruction(compiler, 1 | EX86_REX | EX86_NO_REXW, dst_r, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm8_r8;
#endif
	}

	return SLJIT_SUCCESS;
}
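
For context, this helper is reached from sljit_emit_op1() when the opcode is SLJIT_MOV_U8 or SLJIT_MOV_S8. A minimal sketch of that dispatch (simplified, not the verbatim source; the real code also performs additional normalization of immediate sources first), with sign = 0 requesting zero extension and sign = 1 sign extension:

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV_U8:
		/* Zero extension. */
		return emit_mov_byte(compiler, 0, dst, dstw, src, srcw);
	case SLJIT_MOV_S8:
		/* Sign extension. */
		return emit_mov_byte(compiler, 1, dst, dstw, src, srcw);
	}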