/* build_insn() — excerpt from net/bpf_jit_comp_64.c, lines 895-1436 */

/* Translate one eBPF instruction into SPARC64 machine code.
 *
 * @insn: eBPF instruction to JIT
 * @ctx:  JIT context: emit buffer/index, per-insn offset map, temp-register
 *        usage flags, epilogue offset, and the bpf_prog being compiled.
 *
 * Return: 0 on success; 1 when this translation also consumed the *next*
 * eBPF instruction (64-bit immediate loads, or a following zero-extension
 * pseudo-insn made redundant by an op that already zero-extends); a
 * negative errno on unsupported or invalid instructions.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2sparc[insn->dst_reg];	/* mapped SPARC dst register */
	const u8 src = bpf2sparc[insn->src_reg];	/* mapped SPARC src register */
	const int i = insn - ctx->prog->insnsi;		/* index of this insn in the program */
	const s16 off = insn->off;
	const s32 imm = insn->imm;

	/* Touching FP obliges the prologue to materialize a frame pointer. */
	if (insn->src_reg == BPF_REG_FP)
		ctx->saw_frame_pointer = true;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
		/* SRL by 0 copies src while clearing the upper 32 bits. */
		emit_alu3_K(SRL, src, 0, dst, ctx);
		/* dst is already zero-extended: skip a following zext pseudo-insn. */
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit_reg_move(src, dst, ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_alu(ADD, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_alu(SUB, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_alu(AND, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_alu(OR, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_alu(XOR, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_X:
		emit_alu(MUL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_alu(MULX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
		/* 32-bit DIV reads the Y register for the dividend's high
		 * half; clear it first for an unsigned 32-bit divide.
		 */
		emit_write_y(G0, ctx);
		emit_alu(DIV, src, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_alu(UDIVX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		/* No hardware modulo: dst -= (dst / src) * src. */
		emit_write_y(G0, ctx);
		emit_alu3(DIV, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
		goto do_alu32_trunc;
	}
	case BPF_ALU64 | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		/* 64-bit modulo via divide/multiply/subtract. */
		emit_alu3(UDIVX, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_alu(SLL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_alu(SLLX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
		/* SRL clears the upper 32 bits, so a following zext is free. */
		emit_alu(SRL, src, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_alu(SRLX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_alu(SRA, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_alu(SRAX, src, dst, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		/* Negate as %g0 - dst (SPARC has no dedicated neg insn). */
		emit(SUB | RS1(0) | RS2(dst) | RD(dst), ctx);
		goto do_alu32_trunc;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		/* SPARC is big-endian, so "to BE" only truncates to imm bits. */
		switch (imm) {
		case 16:
			/* Keep the low 16 bits: shift up then back down. */
			emit_alu_K(SLL, dst, 16, ctx);
			emit_alu_K(SRL, dst, 16, ctx);
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case 32:
			if (!ctx->prog->aux->verifier_zext)
				emit_alu_K(SRL, dst, 0, ctx);
			break;
		case 64:
			/* nop */
			break;

		}
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];

		ctx->tmp_1_used = true;
		switch (imm) {
		case 16:
			/* Swap the two low bytes with shifts/masks. */
			emit_alu3_K(AND, dst, 0xff, tmp, ctx);
			emit_alu3_K(SRL, dst, 8, dst, ctx);
			emit_alu3_K(AND, dst, 0xff, dst, ctx);
			emit_alu3_K(SLL, tmp, 8, tmp, ctx);
			emit_alu(OR, tmp, dst, ctx);
			if (insn_is_zext(&insn[1]))
				return 1;
			break;

		case 32:
			/* Byte-reverse the low 32 bits one byte at a time. */
			ctx->tmp_2_used = true;
			emit_alu3_K(SRL, dst, 24, tmp, ctx);	/* tmp  = dst >> 24 */
			emit_alu3_K(SRL, dst, 16, tmp2, ctx);	/* tmp2 = dst >> 16 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 8, tmp2, ctx);	/* tmp2 = tmp2 << 8 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(SRL, dst, 8, tmp2, ctx);	/* tmp2 = dst >> 8 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 16, tmp2, ctx);	/* tmp2 = tmp2 << 16 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(AND, dst, 0xff, dst, ctx);	/* dst	= dst & 0xff */
			emit_alu3_K(SLL, dst, 24, dst, ctx);	/* dst  = dst << 24 */
			emit_alu(OR, tmp, dst, ctx);		/* dst  = dst | tmp */
			if (insn_is_zext(&insn[1]))
				return 1;
			break;

		case 64:
			/* Store to a stack scratch slot, then reload through
			 * the little-endian ASI (ASI_PL) to byte-swap all 8
			 * bytes in one load.  NOTE(review): assumes
			 * [SP + STACK_BIAS + 128] is a free scratch area
			 * reserved by the prologue — confirm against
			 * build_prologue()'s stack layout.
			 */
			emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx);
			emit(ST64 | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			emit(LD64A | ASI(ASI_PL) | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			break;
		}
		break;
	}
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
		/* 32-bit move loads zero-extended: a following zext is free. */
		emit_loadimm32(imm, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_MOV | BPF_K:
		/* 64-bit semantics: sign-extend the 32-bit immediate. */
		emit_loadimm_sext(imm, dst, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_alu_K(ADD, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_alu_K(SUB, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_alu_K(AND, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_alu_K(OR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_alu_K(XOR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_K:
		emit_alu_K(MUL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_alu_K(MULX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
		/* Division by a zero immediate is rejected at JIT time. */
		if (imm == 0)
			return -EINVAL;

		emit_write_y(G0, ctx);
		emit_alu_K(DIV, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_DIV | BPF_K:
		if (imm == 0)
			return -EINVAL;

		emit_alu_K(UDIVX, dst, imm, ctx);
		break;
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K: {
		const u8 tmp = bpf2sparc[TMP_REG_2];
		unsigned int div;

		if (imm == 0)
			return -EINVAL;

		/* Pick the 64- or 32-bit divide; modulo is again
		 * dst -= (dst / imm) * imm.
		 */
		div = (BPF_CLASS(code) == BPF_ALU64) ? UDIVX : DIV;

		ctx->tmp_2_used = true;

		if (BPF_CLASS(code) != BPF_ALU64)
			emit_write_y(G0, ctx);
		if (is_simm13(imm)) {
			/* Immediate fits in 13 bits: encode it directly. */
			emit(div | IMMED | RS1(dst) | S13(imm) | RD(tmp), ctx);
			emit(MULX | IMMED | RS1(tmp) | S13(imm) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		} else {
			const u8 tmp1 = bpf2sparc[TMP_REG_1];

			ctx->tmp_1_used = true;

			/* Materialize the constant in a second temp first. */
			emit_set_const_sext(imm, tmp1, ctx);
			emit(div | RS1(dst) | RS2(tmp1) | RD(tmp), ctx);
			emit(MULX | RS1(tmp) | RS2(tmp1) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		}
		goto do_alu32_trunc;
	}
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_alu_K(SLL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_alu_K(SLLX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_alu_K(SRL, dst, imm, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_alu_K(SRLX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_alu_K(SRA, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_alu_K(SRAX, dst, imm, ctx);
		break;

	/* Common tail for 32-bit ALU results: clear the upper 32 bits
	 * (SRL by 0), unless the verifier already proved zero-extension
	 * is unnecessary.
	 */
	do_alu32_trunc:
		if (BPF_CLASS(code) == BPF_ALU &&
		    !ctx->prog->aux->verifier_zext)
			emit_alu_K(SRL, dst, 0, ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		emit_branch(BA, ctx->idx, ctx->offset[i + off], ctx);
		emit_nop(ctx);	/* branch delay slot */
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X: {
		int err;

		/* Register-vs-register compare; false = not an immediate. */
		err = emit_compare_and_branch(code, dst, src, 0, false, i + off, ctx);
		if (err)
			return err;
		break;
	}
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K: {
		int err;

		/* Register-vs-immediate compare; true = immediate form. */
		err = emit_compare_and_branch(code, dst, 0, imm, true, i + off, ctx);
		if (err)
			return err;
		break;
	}

	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		/* imm is the helper's offset from __bpf_call_base. */
		u8 *func = ((u8 *)__bpf_call_base) + imm;

		ctx->saw_call = true;

		emit_call((u32 *)func, ctx);
		emit_nop(ctx);	/* call delay slot */

		/* SPARC ABI returns in %o0; copy into BPF R0. */
		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
		break;
	}

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		emit_tail_call(ctx);
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		emit_branch(BA, ctx->idx, ctx->epilogue_offset, ctx);
		emit_nop(ctx);	/* branch delay slot */
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		/* Two-slot insn: the next slot holds the high 32 bits. */
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_loadimm64(imm64, dst, ctx);

		/* Consumed insn[1] too. */
		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		ctx->tmp_1_used = true;
		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = LD32;
			break;
		case BPF_H:
			opcode = LD16;
			break;
		case BPF_B:
			opcode = LD8;
			break;
		case BPF_DW:
			opcode = LD64;
			break;
		}

		if (is_simm13(off)) {
			/* Offset fits the 13-bit immediate field. */
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			/* Otherwise materialize it in a temp register. */
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(src) | rs2 | RD(dst), ctx);
		/* Sub-64-bit loads leave dst zero-extended: skip a
		 * following explicit zext pseudo-insn.
		 */
		if (opcode != LD64 && insn_is_zext(&insn[1]))
			return 1;
		break;
	}
	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		/* No code emitted for the barrier on this architecture. */
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		u32 opcode = 0, rs2;

		/* Stores through FP also require a frame pointer. */
		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		/* The immediate store value lives in tmp2. */
		ctx->tmp_2_used = true;
		emit_loadimm(imm, tmp2, ctx);

		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = ST32;
			break;
		case BPF_H:
			opcode = ST16;
			break;
		case BPF_B:
			opcode = ST8;
			break;
		case BPF_DW:
			opcode = ST64;
			break;
		}

		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(dst) | rs2 | RD(tmp2), ctx);
		break;
	}

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = ST32;
			break;
		case BPF_H:
			opcode = ST16;
			break;
		case BPF_B:
			opcode = ST8;
			break;
		case BPF_DW:
			opcode = ST64;
			break;
		}
		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(dst) | rs2 | RD(src), ctx);
		break;
	}

	case BPF_STX | BPF_ATOMIC | BPF_W: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		/* Only atomic add is implemented for this JIT. */
		if (insn->imm != BPF_ADD) {
			pr_err_once("unknown atomic op %02x\n", insn->imm);
			return -EINVAL;
		}

		/* lock *(u32 *)(dst + off) += src */

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		/* tmp = effective address (dst + off). */
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		/* CAS retry loop: load old value, compute new, CAS it in;
		 * if another CPU raced us (old != observed), loop back and
		 * retry from the load.  NOTE(review): emit_branch(BNE, 4, 0)
		 * appears to encode a 4-instruction backwards branch —
		 * confirm against emit_branch()'s from/to convention.
		 */
		emit(LD32 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CAS | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
		emit_nop(ctx);	/* branch delay slot */
		break;
	}
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_ATOMIC | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		if (insn->imm != BPF_ADD) {
			pr_err_once("unknown atomic op %02x\n", insn->imm);
			return -EINVAL;
		}

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		/* Same CAS retry loop as the 32-bit case, using the
		 * 64-bit load and CASX.
		 */
		emit(LD64 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CASX | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
		emit_nop(ctx);	/* branch delay slot */
		break;
	}

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}