
Commit dd6a403

Shahab Vahedi authored and Alexei Starovoitov committed
ARC, bpf: Fix issues reported by the static analyzers
Also updated a couple of comments along the way. One of the issues
reported was indeed a bug in the code:

  memset(ctx, 0, sizeof(ctx))   // original line
  memset(ctx, 0, sizeof(*ctx))  // fixed line

That was a nice catch.

Reported-by: kernel test robot <[email protected]>
Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
Signed-off-by: Shahab Vahedi <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
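For context, the memset issue fixed here is the classic sizeof-on-a-pointer mistake: sizeof(ctx) is the size of the pointer itself (4 or 8 bytes), not of the structure it points to, so most of the context would have stayed uninitialised. Below is a minimal stand-alone C sketch of the difference; the struct and its fields are invented for illustration and are not the kernel's jit_context.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in only; not the kernel's struct jit_context. */
struct jit_context {
        void *orig_prog;
        int blinded;
        char scratch[64];
};

int main(void)
{
        struct jit_context c;
        struct jit_context *ctx = &c;

        /* Buggy form: clears only sizeof(void *) bytes of the struct. */
        memset(ctx, 0, sizeof(ctx));

        /* Fixed form: clears the whole pointed-to structure. */
        memset(ctx, 0, sizeof(*ctx));

        printf("sizeof(ctx) = %zu, sizeof(*ctx) = %zu\n",
               sizeof(ctx), sizeof(*ctx));
        return 0;
}

On a typical 64-bit build this prints something like "sizeof(ctx) = 8, sizeof(*ctx) = 80", which is why static analyzers flag memset calls whose length argument is the size of a pointer.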
1 parent 590016a commit dd6a403

3 files changed, 18 insertions(+), 16 deletions(-)

arch/arc/net/bpf_jit.h

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@
 
 /************** Functions that the back-end must provide **************/
 /* Extension for 32-bit operations. */
-inline u8 zext(u8 *buf, u8 rd);
+u8 zext(u8 *buf, u8 rd);
 /***** Moves *****/
 u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
 u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);

arch/arc/net/bpf_jit_arcv2.c

Lines changed: 6 additions & 4 deletions
@@ -62,7 +62,7 @@ enum {
  * If/when we decide to add ARCv2 instructions that do use register pairs,
  * the mapping, hopefully, doesn't need to be revisited.
  */
-const u8 bpf2arc[][2] = {
+static const u8 bpf2arc[][2] = {
         /* Return value from in-kernel function, and exit value from eBPF */
         [BPF_REG_0] = {ARC_R_8, ARC_R_9},
         /* Arguments from eBPF program to in-kernel function */
@@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
 
 /************* Packers (Deal with BPF_REGs) **************/
 
-inline u8 zext(u8 *buf, u8 rd)
+u8 zext(u8 *buf, u8 rd)
 {
         if (rd != BPF_REG_FP)
                 return arc_movi_r(buf, REG_HI(rd), 0);
@@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
                         break;
                 default:
                         /* The caller must have handled this. */
+                        break;
                 }
         } else {
                 /*
@@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
                         break;
                 default:
                         /* The caller must have handled this. */
+                        break;
                 }
         }
 
@@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
 #define JCC64_NR_OF_JMPS 3      /* Number of jumps in jcc64 template. */
 #define JCC64_INSNS_TO_END 3    /* Number of insn. inclusive the 2nd jmp to end. */
 #define JCC64_SKIP_JMP 1        /* Index of the "skip" jump to "end". */
-const struct {
+static const struct {
         /*
          * "jit_off" is common between all "jmp[]" and is coupled with
          * "cond" of each "jmp[]" instance. e.g.:
@@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
          * The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
          * instruction that precedes the conditional branch.
          */
-        const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+        static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
                 [ARC_CC_UGT] = CC_great_u,
                 [ARC_CC_UGE] = CC_great_eq_u,
                 [ARC_CC_ULT] = CC_less_u,

arch/arc/net/bpf_jit_core.c

Lines changed: 11 additions & 11 deletions
@@ -159,15 +159,15 @@ static void jit_dump(const struct jit_context *ctx)
 /* Initialise the context so there's no garbage. */
 static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
 {
-        memset(ctx, 0, sizeof(ctx));
+        memset(ctx, 0, sizeof(*ctx));
 
         ctx->orig_prog = prog;
 
         /* If constant blinding was requested but failed, scram. */
         ctx->prog = bpf_jit_blind_constants(prog);
         if (IS_ERR(ctx->prog))
                 return PTR_ERR(ctx->prog);
-        ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+        ctx->blinded = (ctx->prog != ctx->orig_prog);
 
         /* If the verifier doesn't zero-extend, then we have to do it. */
         ctx->do_zext = !ctx->prog->aux->verifier_zext;
@@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
 }
 
 /*
- * All the "handle_*()" functions have been called before by the
- * "jit_prepare()". If there was an error, we would know by now.
- * Therefore, no extra error checking at this point, other than
- * a sanity check at the end that expects the calculated length
- * (jit.len) to be equal to the length of generated instructions
- * (jit.index).
+ * jit_compile() is the real compilation phase. jit_prepare() is
+ * invoked before jit_compile() as a dry-run to make sure everything
+ * will go OK and allocate the necessary memory.
+ *
+ * In the end, jit_compile() checks if it has produced the same number
+ * of instructions as jit_prepare() would.
  */
 static int jit_compile(struct jit_context *ctx)
 {
@@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
 
 /*
  * This function may be invoked twice for the same stream of BPF
- * instructions. The "extra pass" happens, when there are "call"s
- * involved that their addresses are not known during the first
- * invocation.
+ * instructions. The "extra pass" happens, when there are
+ * (re)locations involved that their addresses are not known
+ * during the first run.
  */
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
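The reworded comment for jit_compile() above describes a common two-pass JIT layout: a dry run that only measures how much code will be emitted, then the real emission pass into a buffer of exactly that size, with a final check that both passes agree. Below is a rough stand-alone sketch of that pattern, not the ARC JIT's actual code; every name in it is invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Emit one toy "instruction"; with buf == NULL only its length is counted. */
static size_t emit_insn(uint8_t *buf, size_t off, uint8_t opcode)
{
        if (buf)
                buf[off] = opcode;
        return 1;                       /* every insn is 1 byte in this sketch */
}

/* Dry run: walk the program once and add up the lengths (cf. jit_prepare()). */
static size_t prepare_len(const uint8_t *prog, size_t n)
{
        size_t len = 0;

        for (size_t i = 0; i < n; i++)
                len += emit_insn(NULL, len, prog[i]);
        return len;
}

/*
 * Real pass: emit into a buffer sized by the dry run, then sanity-check that
 * the generated length matches the calculated one (cf. jit_compile()).
 */
static uint8_t *compile(const uint8_t *prog, size_t n, size_t *out_len)
{
        size_t len = prepare_len(prog, n), idx = 0;
        uint8_t *buf = malloc(len ? len : 1);

        if (!buf)
                return NULL;
        for (size_t i = 0; i < n; i++)
                idx += emit_insn(buf, idx, prog[i]);
        assert(idx == len);             /* both passes must agree */
        *out_len = len;
        return buf;
}

int main(void)
{
        const uint8_t prog[] = { 0x01, 0x02, 0x03 };
        size_t len = 0;
        uint8_t *code = compile(prog, sizeof(prog), &len);
        int ok = (code != NULL && len == sizeof(prog));

        free(code);
        return ok ? 0 : 1;
}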
