Diffstat (limited to 'kernel/bpf/verifier.c')
 kernel/bpf/verifier.c | 235 ++++++++++++++++++++++++++++++---------------
 1 file changed, 140 insertions(+), 95 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ce166a002d16..09d5d972c9ff 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
 	int access_size;
 	s64 msize_smax_value;
 	u64 msize_umax_value;
-	int ptr_id;
+	int ref_obj_id;
 	int func_id;
 };
 
@@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
 	       type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool type_is_refcounted(enum bpf_reg_type type)
-{
-	return type == PTR_TO_SOCKET;
-}
-
-static bool type_is_refcounted_or_null(enum bpf_reg_type type)
-{
-	return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
-}
-
-static bool reg_is_refcounted(const struct bpf_reg_state *reg)
-{
-	return type_is_refcounted(reg->type);
-}
-
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
 	return reg->type == PTR_TO_MAP_VALUE &&
 		map_value_has_spin_lock(reg->map_ptr);
 }
 
-static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
+static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
 {
-	return type_is_refcounted_or_null(reg->type);
+	return type == PTR_TO_SOCKET ||
+		type == PTR_TO_SOCKET_OR_NULL ||
+		type == PTR_TO_TCP_SOCK ||
+		type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool arg_type_is_refcounted(enum bpf_arg_type type)
+static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
 {
-	return type == ARG_PTR_TO_SOCKET;
+	return type == ARG_PTR_TO_SOCK_COMMON;
 }
 
 /* Determine whether the function releases some resources allocated by another
@@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
 		func_id == BPF_FUNC_sk_lookup_udp;
 }
 
+static bool is_ptr_cast_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_tcp_sock ||
+		func_id == BPF_FUNC_sk_fullsock;
+}
+
 /* string representation of 'enum bpf_reg_type' */
 static const char * const reg_type_str[] = {
 	[NOT_INIT]		= "?",
@@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 				verbose(env, ",call_%d", func(env, reg)->callsite);
 		} else {
 			verbose(env, "(id=%d", reg->id);
+			if (reg_type_may_be_refcounted_or_null(t))
+				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
 			if (t != SCALAR_VALUE)
 				verbose(env, ",off=%d", reg->off);
 			if (type_is_pkt_pointer(t))
@@ -1901,8 +1897,9 @@ continue_func:
 		}
 		frame++;
 		if (frame >= MAX_CALL_FRAMES) {
-			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
-			return -EFAULT;
+			verbose(env, "the call stack of %d frames is too deep !\n",
+				frame);
+			return -E2BIG;
 		}
 		goto process_func;
 	}
@@ -2414,16 +2411,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
 		if (!type_is_sk_pointer(type))
 			goto err_type;
-	} else if (arg_type == ARG_PTR_TO_SOCKET) {
-		expected_type = PTR_TO_SOCKET;
-		if (type != expected_type)
-			goto err_type;
-		if (meta->ptr_id || !reg->id) {
-			verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
-				meta->ptr_id, reg->id);
-			return -EFAULT;
+		if (reg->ref_obj_id) {
+			if (meta->ref_obj_id) {
+				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
+					regno, reg->ref_obj_id,
+					meta->ref_obj_id);
+				return -EFAULT;
+			}
+			meta->ref_obj_id = reg->ref_obj_id;
 		}
-		meta->ptr_id = reg->id;
 	} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
 		if (meta->func_id == BPF_FUNC_spin_lock) {
 			if (process_spin_lock(env, regno, true))
@@ -2740,32 +2736,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
 	return true;
 }
 
-static bool check_refcount_ok(const struct bpf_func_proto *fn)
+static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
 {
 	int count = 0;
 
-	if (arg_type_is_refcounted(fn->arg1_type))
+	if (arg_type_may_be_refcounted(fn->arg1_type))
 		count++;
-	if (arg_type_is_refcounted(fn->arg2_type))
+	if (arg_type_may_be_refcounted(fn->arg2_type))
 		count++;
-	if (arg_type_is_refcounted(fn->arg3_type))
+	if (arg_type_may_be_refcounted(fn->arg3_type))
 		count++;
-	if (arg_type_is_refcounted(fn->arg4_type))
+	if (arg_type_may_be_refcounted(fn->arg4_type))
 		count++;
-	if (arg_type_is_refcounted(fn->arg5_type))
+	if (arg_type_may_be_refcounted(fn->arg5_type))
 		count++;
 
+	/* A reference acquiring function cannot acquire
+	 * another refcounted ptr.
+	 */
+	if (is_acquire_function(func_id) && count)
+		return false;
+
 	/* We only support one arg being unreferenced at the moment,
 	 * which is sufficient for the helper functions we have right now.
	 */
 	return count <= 1;
 }
 
-static int check_func_proto(const struct bpf_func_proto *fn)
+static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
 {
 	return check_raw_mode_ok(fn) &&
 	       check_arg_pair_ok(fn) &&
-	       check_refcount_ok(fn) ? 0 : -EINVAL;
+	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2799,19 +2801,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 }
 
 static void release_reg_references(struct bpf_verifier_env *env,
-				   struct bpf_func_state *state, int id)
+				   struct bpf_func_state *state,
+				   int ref_obj_id)
 {
 	struct bpf_reg_state *regs = state->regs, *reg;
 	int i;
 
 	for (i = 0; i < MAX_BPF_REG; i++)
-		if (regs[i].id == id)
+		if (regs[i].ref_obj_id == ref_obj_id)
 			mark_reg_unknown(env, regs, i);
 
 	bpf_for_each_spilled_reg(i, state, reg) {
 		if (!reg)
 			continue;
-		if (reg_is_refcounted(reg) && reg->id == id)
+		if (reg->ref_obj_id == ref_obj_id)
 			__mark_reg_unknown(reg);
 	}
 }
@@ -2820,15 +2823,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
  * resources. Identify all copies of the same pointer and clear the reference.
  */
 static int release_reference(struct bpf_verifier_env *env,
-			     struct bpf_call_arg_meta *meta)
+			     int ref_obj_id)
 {
 	struct bpf_verifier_state *vstate = env->cur_state;
+	int err;
 	int i;
 
+	err = release_reference_state(cur_func(env), ref_obj_id);
+	if (err)
+		return err;
+
 	for (i = 0; i <= vstate->curframe; i++)
-		release_reg_references(env, vstate->frame[i], meta->ptr_id);
+		release_reg_references(env, vstate->frame[i], ref_obj_id);
 
-	return release_reference_state(cur_func(env), meta->ptr_id);
+	return 0;
 }
 
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3047,7 +3055,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	memset(&meta, 0, sizeof(meta));
 	meta.pkt_access = fn->pkt_access;
 
-	err = check_func_proto(fn);
+	err = check_func_proto(fn, func_id);
 	if (err) {
 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
 			func_id_name(func_id), func_id);
@@ -3093,7 +3101,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 			return err;
 		}
 	} else if (is_release_function(func_id)) {
-		err = release_reference(env, &meta);
+		err = release_reference(env, meta.ref_obj_id);
 		if (err) {
 			verbose(env, "func %s#%d reference has not been acquired before\n",
 				func_id_name(func_id), func_id);
@@ -3154,8 +3162,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 			if (id < 0)
 				return id;
 
-			/* For release_reference() */
+			/* For mark_ptr_or_null_reg() */
 			regs[BPF_REG_0].id = id;
+			/* For release_reference() */
+			regs[BPF_REG_0].ref_obj_id = id;
 		} else {
 			/* For mark_ptr_or_null_reg() */
 			regs[BPF_REG_0].id = ++env->id_gen;
@@ -3170,6 +3180,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		return -EINVAL;
 	}
 
+	if (is_ptr_cast_function(func_id))
+		/* For release_reference() */
+		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+
 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
 
 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@@ -3368,7 +3382,7 @@ do_sim:
 		*dst_reg = *ptr_reg;
 	}
 	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
-	if (!ptr_is_dst_reg)
+	if (!ptr_is_dst_reg && ret)
 		*dst_reg = tmp;
 	return !ret ? -EFAULT : 0;
 }
@@ -4124,15 +4138,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }
 
+static void __find_good_pkt_pointers(struct bpf_func_state *state,
+				     struct bpf_reg_state *dst_reg,
+				     enum bpf_reg_type type, u16 new_range)
+{
+	struct bpf_reg_state *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++) {
+		reg = &state->regs[i];
+		if (reg->type == type && reg->id == dst_reg->id)
+			/* keep the maximum range already checked */
+			reg->range = max(reg->range, new_range);
+	}
+
+	bpf_for_each_spilled_reg(i, state, reg) {
+		if (!reg)
+			continue;
+		if (reg->type == type && reg->id == dst_reg->id)
+			reg->range = max(reg->range, new_range);
+	}
+}
+
 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   struct bpf_reg_state *dst_reg,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
-	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *regs = state->regs, *reg;
 	u16 new_range;
-	int i, j;
+	int i;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -4197,20 +4231,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 	 * the range won't allow anything.
 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
 	 */
-	for (i = 0; i < MAX_BPF_REG; i++)
-		if (regs[i].type == type && regs[i].id == dst_reg->id)
-			/* keep the maximum range already checked */
-			regs[i].range = max(regs[i].range, new_range);
-
-	for (j = 0; j <= vstate->curframe; j++) {
-		state = vstate->frame[j];
-		bpf_for_each_spilled_reg(i, state, reg) {
-			if (!reg)
-				continue;
-			if (reg->type == type && reg->id == dst_reg->id)
-				reg->range = max(reg->range, new_range);
-		}
-	}
+	for (i = 0; i <= vstate->curframe; i++)
+		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
+					 new_range);
 }
 
 /* compute branch direction of the expression "if (reg opcode val) goto target;"
@@ -4665,17 +4688,41 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 		} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
 			reg->type = PTR_TO_TCP_SOCK;
 		}
-		if (is_null || !(reg_is_refcounted(reg) ||
-				 reg_may_point_to_spin_lock(reg))) {
-			/* We don't need id from this point onwards anymore,
-			 * thus we should better reset it, so that state
-			 * pruning has chances to take effect.
+		if (is_null) {
+			/* We don't need id and ref_obj_id from this point
+			 * onwards anymore, thus we should better reset it,
+			 * so that state pruning has chances to take effect.
+			 */
+			reg->id = 0;
+			reg->ref_obj_id = 0;
+		} else if (!reg_may_point_to_spin_lock(reg)) {
+			/* For not-NULL ptr, reg->ref_obj_id will be reset
+			 * in release_reg_references().
+			 *
+			 * reg->id is still used by spin_lock ptr. Other
+			 * than spin_lock ptr type, reg->id can be reset.
 			 */
 			reg->id = 0;
 		}
 	}
 }
 
+static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
+				    bool is_null)
+{
+	struct bpf_reg_state *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
+
+	bpf_for_each_spilled_reg(i, state, reg) {
+		if (!reg)
+			continue;
+		mark_ptr_or_null_reg(state, reg, id, is_null);
+	}
+}
+
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
@@ -4683,24 +4730,20 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 				  bool is_null)
 {
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *reg, *regs = state->regs;
+	struct bpf_reg_state *regs = state->regs;
+	u32 ref_obj_id = regs[regno].ref_obj_id;
 	u32 id = regs[regno].id;
-	int i, j;
-
-	if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
-		release_reference_state(state, id);
+	int i;
 
-	for (i = 0; i < MAX_BPF_REG; i++)
-		mark_ptr_or_null_reg(state, &regs[i], id, is_null);
+	if (ref_obj_id && ref_obj_id == id && is_null)
+		/* regs[regno] is in the " == NULL" branch.
+		 * No one could have freed the reference state before
+		 * doing the NULL check.
+		 */
+		WARN_ON_ONCE(release_reference_state(state, id));
 
-	for (j = 0; j <= vstate->curframe; j++) {
-		state = vstate->frame[j];
-		bpf_for_each_spilled_reg(i, state, reg) {
-			if (!reg)
-				continue;
-			mark_ptr_or_null_reg(state, reg, id, is_null);
-		}
-	}
+	for (i = 0; i <= vstate->curframe; i++)
+		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
 }
 
 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
@@ -6052,15 +6095,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
 	}
 	/* Propagate read liveness of registers... */
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
-	/* We don't need to worry about FP liveness because it's read-only */
-	for (i = 0; i < BPF_REG_FP; i++) {
-		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
-			continue;
-		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
-			err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
-					    &vparent->frame[vstate->curframe]->regs[i]);
-			if (err)
-				return err;
+	for (frame = 0; frame <= vstate->curframe; frame++) {
+		/* We don't need to worry about FP liveness, it's read-only */
+		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
+			if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
+				continue;
+			if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
+				err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
						    &vparent->frame[frame]->regs[i]);
+				if (err)
+					return err;
+			}
 		}
 	}
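
Note: taken together, the ref_obj_id hunks above make the verifier invalidate every pointer derived from an acquired reference when that reference is released, not only pointers whose reg->id happens to match. The sketch below shows the program pattern this tracking is designed for. It is a minimal illustration, not code from this patch: the section name and function name are made up, and whether each helper is usable depends on the program type and kernel version.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("tc")
	int ref_obj_id_demo(struct __sk_buff *skb)
	{
		struct bpf_sock_tuple tuple = {};	/* zeroed tuple: lookup will miss at runtime */
		struct bpf_sock *sk;
		struct bpf_tcp_sock *tp;
		__u32 snd_cwnd = 0;

		/* Acquire function (see is_acquire_function() above): the
		 * verifier gives sk a fresh ref_obj_id equal to its reg->id.
		 */
		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0);
		if (!sk)
			return 0;

		/* Ptr-cast function (see is_ptr_cast_function() above): tp
		 * gets a new reg->id for its own NULL check but inherits sk's
		 * ref_obj_id.
		 */
		tp = bpf_tcp_sock(sk);
		if (tp)
			snd_cwnd = tp->snd_cwnd;

		/* Release matches by ref_obj_id (release_reg_references()
		 * above), so both sk and tp are marked unknown here; any
		 * later dereference of tp is rejected instead of slipping
		 * through on a stale reg->id comparison.
		 */
		bpf_sk_release(sk);

		return snd_cwnd ? 1 : 0;
	}

	char _license[] SEC("license") = "GPL";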
