author     David S. Miller <davem@davemloft.net>  2014-06-01 22:17:21 -0700
committer  David S. Miller <davem@davemloft.net>  2014-06-01 22:17:21 -0700
commit     c532cea97ae4b936cc634fa5e3ca04e57b106235 (patch)
tree       3468a865cc28d7c6bf743879a72751c9adfb6e7e /include/linux
parent     019ee792d7861fde33e6c2331a8b32a2b975e865 (diff)
parent     f8f6d679aaa78b989d9aee8d2935066fbdca2a30 (diff)
Merge branch 'filter-next'
Daniel Borkmann says:

====================
BPF + test suite updates

These are the last bigger BPF changes that I had in my todo queue for now. As
the first two patches from this series contain additional test cases for the
test suite, I have rebased them on top of current net-next with the set from
[1] applied to avoid introducing any unnecessary merge conflicts. For details,
please refer to the individual patches. Test suite runs fine with the set
applied.

  [1] http://patchwork.ozlabs.org/patch/352599/
      http://patchwork.ozlabs.org/patch/352600/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/filter.h  363
1 file changed, 243 insertions(+), 120 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 625f4de9bdf2..f0c2ad43b4af 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -76,56 +76,211 @@ enum {
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
-/* bpf_add|sub|...: a += x, bpf_mov: a = x */
-#define BPF_ALU64_REG(op, a, x) \
- ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
-#define BPF_ALU32_REG(op, a, x) \
- ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
-
-/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
-#define BPF_ALU64_IMM(op, a, imm) \
- ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
-#define BPF_ALU32_IMM(op, a, imm) \
- ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
-
-/* R0 = *(uint *) (skb->data + off) */
-#define BPF_LD_ABS(size, off) \
- ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
-
-/* R0 = *(uint *) (skb->data + x + off) */
-#define BPF_LD_IND(size, x, off) \
- ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
-
-/* a = *(uint *) (x + off) */
-#define BPF_LDX_MEM(sz, a, x, off) \
- ((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
-
-/* if (a 'op' x) goto pc+off */
-#define BPF_JMP_REG(op, a, x, off) \
- ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
-
-/* if (a 'op' imm) goto pc+off */
-#define BPF_JMP_IMM(op, a, imm, off) \
- ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
-
-#define BPF_EXIT_INSN() \
- ((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
-
-static inline int size_to_bpf(int size)
-{
- switch (size) {
- case 1:
- return BPF_B;
- case 2:
- return BPF_H;
- case 4:
- return BPF_W;
- case 8:
- return BPF_DW;
- default:
- return -EINVAL;
- }
-}
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: A += X */
+
+#define BPF_ALU64_REG(OP, A, X) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_ALU32_REG(OP, A, X) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: A += IMM */
+
+#define BPF_ALU64_IMM(OP, A, IMM) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
+ .a_reg = A, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_ALU32_IMM(OP, A, IMM) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
+ .a_reg = A, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
+
+#define BPF_ENDIAN(TYPE, A, LEN) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
+ .a_reg = A, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
+/* Short form of mov, A = X */
+
+#define BPF_MOV64_REG(A, X) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_MOV32_REG(A, X) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = 0 })
+
+/* Short form of mov, A = IMM */
+
+#define BPF_MOV64_IMM(A, IMM) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_K, \
+ .a_reg = A, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_MOV32_IMM(A, IMM) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU | BPF_MOV | BPF_K, \
+ .a_reg = A, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */
+
+#define BPF_MOV64_RAW(TYPE, A, X, IMM) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_MOV32_RAW(TYPE, A, X, IMM) \
+ ((struct sock_filter_int) { \
+ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */
+
+#define BPF_LD_ABS(SIZE, OFF) \
+ ((struct sock_filter_int) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
+ .a_reg = 0, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = OFF })
+
+/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */
+
+#define BPF_LD_IND(SIZE, X, OFF) \
+ ((struct sock_filter_int) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
+ .a_reg = 0, \
+ .x_reg = X, \
+ .off = 0, \
+ .imm = OFF })
+
+/* Memory load, A = *(uint *) (X + OFF), and vice versa */
+
+#define BPF_LDX_MEM(SIZE, A, X, OFF) \
+ ((struct sock_filter_int) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_STX_MEM(SIZE, A, X, OFF) \
+ ((struct sock_filter_int) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */
+
+#define BPF_JMP_REG(OP, A, X, OFF) \
+ ((struct sock_filter_int) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */
+
+#define BPF_JMP_IMM(OP, A, IMM, OFF) \
+ ((struct sock_filter_int) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
+ .a_reg = A, \
+ .x_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Function call */
+
+#define BPF_EMIT_CALL(FUNC) \
+ ((struct sock_filter_int) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .a_reg = 0, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = ((FUNC) - __bpf_call_base) })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, A, X, OFF, IMM) \
+ ((struct sock_filter_int) { \
+ .code = CODE, \
+ .a_reg = A, \
+ .x_reg = X, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN() \
+ ((struct sock_filter_int) { \
+ .code = BPF_JMP | BPF_EXIT, \
+ .a_reg = 0, \
+ .x_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
+#define bytes_to_bpf_size(bytes) \
+({ \
+ int bpf_size = -EINVAL; \
+ \
+ if (bytes == sizeof(u8)) \
+ bpf_size = BPF_B; \
+ else if (bytes == sizeof(u16)) \
+ bpf_size = BPF_H; \
+ else if (bytes == sizeof(u32)) \
+ bpf_size = BPF_W; \
+ else if (bytes == sizeof(u64)) \
+ bpf_size = BPF_DW; \
+ \
+ bpf_size; \
+})
/* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
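
For illustration only (this sketch is not part of the patch): the new block
initializer macros compose directly into instruction arrays. The array name
"insns" below is hypothetical; R0 (register 0) holds the program's exit
value per the internal BPF calling convention.

	/* "return 42", built from the new initializer macros
	 * (assumes the linux/filter.h definitions added above).
	 */
	struct sock_filter_int insns[] = {
		BPF_MOV64_IMM(0, 40),		/* R0 = 40 */
		BPF_ALU64_IMM(BPF_ADD, 0, 2),	/* R0 += 2, i.e. R0 = 42 */
		BPF_EXIT_INSN(),		/* return R0 */
	};

Similarly, bytes_to_bpf_size(sizeof(u32)) above evaluates to BPF_W, and to
-EINVAL for any width other than 1, 2, 4 or 8 bytes.
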
@@ -197,7 +352,6 @@ int sk_detach_filter(struct sock *sk);
int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
-void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
@@ -205,6 +359,41 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct sk_filter *fp);
+#define BPF_ANC BIT(15)
+
+static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
+{
+ BUG_ON(ftest->code & BPF_ANC);
+
+ switch (ftest->code) {
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
+#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
+ return BPF_ANC | SKF_AD_##CODE
+ switch (ftest->k) {
+ BPF_ANCILLARY(PROTOCOL);
+ BPF_ANCILLARY(PKTTYPE);
+ BPF_ANCILLARY(IFINDEX);
+ BPF_ANCILLARY(NLATTR);
+ BPF_ANCILLARY(NLATTR_NEST);
+ BPF_ANCILLARY(MARK);
+ BPF_ANCILLARY(QUEUE);
+ BPF_ANCILLARY(HATYPE);
+ BPF_ANCILLARY(RXHASH);
+ BPF_ANCILLARY(CPU);
+ BPF_ANCILLARY(ALU_XOR_X);
+ BPF_ANCILLARY(VLAN_TAG);
+ BPF_ANCILLARY(VLAN_TAG_PRESENT);
+ BPF_ANCILLARY(PAY_OFFSET);
+ BPF_ANCILLARY(RANDOM);
+ }
+ /* Fallthrough. */
+ default:
+ return ftest->code;
+ }
+}
+
#ifdef CONFIG_BPF_JIT
#include <stdarg.h>
#include <linux/linkage.h>
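
Also for illustration (not part of the patch): a hypothetical caller of
bpf_anc_helper(). The helper folds the classic "load at magic SKF_AD_OFF
offset" encoding into a single u16 code, so converters can test one value
instead of re-checking ftest->k themselves:

	/* Hypothetical sketch: true if the classic BPF instruction is an
	 * ancillary load such as BPF_ANC | SKF_AD_PROTOCOL.
	 */
	static bool is_ancillary_load(const struct sock_filter *ftest)
	{
		return (bpf_anc_helper(ftest) & BPF_ANC) != 0;
	}
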
@@ -224,86 +413,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
}
#else
#include <linux/slab.h>
+
static inline void bpf_jit_compile(struct sk_filter *fp)
{
}
+
static inline void bpf_jit_free(struct sk_filter *fp)
{
kfree(fp);
}
-#endif
+#endif /* CONFIG_BPF_JIT */
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
}
-enum {
- BPF_S_RET_K = 1,
- BPF_S_RET_A,
- BPF_S_ALU_ADD_K,
- BPF_S_ALU_ADD_X,
- BPF_S_ALU_SUB_K,
- BPF_S_ALU_SUB_X,
- BPF_S_ALU_MUL_K,
- BPF_S_ALU_MUL_X,
- BPF_S_ALU_DIV_X,
- BPF_S_ALU_MOD_K,
- BPF_S_ALU_MOD_X,
- BPF_S_ALU_AND_K,
- BPF_S_ALU_AND_X,
- BPF_S_ALU_OR_K,
- BPF_S_ALU_OR_X,
- BPF_S_ALU_XOR_K,
- BPF_S_ALU_XOR_X,
- BPF_S_ALU_LSH_K,
- BPF_S_ALU_LSH_X,
- BPF_S_ALU_RSH_K,
- BPF_S_ALU_RSH_X,
- BPF_S_ALU_NEG,
- BPF_S_LD_W_ABS,
- BPF_S_LD_H_ABS,
- BPF_S_LD_B_ABS,
- BPF_S_LD_W_LEN,
- BPF_S_LD_W_IND,
- BPF_S_LD_H_IND,
- BPF_S_LD_B_IND,
- BPF_S_LD_IMM,
- BPF_S_LDX_W_LEN,
- BPF_S_LDX_B_MSH,
- BPF_S_LDX_IMM,
- BPF_S_MISC_TAX,
- BPF_S_MISC_TXA,
- BPF_S_ALU_DIV_K,
- BPF_S_LD_MEM,
- BPF_S_LDX_MEM,
- BPF_S_ST,
- BPF_S_STX,
- BPF_S_JMP_JA,
- BPF_S_JMP_JEQ_K,
- BPF_S_JMP_JEQ_X,
- BPF_S_JMP_JGE_K,
- BPF_S_JMP_JGE_X,
- BPF_S_JMP_JGT_K,
- BPF_S_JMP_JGT_X,
- BPF_S_JMP_JSET_K,
- BPF_S_JMP_JSET_X,
- /* Ancillary data */
- BPF_S_ANC_PROTOCOL,
- BPF_S_ANC_PKTTYPE,
- BPF_S_ANC_IFINDEX,
- BPF_S_ANC_NLATTR,
- BPF_S_ANC_NLATTR_NEST,
- BPF_S_ANC_MARK,
- BPF_S_ANC_QUEUE,
- BPF_S_ANC_HATYPE,
- BPF_S_ANC_RXHASH,
- BPF_S_ANC_CPU,
- BPF_S_ANC_ALU_XOR_X,
- BPF_S_ANC_VLAN_TAG,
- BPF_S_ANC_VLAN_TAG_PRESENT,
- BPF_S_ANC_PAY_OFFSET,
- BPF_S_ANC_RANDOM,
-};
-
#endif /* __LINUX_FILTER_H__ */