Skip to content

Commit e701001

Browse files
committed
netfilter: nft_rbtree: allow adjacent intervals with dynamic updates
This patch fixes dynamic element updates for adjacent intervals in the rb-tree representation. Since elements are sorted in the rb-tree, in the case of adjacent nodes with the same key, the assumption is that an interval end node must be placed before an interval opening node. In tree lookup operations, the idea is to search for the closest element that is smaller than the one we're searching for. Given that there will be two possible matches, we have to take the opening interval in the case of adjacent nodes. Range merges are not trivial with the current representation; specifically, we have to check whether the node extensions are equal and make sure we keep the existing internal states around. Signed-off-by: Pablo Neira Ayuso <[email protected]>
1 parent ef1d20e commit e701001

File tree

1 file changed

+35
-5
lines changed

1 file changed

+35
-5
lines changed

net/netfilter/nft_rbtree.c

Lines changed: 35 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -35,23 +35,37 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
3535
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
3636
}
3737

38+
static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
39+
const struct nft_rbtree_elem *interval)
40+
{
41+
return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
42+
}
43+
3844
static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
3945
const struct nft_set_ext **ext)
4046
{
4147
const struct nft_rbtree *priv = nft_set_priv(set);
4248
const struct nft_rbtree_elem *rbe, *interval = NULL;
4349
const struct rb_node *parent;
4450
u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
51+
const void *this;
4552
int d;
4653

4754
spin_lock_bh(&nft_rbtree_lock);
4855
parent = priv->root.rb_node;
4956
while (parent != NULL) {
5057
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
5158

52-
d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
59+
this = nft_set_ext_key(&rbe->ext);
60+
d = memcmp(this, key, set->klen);
5361
if (d < 0) {
5462
parent = parent->rb_left;
63+
/* In case of adjacent ranges, we always see the high
64+
* part of the range in first place, before the low one.
65+
* So don't update interval if the keys are equal.
66+
*/
67+
if (interval && nft_rbtree_equal(set, this, interval))
68+
continue;
5569
interval = rbe;
5670
} else if (d > 0)
5771
parent = parent->rb_right;
@@ -101,9 +115,16 @@ static int __nft_rbtree_insert(const struct nft_set *set,
101115
else if (d > 0)
102116
p = &parent->rb_right;
103117
else {
104-
if (nft_set_elem_active(&rbe->ext, genmask))
105-
return -EEXIST;
106-
p = &parent->rb_left;
118+
if (nft_set_elem_active(&rbe->ext, genmask)) {
119+
if (nft_rbtree_interval_end(rbe) &&
120+
!nft_rbtree_interval_end(new))
121+
p = &parent->rb_left;
122+
else if (!nft_rbtree_interval_end(rbe) &&
123+
nft_rbtree_interval_end(new))
124+
p = &parent->rb_right;
125+
else
126+
return -EEXIST;
127+
}
107128
}
108129
}
109130
rb_link_node(&new->node, parent, p);
@@ -148,7 +169,7 @@ static void *nft_rbtree_deactivate(const struct nft_set *set,
148169
{
149170
const struct nft_rbtree *priv = nft_set_priv(set);
150171
const struct rb_node *parent = priv->root.rb_node;
151-
struct nft_rbtree_elem *rbe;
172+
struct nft_rbtree_elem *rbe, *this = elem->priv;
152173
u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
153174
int d;
154175

@@ -166,6 +187,15 @@ static void *nft_rbtree_deactivate(const struct nft_set *set,
166187
parent = parent->rb_left;
167188
continue;
168189
}
190+
if (nft_rbtree_interval_end(rbe) &&
191+
!nft_rbtree_interval_end(this)) {
192+
parent = parent->rb_left;
193+
continue;
194+
} else if (!nft_rbtree_interval_end(rbe) &&
195+
nft_rbtree_interval_end(this)) {
196+
parent = parent->rb_right;
197+
continue;
198+
}
169199
nft_set_elem_change_active(set, &rbe->ext);
170200
return rbe;
171201
}

0 commit comments

Comments
 (0)