[OPTIM] ebtree: ebmb_lookup: reduce stack usage by moving the return code out of the loop
(from ebtree 6.0.5)

The last bugfix introduced a de-optimization in the lookup function
because it artificially extended the scope of some local variables,
which resulted in higher stack usage and more numerous moves between
stack and registers. We can reduce that by moving the return code out
of the loop: gcc then notices that it never needs both "troot" and
"node" at the same time and can use the same register for both.

Doing so reduced the code size by 39 bytes for the lookup function
alone, and noticeably reduced the instruction dependencies caused by
data moves.

(cherry picked from commit 59be3cdb96296b65a57aff30cc203269f9a94ebe)

This should be backported to 1.4 if the previous ebtree fix is backported.
parent 414c4b2146
commit ce3d44a06a
--- a/ebtree/ebimtree.h
+++ b/ebtree/ebimtree.h
@@ -49,7 +49,7 @@ __ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
 
 	troot = root->b[EB_LEFT];
 	if (unlikely(troot == NULL))
-		return NULL;
+		goto ret_null;
 
 	if (unlikely(len == 0))
 		goto walk_down;
@@ -60,9 +60,9 @@ __ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
 			node = container_of(eb_untag(troot, EB_LEAF),
 					    struct ebpt_node, node.branches);
 			if (memcmp(node->key + pos, x, len) != 0)
-				return NULL;
+				goto ret_null;
 			else
-				return node;
+				goto ret_node;
 		}
 		node = container_of(eb_untag(troot, EB_NODE),
 				    struct ebpt_node, node.branches);
@@ -74,15 +74,9 @@ __ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
 			 * one and we don't have our key.
 			 */
 			if (memcmp(node->key + pos, x, len) != 0)
-				return NULL;
-		walk_left:
-			troot = node->node.branches.b[EB_LEFT];
-		walk_down:
-			while (eb_gettag(troot) != EB_LEAF)
-				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
-			node = container_of(eb_untag(troot, EB_LEAF),
-					    struct ebpt_node, node.branches);
-			return node;
+				goto ret_null;
+			else
+				goto walk_left;
 		}
 
 		/* OK, normal data node, let's walk down. We check if all full
@@ -98,7 +92,7 @@ __ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
 			 */
 			while (1) {
 				if (*(unsigned char*)(node->key + pos++) ^ *(unsigned char*)(x++))
-					return NULL; /* more than one full byte is different */
+					goto ret_null; /* more than one full byte is different */
 				if (--len == 0)
 					goto walk_left; /* return first node if all bytes matched */
 				node_bit += 8;
@@ -114,10 +108,21 @@ __ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
 		 */
 		side = *(unsigned char *)x >> node_bit;
 		if (((*(unsigned char*)(node->key + pos) >> node_bit) ^ side) > 1)
-			return NULL;
+			goto ret_null;
 		side &= 1;
 		troot = node->node.branches.b[side];
 	}
+ walk_left:
+	troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+	while (eb_gettag(troot) != EB_LEAF)
+		troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+	node = container_of(eb_untag(troot, EB_LEAF),
+			    struct ebpt_node, node.branches);
+ ret_node:
+	return node;
+ ret_null:
+	return NULL;
 }
 
 /* Insert ebpt_node <new> into subtree starting at node root <root>.
--- a/ebtree/ebmbtree.h
+++ b/ebtree/ebmbtree.h
@@ -127,7 +127,7 @@ static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const v
 
 	troot = root->b[EB_LEFT];
 	if (unlikely(troot == NULL))
-		return NULL;
+		goto ret_null;
 
 	if (unlikely(len == 0))
 		goto walk_down;
@@ -138,9 +138,9 @@ static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const v
 			node = container_of(eb_untag(troot, EB_LEAF),
 					    struct ebmb_node, node.branches);
 			if (memcmp(node->key + pos, x, len) != 0)
-				return NULL;
+				goto ret_null;
 			else
-				return node;
+				goto ret_node;
 		}
 		node = container_of(eb_untag(troot, EB_NODE),
 				    struct ebmb_node, node.branches);
@@ -152,15 +152,9 @@ static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const v
 			 * one and we don't have our key.
 			 */
 			if (memcmp(node->key + pos, x, len) != 0)
-				return NULL;
-		walk_left:
-			troot = node->node.branches.b[EB_LEFT];
-		walk_down:
-			while (eb_gettag(troot) != EB_LEAF)
-				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
-			node = container_of(eb_untag(troot, EB_LEAF),
-					    struct ebmb_node, node.branches);
-			return node;
+				goto ret_null;
+			else
+				goto walk_left;
 		}
 
 		/* OK, normal data node, let's walk down. We check if all full
@@ -176,7 +170,7 @@ static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const v
 			 */
 			while (1) {
 				if (node->key[pos++] ^ *(unsigned char*)(x++))
-					return NULL; /* more than one full byte is different */
+					goto ret_null; /* more than one full byte is different */
 				if (--len == 0)
 					goto walk_left; /* return first node if all bytes matched */
 				node_bit += 8;
@@ -192,10 +186,21 @@ static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const v
 		 */
 		side = *(unsigned char *)x >> node_bit;
 		if (((node->key[pos] >> node_bit) ^ side) > 1)
-			return NULL;
+			goto ret_null;
 		side &= 1;
 		troot = node->node.branches.b[side];
 	}
+ walk_left:
+	troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+	while (eb_gettag(troot) != EB_LEAF)
+		troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+	node = container_of(eb_untag(troot, EB_LEAF),
+			    struct ebmb_node, node.branches);
+ ret_node:
+	return node;
+ ret_null:
+	return NULL;
 }
 
 /* Insert ebmb_node <new> into subtree starting at node root <root>.