tracing: fprobe: optimization for entry only case

For now, fgraph is used for the fprobe, even if we only need to trace the
entry. However, the performance of ftrace is better than fgraph, and we
can use ftrace_ops for this case.

Then performance of kprobe-multi increases from 54M to 69M. Before this
commit:

  $ ./benchs/run_bench_trigger.sh kprobe-multi
  kprobe-multi   :   54.663 ± 0.493M/s

After this commit:

  $ ./benchs/run_bench_trigger.sh kprobe-multi
  kprobe-multi   :   69.447 ± 0.143M/s

Mitigations were disabled during the benchmark testing above.

Link: https://lore.kernel.org/all/20251015083238.2374294-2-dongml2@chinatelecom.cn/

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
This commit is contained in:
Menglong Dong 2025-10-15 16:32:37 +08:00 committed by Masami Hiramatsu (Google)
parent e667152e00
commit 2c67dc457b
1 changed file with 119 additions and 9 deletions

View File

@ -254,8 +254,106 @@ static inline int __fprobe_kprobe_handler(unsigned long ip, unsigned long parent
return ret; return ret;
} }
static int fprobe_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops, #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct ftrace_regs *fregs) /* ftrace_ops callback, this processes fprobes which have only entry_handler. */
/*
 * ftrace_ops entry callback: run the entry_handler of every fprobe hashed
 * on @ip that has no exit_handler (entry-only fprobes are dispatched here
 * instead of through fgraph — see the commit message for the rationale).
 */
static void fprobe_ftrace_entry(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct fprobe_hlist_node *node;
struct rhlist_head *head, *pos;
struct fprobe *fp;
int bit;
/* Bail out if we are already inside this callback on this context. */
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
/*
 * ftrace_test_recursion_trylock() disables preemption, but
 * rhltable_lookup() checks whether rcu_read_lock() is held.
 * So we take rcu_read_lock() here.
 */
rcu_read_lock();
head = rhltable_lookup(&fprobe_ip_table, &ip, fprobe_rht_params);
rhl_for_each_entry_rcu(node, pos, head, hlist) {
if (node->addr != ip)
break;
fp = READ_ONCE(node->fp);
/* Skip disabled fprobes and those served by the fgraph path. */
if (unlikely(!fp || fprobe_disabled(fp) || fp->exit_handler))
continue;
if (fprobe_shared_with_kprobes(fp))
__fprobe_kprobe_handler(ip, parent_ip, fp, fregs, NULL);
else
__fprobe_handler(ip, parent_ip, fp, fregs, NULL);
}
rcu_read_unlock();
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(fprobe_ftrace_entry);
/* Shared ftrace_ops for all entry-only fprobes; saves full regs for handlers. */
static struct ftrace_ops fprobe_ftrace_ops = {
.func = fprobe_ftrace_entry,
.flags = FTRACE_OPS_FL_SAVE_REGS,
};
/* Count of entry-only fprobe users; protected by fprobe_mutex. */
static int fprobe_ftrace_active;
/*
 * Add @num addresses from @addrs to the entry-only filter, registering
 * fprobe_ftrace_ops on the first user. Returns 0 on success or a negative
 * errno. Caller must hold fprobe_mutex.
 */
static int fprobe_ftrace_add_ips(unsigned long *addrs, int num)
{
int ret;
lockdep_assert_held(&fprobe_mutex);
ret = ftrace_set_filter_ips(&fprobe_ftrace_ops, addrs, num, 0, 0);
if (ret)
return ret;
if (!fprobe_ftrace_active) {
ret = register_ftrace_function(&fprobe_ftrace_ops);
if (ret) {
/* Roll back the filter entries installed above. */
ftrace_free_filter(&fprobe_ftrace_ops);
return ret;
}
}
fprobe_ftrace_active++;
return 0;
}
/*
 * Drop @num addresses in @addrs from the entry-only filter, unregistering
 * the shared ftrace_ops when the last user goes away. Caller must hold
 * fprobe_mutex.
 */
static void fprobe_ftrace_remove_ips(unsigned long *addrs, int num)
{
lockdep_assert_held(&fprobe_mutex);
fprobe_ftrace_active--;
if (!fprobe_ftrace_active)
unregister_ftrace_function(&fprobe_ftrace_ops);
/* Remove the filter entries (flag 1 = remove) after unregistering. */
if (num)
ftrace_set_filter_ips(&fprobe_ftrace_ops, addrs, num, 1, 0);
}
/* True if @fp takes the entry-only ftrace_ops path rather than fgraph. */
static bool fprobe_is_ftrace(struct fprobe *fp)
{
return !fp->exit_handler;
}
#else
/* !CONFIG_DYNAMIC_FTRACE_WITH_REGS: the entry-only fast path is unavailable. */
static int fprobe_ftrace_add_ips(unsigned long *addrs, int num)
{
return -ENOENT;
}
static void fprobe_ftrace_remove_ips(unsigned long *addrs, int num)
{
}
/* Always route every fprobe through fgraph when ftrace regs are not built. */
static bool fprobe_is_ftrace(struct fprobe *fp)
{
return false;
}
#endif
/* fgraph_ops callback, this processes fprobes which have exit_handler. */
static int fprobe_fgraph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
struct ftrace_regs *fregs)
{ {
unsigned long *fgraph_data = NULL; unsigned long *fgraph_data = NULL;
unsigned long func = trace->func; unsigned long func = trace->func;
@ -292,7 +390,7 @@ static int fprobe_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
if (node->addr != func) if (node->addr != func)
continue; continue;
fp = READ_ONCE(node->fp); fp = READ_ONCE(node->fp);
if (fp && !fprobe_disabled(fp)) if (fp && !fprobe_disabled(fp) && !fprobe_is_ftrace(fp))
fp->nmissed++; fp->nmissed++;
} }
return 0; return 0;
@ -312,7 +410,7 @@ static int fprobe_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
if (node->addr != func) if (node->addr != func)
continue; continue;
fp = READ_ONCE(node->fp); fp = READ_ONCE(node->fp);
if (!fp || fprobe_disabled(fp)) if (unlikely(!fp || fprobe_disabled(fp) || fprobe_is_ftrace(fp)))
continue; continue;
data_size = fp->entry_data_size; data_size = fp->entry_data_size;
@ -340,7 +438,7 @@ static int fprobe_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
/* If any exit_handler is set, data must be used. */ /* If any exit_handler is set, data must be used. */
return used != 0; return used != 0;
} }
NOKPROBE_SYMBOL(fprobe_entry); NOKPROBE_SYMBOL(fprobe_fgraph_entry);
static void fprobe_return(struct ftrace_graph_ret *trace, static void fprobe_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops, struct fgraph_ops *gops,
@ -379,7 +477,7 @@ static void fprobe_return(struct ftrace_graph_ret *trace,
NOKPROBE_SYMBOL(fprobe_return); NOKPROBE_SYMBOL(fprobe_return);
static struct fgraph_ops fprobe_graph_ops = { static struct fgraph_ops fprobe_graph_ops = {
.entryfunc = fprobe_entry, .entryfunc = fprobe_fgraph_entry,
.retfunc = fprobe_return, .retfunc = fprobe_return,
}; };
static int fprobe_graph_active; static int fprobe_graph_active;
@ -498,9 +596,14 @@ static int fprobe_module_callback(struct notifier_block *nb,
} while (node == ERR_PTR(-EAGAIN)); } while (node == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter); rhashtable_walk_exit(&iter);
if (alist.index > 0) if (alist.index > 0) {
ftrace_set_filter_ips(&fprobe_graph_ops.ops, ftrace_set_filter_ips(&fprobe_graph_ops.ops,
alist.addrs, alist.index, 1, 0); alist.addrs, alist.index, 1, 0);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ftrace_set_filter_ips(&fprobe_ftrace_ops,
alist.addrs, alist.index, 1, 0);
#endif
}
mutex_unlock(&fprobe_mutex); mutex_unlock(&fprobe_mutex);
kfree(alist.addrs); kfree(alist.addrs);
@ -733,7 +836,11 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
mutex_lock(&fprobe_mutex); mutex_lock(&fprobe_mutex);
hlist_array = fp->hlist_array; hlist_array = fp->hlist_array;
ret = fprobe_graph_add_ips(addrs, num); if (fprobe_is_ftrace(fp))
ret = fprobe_ftrace_add_ips(addrs, num);
else
ret = fprobe_graph_add_ips(addrs, num);
if (!ret) { if (!ret) {
add_fprobe_hash(fp); add_fprobe_hash(fp);
for (i = 0; i < hlist_array->size; i++) { for (i = 0; i < hlist_array->size; i++) {
@ -829,7 +936,10 @@ int unregister_fprobe(struct fprobe *fp)
} }
del_fprobe_hash(fp); del_fprobe_hash(fp);
fprobe_graph_remove_ips(addrs, count); if (fprobe_is_ftrace(fp))
fprobe_ftrace_remove_ips(addrs, count);
else
fprobe_graph_remove_ips(addrs, count);
kfree_rcu(hlist_array, rcu); kfree_rcu(hlist_array, rcu);
fp->hlist_array = NULL; fp->hlist_array = NULL;