
Commit e7186af

tracing: Add back FORTIFY_SOURCE logic to kernel_stack event structure
For backward compatibility, older tooling expects to see the kernel_stack
event with a "caller" field that is a fixed size array of 8 addresses. The
code now supports more than 8 with an added "size" field that states the
real number of entries. But the "caller" field still just looks like a
fixed size to user space.

Since the tracing macros that create the user space format files also
create the structures that those files represent, the kernel_stack event
structure had its "caller" field as a fixed size of 8. In reality, when the
event is allocated on the ring buffer, it can hold more if the stack trace
is bigger than 8 functions. The copying of these entries was simply done
with a memcpy():

  size = nr_entries * sizeof(unsigned long);
  memcpy(entry->caller, fstack->calls, size);

The FORTIFY_SOURCE logic noticed at runtime that when nr_entries was
larger than 8, the memcpy() was writing more than what the structure
stated it can hold, and it complained about it. This is because the
FORTIFY_SOURCE code is unaware that the amount allocated is actually
enough to hold the data. It does not expect that a fixed size field will
hold more than the fixed size.

This was originally solved by hiding the caller assignment with some
pointer arithmetic:

  ptr = ring_buffer_data();
  entry = ptr;

  ptr += offsetof(typeof(*entry), caller);
  memcpy(ptr, fstack->calls, size);

But it is considered bad form to hide from kernel hardening. Instead,
make it work nicely with FORTIFY_SOURCE by adding a new __stack_array()
macro that is specific to this one special use case. The macro takes four
arguments: type, item, len, field (whereas the __array() macro takes just
the first three). It acts just like the __array() macro when creating the
code that deals with the format file exposed to user space, but for the
kernel it turns the caller field into:

  type item[] __counted_by(field);

or for this instance:

  unsigned long caller[] __counted_by(size);

Now the kernel code can expose the assignment of the caller to
FORTIFY_SOURCE and everyone is happy!

Link: https://lore.kernel.org/linux-trace-kernel/[email protected]/
Link: https://lore.kernel.org/linux-trace-kernel/[email protected]
Cc: Masami Hiramatsu <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Sven Schnelle <[email protected]>
Suggested-by: Kees Cook <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
1 parent 5d0c230 commit e7186af
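
To make the pattern described above concrete, here is a minimal user-space sketch of a flexible-array trailer sized by a sibling field (illustrative only, not the kernel code: the names are made up, the real allocation comes from the ring buffer, and struct_size()/flex_array_size() are kernel helpers that additionally guard against arithmetic overflow):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the kernel's stack_entry: an element count plus a flexible array */
struct stack_entry_sketch {
	int		size;
	unsigned long	caller[];	/* "caller[] __counted_by(size)" in the kernel */
};

static struct stack_entry_sketch *store_stack(const unsigned long *calls, int nr_entries)
{
	/*
	 * Roughly what struct_size(entry, caller, nr_entries) computes;
	 * the kernel helper also saturates on arithmetic overflow.
	 */
	struct stack_entry_sketch *entry =
		malloc(sizeof(*entry) + nr_entries * sizeof(entry->caller[0]));

	if (!entry)
		return NULL;

	entry->size = nr_entries;
	/* Roughly what flex_array_size(entry, caller, nr_entries) computes */
	memcpy(entry->caller, calls, nr_entries * sizeof(entry->caller[0]));
	return entry;
}

int main(void)
{
	unsigned long calls[12] = { 0 };	/* more entries than the 8 user space sees */
	struct stack_entry_sketch *entry = store_stack(calls, 12);

	if (entry)
		printf("stored %d stack entries\n", entry->size);
	free(entry);
	return 0;
}

Once caller[] is declared as a flexible array annotated with __counted_by(size), a fortified memcpy() of nr_entries elements is provably within the reserved event rather than looking like an overflow of a fixed 8-entry array.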

File tree: 4 files changed (+24, -22 lines)

  kernel/trace/trace.c
  kernel/trace/trace.h
  kernel/trace/trace_entries.h
  kernel/trace/trace_export.c


kernel/trace/trace.c

Lines changed: 4 additions & 21 deletions

@@ -3119,7 +3119,6 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 	struct ftrace_stack *fstack;
 	struct stack_entry *entry;
 	int stackidx;
-	void *ptr;
 
 	/*
 	 * Add one, for this function and the call to save_stack_trace()
@@ -3157,32 +3156,16 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 		nr_entries = stack_trace_save(fstack->calls, size, skip);
 	}
 
-	size = nr_entries * sizeof(unsigned long);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-				    (sizeof(*entry) - sizeof(entry->caller)) + size,
+				    struct_size(entry, caller, nr_entries),
 				    trace_ctx);
 	if (!event)
 		goto out;
-	ptr = ring_buffer_event_data(event);
-	entry = ptr;
-
-	/*
-	 * For backward compatibility reasons, the entry->caller is an
-	 * array of 8 slots to store the stack. This is also exported
-	 * to user space. The amount allocated on the ring buffer actually
-	 * holds enough for the stack specified by nr_entries. This will
-	 * go into the location of entry->caller. Due to string fortifiers
-	 * checking the size of the destination of memcpy() it triggers
-	 * when it detects that size is greater than 8. To hide this from
-	 * the fortifiers, we use "ptr" and pointer arithmetic to assign caller.
-	 *
-	 * The below is really just:
-	 *  memcpy(&entry->caller, fstack->calls, size);
-	 */
-	ptr += offsetof(typeof(*entry), caller);
-	memcpy(ptr, fstack->calls, size);
+	entry = ring_buffer_event_data(event);
 
 	entry->size = nr_entries;
+	memcpy(&entry->caller, fstack->calls,
+	       flex_array_size(entry, caller, nr_entries));
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);

kernel/trace/trace.h

Lines changed: 10 additions & 0 deletions

@@ -77,6 +77,16 @@ enum trace_type {
 #undef __array
 #define __array(type, item, size)	type item[size];
 
+/*
+ * For backward compatibility, older user space expects to see the
+ * kernel_stack event with a fixed size caller field. But today the fix
+ * size is ignored by the kernel, and the real structure is dynamic.
+ * Expose to user space: "unsigned long caller[8];" but the real structure
+ * will be "unsigned long caller[] __counted_by(size)"
+ */
+#undef __stack_array
+#define __stack_array(type, item, size, field)	type item[] __counted_by(field);
+
 #undef __array_desc
 #define __array_desc(type, container, item, size)
 
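For reference, __counted_by() itself is a thin wrapper around a compiler attribute. A sketch of the usual fallback pattern (an approximation, not copied from the kernel's compiler headers):

#ifndef __has_attribute
# define __has_attribute(x) 0	/* older compilers: treat every attribute as unsupported */
#endif

/*
 * Where the compiler supports it, __counted_by(size) tells the bounds
 * checkers (FORTIFY_SOURCE, __builtin_dynamic_object_size()) that the
 * flexible array holds "size" elements, so the memcpy() into caller[]
 * is seen as in-bounds instead of as an overflow of a fixed 8-entry
 * array. Otherwise the annotation expands to nothing.
 */
#if __has_attribute(__counted_by__)
# define __counted_by(member)	__attribute__((__counted_by__(member)))
#else
# define __counted_by(member)
#endif
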
kernel/trace/trace_entries.h

Lines changed: 1 addition & 1 deletion

@@ -190,7 +190,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 
 	F_STRUCT(
 		__field(	int,		size	)
-		__array(	unsigned long,	caller,	FTRACE_STACK_ENTRIES	)
+		__stack_array(	unsigned long,	caller,	FTRACE_STACK_ENTRIES, size)
 	),
 
 	F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"

kernel/trace/trace_export.c

Lines changed: 9 additions & 0 deletions

@@ -51,6 +51,9 @@ static int ftrace_event_register(struct trace_event_call *call,
 #undef __array
 #define __array(type, item, size)	type item[size];
 
+#undef __stack_array
+#define __stack_array(type, item, size, field)	__array(type, item, size)
+
 #undef __array_desc
 #define __array_desc(type, container, item, size)	type item[size];
 
@@ -114,6 +117,9 @@ static void __always_unused ____ftrace_check_##name(void) \
 			 is_signed_type(_type), .filter_type = FILTER_OTHER, \
 			 .len = _len },
 
+#undef __stack_array
+#define __stack_array(_type, _item, _len, _field) __array(_type, _item, _len)
+
 #undef __array_desc
 #define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
 
@@ -149,6 +155,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
 #undef __array
 #define __array(type, item, len)
 
+#undef __stack_array
+#define __stack_array(type, item, len, field)
+
 #undef __array_desc
 #define __array_desc(type, container, item, len)
 
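Taken together, the single __stack_array() line in trace_entries.h is expanded several times with different definitions in effect. Roughly, the two that matter here look like this (a simplified illustration of the expansions, not the full macro machinery):

/* In trace.h (the structure the kernel itself uses): */
#undef __stack_array
#define __stack_array(type, item, size, field)	type item[] __counted_by(field);
/* ...so the kernel_stack entry gets:  unsigned long caller[] __counted_by(size); */

/* In trace_export.c (the passes that build what user space sees), it falls back to __array(): */
#undef __stack_array
#define __stack_array(type, item, size, field)	__array(type, item, size)
/* ...so user space still sees:  unsigned long caller[8];  (FTRACE_STACK_ENTRIES is 8) */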
