#ifndef _FREERTOS_DRIVERS_COMMON_CPU_PROFILE_HXX_
#define _FREERTOS_DRIVERS_COMMON_CPU_PROFILE_HXX_

#include <string.h> // memset, memcpy, memcmp
#include <unwind.h> // _Unwind_* types of the GCC unwinder

/// Number of 32-bit words of RAM reserved for storing the collected traces.
#ifndef TRACE_BUFFER_LENGTH_WORDS
#define TRACE_BUFFER_LENGTH_WORDS 3000
#endif

/// Maximum number of stack frames recorded in a single trace. (The original
/// definition lives in an elided part of this header; the value 20 is an
/// assumption.)
#ifndef MAX_STRACE
#define MAX_STRACE 20
#endif
extern "C"
{
/// Buffer holding the stack trace collected most recently.
extern void *stacktrace[MAX_STRACE];
/// Number of valid entries in stacktrace[].
extern int strace_len;
/// Collects the stack trace of the current execution context.
void call_unwind(void);
}
/// A custom unidirectional memory allocator so we can take traces from
/// interrupts.
struct
{
    /// Allocates a block from the trace buffer.
    /// @param size number of bytes to allocate.
    /// @return pointer to the block, or nullptr if the buffer is full.
    void *alloc(unsigned size)
    {
        size = (size + 3) / 4; // round bytes up to whole words
        if (endOffset + size > TRACE_BUFFER_LENGTH_WORDS)
        {
            return nullptr;
        }
        void *ret = buffer + endOffset;
        endOffset += size;
        return ret;
    }
    /// Storage of the trace buffer.
    unsigned buffer[TRACE_BUFFER_LENGTH_WORDS];
    /// Index into buffer[] to denote the first free element.
    unsigned endOffset;
    /// How many times we ran into the MAX_STRACE limit.
    unsigned limitReached;
    /// How many times we applied the backtrace hack to work around
    /// single-entry backtraces.
    unsigned singleLenHack;
} cpu_profile_allocator;
/// Linked list entry type for a call-stack backtrace. The frame addresses
/// (len words) are stored directly after this header.
struct trace
{
    /// For quick comparison of traces.
    unsigned hash;
    /// Total memory (bytes) allocated via this trace.
    unsigned total_size;
    /// Number of entries in the trace.
    unsigned len;
    /// Link to the next trace entry.
    struct trace *next;
};

/// Head of the linked list of all distinct traces seen so far.
struct trace *all_traces = nullptr;
/// Computes a hash value of a trace for quick lookup.
/// @param len number of entries in the trace.
/// @param buf the trace entries.
/// @return hash value truncated to 24 bits.
unsigned hash_trace(unsigned len, unsigned *buf)
{
    unsigned ret = 0;
    for (unsigned i = 0; i < len; ++i)
    {
        // Rotate-and-xor mixing step. (The exact step is elided in the
        // source; any stable mix works here, since only the low 24 bits are
        // kept and a full memcmp confirms each candidate match anyway.)
        ret = (ret << 7) ^ (ret >> 25) ^ buf[i];
    }
    return ret & 0xFFFFFF;
}
/// Looks up whether the trace currently in stacktrace[] has been seen before.
/// @param hash hash of the current trace.
/// @return the matching registry entry, or nullptr if this trace is new.
struct trace *find_current_trace(unsigned hash)
{
    for (struct trace *t = all_traces; t; t = t->next)
    {
        if (t->hash != (hash & 0xFFFFFF))
        {
            continue;
        }
        if (t->len != (unsigned)strace_len)
        {
            continue;
        }
        unsigned *payload = (unsigned *)(t + 1);
        if (memcmp(payload, stacktrace, strace_len * sizeof(stacktrace[0])) !=
            0)
        {
            continue;
        }
        return t;
    }
    return nullptr;
}
/// Appends the trace currently in stacktrace[] to the trace registry.
/// @param hash hash of the current trace.
/// @return the new registry entry, or nullptr if the trace buffer is full.
struct trace *add_new_trace(unsigned hash)
{
    unsigned size =
        sizeof(struct trace) + strace_len * sizeof(stacktrace[0]);
    struct trace *t = (struct trace *)cpu_profile_allocator.alloc(size);
    if (!t)
    {
        return nullptr;
    }
    memcpy(t + 1, stacktrace, strace_len * sizeof(stacktrace[0]));
    t->hash = hash & 0xFFFFFF;
    t->len = strace_len;
    t->total_size = 0;
    t->next = all_traces;
    all_traces = t;
    return t;
}
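/// The registry can then be inspected offline, e.g. from gdb or over a debug
/// console. A minimal dump sketch (not part of the original header;
/// print_traces and the printf transport are assumptions):
///
/// @code
/// #include <stdio.h>
///
/// void print_traces()
/// {
///     for (struct trace *t = all_traces; t; t = t->next)
///     {
///         unsigned *payload = (unsigned *)(t + 1); // frame addresses
///         printf("hash %06x count %u:", t->hash, t->total_size);
///         for (unsigned i = 0; i < t->len; ++i)
///         {
///             printf(" 0x%08x", payload[i]); // symbolize with addr2line
///         }
///         printf("\n");
///     }
/// }
/// @endcode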
/// Buffer holding the stack trace collected most recently.
void *stacktrace[MAX_STRACE];
/// Number of valid entries in stacktrace[].
int strace_len;
/// Instruction pointer seen in the previous unwinder callback; used to detect
/// when the unwinder stops making progress.
void *last_ip;

/// This struct definition mimics the internal structures of libgcc in the
/// arm-none-eabi binary. It's not portable and might break in the future.
struct core_regs
{
    unsigned r[16];
};

/// This struct definition mimics the internal structures of libgcc in the
/// arm-none-eabi binary. It's not portable and might break in the future.
typedef struct
{
    unsigned demand_save_flags;
    struct core_regs core;
} phase2_vrs;

/// Register context of the interrupted code; the unwind starts from here.
phase2_vrs main_context;
/// Link register of the interrupted context, saved at interrupt entry.
unsigned saved_lr;
/// Fills in the unwinder register context from the exception frame that the
/// Cortex-M hardware pushed onto the stack at interrupt entry.
void fill_phase2_vrs(volatile unsigned *fault_args)
{
    main_context.demand_save_flags = 0;
    main_context.core.r[0] = fault_args[0];
    main_context.core.r[1] = fault_args[1];
    main_context.core.r[2] = fault_args[2];
    main_context.core.r[3] = fault_args[3];
    main_context.core.r[12] = fault_args[4];
    // Pretend the interrupted pc is also the link register; the +2 keeps the
    // unwinder pointed inside the interrupted (Thumb) function.
    main_context.core.r[14] = fault_args[6] + 2;
    main_context.core.r[15] = fault_args[6];
    saved_lr = fault_args[5];
    // Stack pointer as it was before the 8-word exception frame was pushed.
    main_context.core.r[13] = (unsigned)(fault_args + 8);
}
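// For reference, the fault_args indices above decode the exception frame that
// ARMv7-M hardware pushes onto the active stack at interrupt entry (standard
// frame; no FPU extension assumed):
//
//   fault_args[0..3]  r0-r3
//   fault_args[4]     r12
//   fault_args[5]     lr of the interrupted context (kept in saved_lr)
//   fault_args[6]     return address, i.e. the interrupted pc
//   fault_args[7]     xPSR
//   fault_args + 8    sp value before the exception was taken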
extern "C"
{
/// Internal entry point of the libgcc unwinder: starts a backtrace from the
/// register set given in entry_vrs instead of the current execution context.
_Unwind_Reason_Code __gnu_Unwind_Backtrace(
    _Unwind_Trace_Fn trace, void *trace_argument, phase2_vrs *entry_vrs);
}
/// Callback from the unwinder library; invoked once per stack frame.
_Unwind_Reason_Code trace_func(struct _Unwind_Context *context, void *arg)
{
    void *ip;
    ip = (void *)_Unwind_GetIP(context);
    if (strace_len == 0)
    {
        // First frame: always record it (the caller fixes it up afterwards).
    }
    else if (last_ip == ip)
    {
        // The unwinder stopped making progress.
        if (strace_len == 1 && saved_lr != _Unwind_GetGR(context, 14))
        {
            // Single-entry backtrace hack: force the link register to the
            // value saved at interrupt entry and try one more frame.
            _Unwind_SetGR(context, 14, saved_lr);
            cpu_profile_allocator.singleLenHack++;
            return _URC_NO_REASON;
        }
        return _URC_END_OF_STACK;
    }
    if (strace_len >= MAX_STRACE - 1)
    {
        cpu_profile_allocator.limitReached++;
        return _URC_END_OF_STACK;
    }
    last_ip = ip;
    // Record the start address of the function owning this frame rather than
    // the precise return address.
    ip = (void *)_Unwind_GetRegionStart(context);
    stacktrace[strace_len++] = ip;
    return _URC_NO_REASON;
}
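// Note on the callback contract: returning _URC_NO_REASON lets the unwinder
// continue one frame up, while _URC_END_OF_STACK makes __gnu_Unwind_Backtrace
// stop and return. Recording _Unwind_GetRegionStart() (the entry address of
// the function owning the frame) instead of the raw return address keeps
// traces of the same call path byte-identical, so deduplication stays a plain
// memcmp() in find_current_trace().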
/// Takes a stack trace of the interrupted context and records it in the trace
/// registry, either by adding a new entry or by updating an existing one.
void take_cpu_trace()
{
    memset(stacktrace, 0, sizeof(stacktrace));
    strace_len = 0;
    last_ip = nullptr;
    phase2_vrs first_context = main_context;
    __gnu_Unwind_Backtrace(&trace_func, 0, &first_context);
    // Fixup: frame 0 should be the precise interrupted pc, not the start
    // address of the enclosing function.
    stacktrace[0] = (void *)main_context.core.r[15];
    if (strace_len == 1)
    {
        // Only the interrupted frame was found (e.g. a leaf function that
        // never spilled lr). Restart the unwind from the saved link register
        // to recover the caller chain.
        main_context.core.r[14] = saved_lr;
        main_context.core.r[15] = saved_lr;
        __gnu_Unwind_Backtrace(&trace_func, 0, &main_context);
        // Same fixup for the caller's frame: use the precise return address.
        stacktrace[1] = (void *)saved_lr;
    }
    unsigned h = hash_trace(strace_len, (unsigned *)stacktrace);
    struct trace *t = find_current_trace(h);
    if (!t)
    {
        t = add_new_trace(h);
    }
    if (t)
    {
        t->total_size += 1; // one more tick attributed to this call path
    }
}
/// Runtime switch for the profiler: set to true to start collecting samples.
bool enable_profiling = false;

/// Number of the interrupt currently being serviced, or 0 when executing
/// regular (thread mode) code.
volatile unsigned current_interrupt = 0;
/// Helper class for keeping track of the current interrupt number.
class SetInterrupt
{
public:
    /// Records that servicing of a given interrupt has started.
    /// @param new_value number of the interrupt being serviced.
    SetInterrupt(unsigned new_value)
    {
        old_value = current_interrupt;
        current_interrupt = new_value;
    }
    /// Restores the previously serviced interrupt number.
    ~SetInterrupt()
    {
        current_interrupt = old_value;
    }

private:
    /// Interrupt number that was being serviced when we were constructed.
    unsigned old_value;
};
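/// Typical use is an RAII guard at the top of an interrupt handler whose load
/// should be attributed separately (a sketch; uart0_isr and the interrupt
/// number 37 are made-up examples):
///
/// @code
/// void uart0_isr(void)
/// {
///     SetInterrupt guard(37); // profile ticks in here count as interrupt 37
///     // ... actual interrupt servicing ...
/// }   // destructor restores the previous interrupt number
/// @endcode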
/// Defines the profiling interrupt handler. The handler saves the registers
/// of the interrupted context into main_context, takes a stack trace when
/// enable_profiling is set, and feeds the CPU load tracker via cpuload_tick()
/// (provided by the cpuload module).
///
/// @param irq_handler_name name of the interrupt vector symbol to define.
/// @param CLEAR_IRQ_FLAG statement that acknowledges the interrupt source.
#define DEFINE_CPU_PROFILE_INTERRUPT_HANDLER(irq_handler_name, CLEAR_IRQ_FLAG) \
    extern "C" \
    { \
        void __attribute__((__noinline__)) load_monitor_interrupt_handler( \
            volatile unsigned *exception_args, unsigned exception_return_code) \
        { \
            if (enable_profiling) \
            { \
                fill_phase2_vrs(exception_args); \
                take_cpu_trace(); \
            } \
            /* EXC_RETURN bit 2 set: we interrupted thread (task) code; \
               otherwise we preempted another interrupt handler. */ \
            cpuload_tick(exception_return_code & 4 \
                    ? 0 \
                    : current_interrupt > 0 ? current_interrupt : 255); \
            CLEAR_IRQ_FLAG; \
        } \
        void __attribute__((__naked__)) irq_handler_name(void) \
        { \
            /* Saves the registers not pushed in the exception frame. */ \
            __asm volatile("mov r0, %0 \n" \
                           "str r4, [r0, 4*4] \n" \
                           "str r5, [r0, 5*4] \n" \
                           "str r6, [r0, 6*4] \n" \
                           "str r7, [r0, 7*4] \n" \
                           "str r8, [r0, 8*4] \n" \
                           "str r9, [r0, 9*4] \n" \
                           "str r10, [r0, 10*4] \n" \
                           "str r11, [r0, 11*4] \n" \
                           "str r12, [r0, 12*4] \n" \
                           "str r13, [r0, 13*4] \n" \
                           "str r14, [r0, 14*4] \n" \
                           : \
                           : "r"(main_context.core.r) \
                           : "r0"); \
            /* Finds the exception frame (MSP or PSP depending on EXC_RETURN) \
               and tail-calls the C handler with the frame and EXC_RETURN. */ \
            __asm volatile(" tst lr, #4 \n" \
                           " ite eq \n" \
                           " mrseq r0, msp \n" \
                           " mrsne r0, psp \n" \
                           " mov r1, lr \n" \
                           " ldr r2, =load_monitor_interrupt_handler \n" \
                           " bx r2 \n" \
                           : \
                           : \
                           : "r0", "r1", "r2"); \
        } \
    }
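/// Example instantiation for a periodic profiling timer (a sketch; the vector
/// name and the TivaWare DriverLib call used to acknowledge the timer are
/// assumptions specific to one particular MCU):
///
/// @code
/// DEFINE_CPU_PROFILE_INTERRUPT_HANDLER(
///     timer4a_interrupt_handler,
///     MAP_TimerIntClear(TIMER4_BASE, TIMER_TIMA_TIMEOUT))
/// @endcode
///
/// The vector table entry of the chosen timer must point to
/// timer4a_interrupt_handler, and enable_profiling must be set to true at
/// runtime to start collecting samples.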
#endif // _FREERTOS_DRIVERS_COMMON_CPU_PROFILE_HXX_