// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * This is useful when we want to be sure that all buffers have been
 * handled, or when IRQs are blocked.
 */
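
/*
 * Usage sketch (illustrative only, not part of this file): printk.c wraps
 * each logbuf_lock critical section in a printk-safe context, so that a
 * recursive printk() lands in the per-CPU buffer instead of deadlocking
 * on the lock. With the enter/exit helpers below and the wrappers from
 * internal.h, that looks roughly like:
 */
#if 0
static void logbuf_critical_section_sketch(void)
{
        unsigned long flags;

        printk_safe_enter_irqsave(flags);
        raw_spin_lock(&logbuf_lock);
        /* Any printk() here is redirected to the safe per-CPU buffer. */
        raw_spin_unlock(&logbuf_lock);
        printk_safe_exit_irqrestore(flags);
}
#endif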

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -     \
                                sizeof(atomic_t) -                      \
                                sizeof(atomic_t) -                      \
                                sizeof(struct irq_work))

struct printk_safe_seq_buf {
        atomic_t        len;    /* length of written data */
        atomic_t        message_lost;
        struct irq_work work;   /* IRQ work that flushes the buffer */
        unsigned char   buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
        if (printk_percpu_data_ready())
                irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and printk-safe
 * have dedicated buffers, because otherwise printk-safe preempted by
 * NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should this
 * happen, printk_safe_log_store() will notice the buffer->len mismatch
 * and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
                                                const char *fmt, va_list args)
{
        int add;
        size_t len;
        va_list ap;

again:
        len = atomic_read(&s->len);

        /* The trailing '\0' is not counted into len. */
        if (len >= sizeof(s->buffer) - 1) {
                atomic_inc(&s->message_lost);
                queue_flush_work(s);
                return 0;
        }

        /*
         * Make sure that all old data have been read before the buffer
         * was reset. This is not needed when we just append data.
         */
        if (!len)
                smp_rmb();

        va_copy(ap, args);
        add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
        va_end(ap);
        if (!add)
                return 0;

        /*
         * Do it once again if the buffer has been flushed in the meantime.
         * Note that atomic_cmpxchg() is an implicit memory barrier that
         * makes sure that the data were written before updating s->len.
         */
        if (atomic_cmpxchg(&s->len, len, len + add) != len)
                goto again;

        queue_flush_work(s);
        return add;
}
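
/*
 * Illustration (a minimal sketch, not part of the build): the writer side
 * of the lockless protocol above. The length is snapshotted, data is
 * written at that offset, and the new length is published with cmpxchg();
 * if a concurrent flush reset the length in the meantime, the cmpxchg()
 * fails and the write is simply redone at the new offset.
 */
#if 0
static char demo_buf[128];
static atomic_t demo_len;

static void demo_append(const char *msg)
{
        int len, add;

        do {
                len = atomic_read(&demo_len);
                add = scnprintf(demo_buf + len, sizeof(demo_buf) - len,
                                "%s", msg);
                /* Publish; retry if a flusher truncated the buffer. */
        } while (atomic_cmpxchg(&demo_len, len, len + add) != len);
}
#endif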

static inline void printk_safe_flush_line(const char *text, int len)
{
        /*
         * Avoid any console driver calls from here, because we may be
         * in NMI or printk_safe context (when in panic). The messages
         * must go only into the ring buffer at this stage. Consoles will
         * get explicitly called later when a crashdump is not generated.
         */
        printk_deferred("%.*s", len, text);
}

/* printk part of the temporary buffer line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
        const char *c, *end;
        bool header;

        c = start;
        end = start + len;
        header = true;

        /* Print line by line. */
        while (c < end) {
                if (*c == '\n') {
                        printk_safe_flush_line(start, c - start + 1);
                        start = ++c;
                        header = true;
                        continue;
                }

                /* Handle continuation lines or a missing newline. */
                if ((c + 1 < end) && printk_get_level(c)) {
                        if (header) {
                                c = printk_skip_level(c);
                                continue;
                        }

                        printk_safe_flush_line(start, c - start);
                        start = c++;
                        header = true;
                        continue;
                }

                header = false;
                c++;
        }

        /* Check if there was a partial line. Ignore pure header. */
        if (start < end && !header) {
                static const char newline[] = KERN_CONT "\n";

                printk_safe_flush_line(start, end - start);
                printk_safe_flush_line(newline, strlen(newline));
        }

        return len;
}
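
/*
 * For reference (illustrative sketch): each record may start with a
 * KERN_SOH level header, e.g. KERN_INFO is "\001" "6". The parser above
 * leans on printk_get_level()/printk_skip_level() to recognize and skip it:
 */
#if 0
static void header_parsing_sketch(void)
{
        const char *msg = KERN_INFO "example line\n";

        /* printk_get_level() returns the level character, here '6'. */
        WARN_ON(printk_get_level(msg) != '6');
        /* printk_skip_level() points past the two header bytes. */
        WARN_ON(printk_skip_level(msg) != msg + 2);
}
#endif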

static void report_message_lost(struct printk_safe_seq_buf *s)
{
        int lost = atomic_xchg(&s->message_lost, 0);

        if (lost)
                printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
        static raw_spinlock_t read_lock =
                __RAW_SPIN_LOCK_INITIALIZER(read_lock);
        struct printk_safe_seq_buf *s =
                container_of(work, struct printk_safe_seq_buf, work);
        unsigned long flags;
        size_t len;
        int i;

        /*
         * The lock has two functions. First, one reader has to flush all
         * available messages to make the lockless synchronization with
         * writers easier. Second, we do not want to mix messages from
         * different CPUs. This is especially important when printing
         * a backtrace.
         */
        raw_spin_lock_irqsave(&read_lock, flags);

        i = 0;
more:
        len = atomic_read(&s->len);

        /*
         * This is just a paranoid check that nobody has manipulated
         * the buffer in an unexpected way. If we printed something then
         * @len must only increase. Also it should never overflow the
         * buffer size.
         */
        if ((i && i >= len) || len > sizeof(s->buffer)) {
                const char *msg = "printk_safe_flush: internal error\n";

                printk_safe_flush_line(msg, strlen(msg));
                len = 0;
        }

        if (!len)
                goto out; /* Someone else has already flushed the buffer. */

        /* Make sure that data has been written up to the @len */
        smp_rmb();

        i += printk_safe_flush_buffer(s->buffer + i, len - i);

        /*
         * Check that nothing has been added in the meantime and truncate
         * the buffer. Note that atomic_cmpxchg() is an implicit memory
         * barrier that makes sure that the data were copied before
         * updating s->len.
         */
        if (atomic_cmpxchg(&s->len, len, 0) != len)
                goto more;

out:
        report_message_lost(s);
        raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu nmi buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
                __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
                __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
        }
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
 *      goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
        /*
         * Make sure that we can access the main ring buffer.
         * Do not risk a double release when more CPUs are up.
         */
        if (raw_spin_is_locked(&logbuf_lock)) {
                if (num_online_cpus() > 1)
                        return;

                debug_locks_off();
                raw_spin_lock_init(&logbuf_lock);
        }

        printk_safe_flush();
}
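
/*
 * For context (a simplified, hypothetical caller): the panic path uses
 * this hook so that messages buffered by NMI backtraces still reach the
 * main ring buffer before the machine stops:
 */
#if 0
static void panic_path_sketch(void)
{
        /* NMI backtraces were stored into the per-CPU buffers... */
        trigger_all_cpu_backtrace();
        /* ...so force them into the main ring buffer while we still can. */
        printk_safe_flush_on_panic();
}
#endif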

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
        struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

        return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}
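
/*
 * These hooks are not called by ordinary users; the generic NMI entry and
 * exit paths invoke them. A simplified, hypothetical sketch of an NMI
 * handler's view:
 */
#if 0
static void nmi_handler_sketch(void)
{
        printk_nmi_enter();     /* done for us by nmi_enter() */
        printk("NMI!\n");       /* stored into nmi_print_seq */
        printk_nmi_exit();      /* done for us by nmi_exit() */
}
#endif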

/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than eventual
 * reordering.
 *
 * It has effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
        this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
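
/*
 * Illustration only: the typical user of the direct mode is a large dump
 * from NMI context, e.g. ftrace_dump(), where losing messages would be
 * worse than occasional reordering. A hypothetical caller:
 */
#if 0
static void huge_nmi_dump_sketch(void)
{
        int i;

        printk_nmi_direct_enter();
        for (i = 0; i < 1000; i++)
                printk("dump line %d\n", i);    /* tries logbuf directly */
        printk_nmi_direct_exit();
}
#endif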

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
        return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
        struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

        return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
        this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
        this_cpu_dec(printk_context);
}

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
        /*
         * Try to use the main logbuf even in NMI. But avoid calling console
         * drivers that might have their own locks.
         */
        if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
            raw_spin_trylock(&logbuf_lock)) {
                int len;

                len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
                raw_spin_unlock(&logbuf_lock);
                defer_console_output();
                return len;
        }

        /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                return vprintk_nmi(fmt, args);

        /* Use extra buffer to prevent a recursion deadlock in safe mode. */
        if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
                return vprintk_safe(fmt, args);

        /* No obstacles. */
        return vprintk_default(fmt, args);
}
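
/*
 * For reference (a simplified sketch of code that lives in printk.c, not
 * here): every printk() funnels through vprintk_func(), which picks the
 * backend from the @printk_context bits as shown above:
 */
#if 0
asmlinkage int printk(const char *fmt, ...)
{
        va_list args;
        int r;

        va_start(args, fmt);
        r = vprintk_func(fmt, args);    /* NMI-direct / NMI / safe / default */
        va_end(args);

        return r;
}
#endif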

void __init printk_safe_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct printk_safe_seq_buf *s;

                s = &per_cpu(safe_print_seq, cpu);
                init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
                s = &per_cpu(nmi_print_seq, cpu);
                init_irq_work(&s->work, __printk_safe_flush);
#endif
        }

        /* Flush any messages that were stored before the IRQ works could run. */
        printk_safe_flush();
}