mirror of https://github.com/torvalds/linux.git
printk changes for 6.19
-----BEGIN PGP SIGNATURE-----
iQJPBAABCAA5FiEESH4wyp42V4tXvYsjUqAMR0iAlPIFAmktlbUbFIAAAAAABAAO
bWFudTIsMi41KzEuMTEsMiwyAAoJEFKgDEdIgJTyevsP/1z98/wfCaSCquIq4H8S
OTqFGybGgYQt1NmMj2cGPpbAE3LJNYORT0A4tcoqOTy1Z5xbQz63rO3clSI/e7Mf
n4ZZ7NvkE40i8et1BjqtZa9dSkAv4QLYH73KrtNeuTr5tqvHo1x8FakUH6gQnb1k
QOOebvbVXnOb+rh89j1GZShrLFcCil0psjp165WHAYE/3PyFBgYGLMCgwLqS+W3H
re5Q4sl/ySXpMFF/XN1Kww48FWxy/h+YQFCxZwuWlUcXtVjqZ+BN+keb7AqaFQ7R
dC2exV2W0RBoupEJR/FWHoXrm/bDDLhzqRaMvoggLJrMJ9L6V0WdIhaFA4qzoG63
paJGFjUfmDX3dpPsAddq7kKeevCz4a2/HwFKhiBqqq4tdHuely7wZgnoFO7ovgmu
DYDCXHtpJuWZR3WJ5I/V/sJ9i9KFXhhyWcKVf13QTAFiCaA09aeSAcUWNYNaaxbn
nu6IkUxdIVnWIEBgcYH6jz1DrPGreYLYuD4bVb2gdZoP0r3tnMpG6xfSNIUueSGd
VFAKW9PJYaj7Id+jgACH6V+gQ22L600xJDdL1bPjRbGE0LD7vlz2F1MZTq3BFJFn
hUxJeOZplHX+TPophdvH4MO9VLmydWLUyJiDBP1yA8M9XZms/5s7IJJ1RYXqUCcf
qEB4L7W1+Qy1R/lzf2PU9X4R
=FnfO
-----END PGP SIGNATURE-----
Merge tag 'printk-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux
Pull printk updates from Petr Mladek:
- Allow creating nbcon console drivers with an unsafe write_atomic()
callback that can only be called by the final nbcon_atomic_flush_unsafe().
Otherwise, the driver would rely on the kthread.
It is going to be used as a best-effort approach for an
experimental nbcon netconsole driver, see
https://lore.kernel.org/r/20251121-nbcon-v1-2-503d17b2b4af@debian.org
Note that a safe .write_atomic() callback is supposed to work in NMI
context. But some networking drivers are not safe even in IRQ
context:
https://lore.kernel.org/r/oc46gdpmmlly5o44obvmoatfqo5bhpgv7pabpvb6sjuqioymcg@gjsma3ghoz35
In an ideal world, all networking drivers would be fixed first and
the atomic flush would be blocked only in NMI context. But it brings
the question how reliable networking drivers are when the system is
in a bad state. They might block flushing more reliable serial
consoles which are more suitable for serious debugging anyway.
- Allow to use the last 4 bytes of the printk ring buffer.
- Prevent queuing IRQ work and block printk kthreads when consoles are
suspended. Otherwise, they create unnecessary churn or even block
the suspend.
- Release console_lock() between each record in the kthread used for
legacy consoles on RT. It might significantly speed up the boot.
- Release nbcon context between each record in the atomic flush. It
prevents stalls of the related printk kthread after it has lost the
ownership in the middle of a record.
- Add support for NBCON consoles into KDB
- Add %ptSp modifier for printing struct timespec64 and use it where
possible
- Misc code clean up
* tag 'printk-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux: (48 commits)
printk: Use console_is_usable on console_unblank
arch: um: kmsg_dump: Use console_is_usable
drivers: serial: kgdboc: Drop checks for CON_ENABLED and CON_BOOT
lib/vsprintf: Unify FORMAT_STATE_NUM handlers
printk: Avoid irq_work for printk_deferred() on suspend
printk: Avoid scheduling irq_work on suspend
printk: Allow printk_trigger_flush() to flush all types
tracing: Switch to use %ptSp
scsi: snic: Switch to use %ptSp
scsi: fnic: Switch to use %ptSp
s390/dasd: Switch to use %ptSp
ptp: ocp: Switch to use %ptSp
pps: Switch to use %ptSp
PCI: epf-test: Switch to use %ptSp
net: dsa: sja1105: Switch to use %ptSp
mmc: mmc_test: Switch to use %ptSp
media: av7110: Switch to use %ptSp
ipmi: Switch to use %ptSp
igb: Switch to use %ptSp
e1000e: Switch to use %ptSp
...
This commit is contained in:
commit
4d38b88fd1
|
|
@ -547,11 +547,13 @@ Time and date
|
||||||
%pt[RT]s YYYY-mm-dd HH:MM:SS
|
%pt[RT]s YYYY-mm-dd HH:MM:SS
|
||||||
%pt[RT]d YYYY-mm-dd
|
%pt[RT]d YYYY-mm-dd
|
||||||
%pt[RT]t HH:MM:SS
|
%pt[RT]t HH:MM:SS
|
||||||
%pt[RT][dt][r][s]
|
%ptSp <seconds>.<nanoseconds>
|
||||||
|
%pt[RST][dt][r][s]
|
||||||
|
|
||||||
For printing date and time as represented by::
|
For printing date and time as represented by::
|
||||||
|
|
||||||
R struct rtc_time structure
|
R content of struct rtc_time
|
||||||
|
S content of struct timespec64
|
||||||
T time64_t type
|
T time64_t type
|
||||||
|
|
||||||
in human readable format.
|
in human readable format.
|
||||||
|
|
@ -563,6 +565,11 @@ The %pt[RT]s (space) will override ISO 8601 separator by using ' ' (space)
|
||||||
instead of 'T' (Capital T) between date and time. It won't have any effect
|
instead of 'T' (Capital T) between date and time. It won't have any effect
|
||||||
when date or time is omitted.
|
when date or time is omitted.
|
||||||
|
|
||||||
|
The %ptSp is equivalent to %lld.%09ld for the content of the struct timespec64.
|
||||||
|
When the other specifiers are given, it becomes the respective equivalent of
|
||||||
|
%ptT[dt][r][s].%09ld. In other words, the seconds are being printed in
|
||||||
|
the human readable format followed by a dot and nanoseconds.
|
||||||
|
|
||||||
Passed by reference.
|
Passed by reference.
|
||||||
|
|
||||||
struct clk
|
struct clk
|
||||||
|
|
|
||||||
|
|
@ -20608,6 +20608,7 @@ R: John Ogness <john.ogness@linutronix.de>
|
||||||
R: Sergey Senozhatsky <senozhatsky@chromium.org>
|
R: Sergey Senozhatsky <senozhatsky@chromium.org>
|
||||||
S: Maintained
|
S: Maintained
|
||||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
|
T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
|
||||||
|
F: Documentation/core-api/printk-basics.rst
|
||||||
F: include/linux/printk.h
|
F: include/linux/printk.h
|
||||||
F: kernel/printk/
|
F: kernel/printk/
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -31,7 +31,7 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
|
||||||
* expected to output the crash information.
|
* expected to output the crash information.
|
||||||
*/
|
*/
|
||||||
if (strcmp(con->name, "ttynull") != 0 &&
|
if (strcmp(con->name, "ttynull") != 0 &&
|
||||||
(console_srcu_read_flags(con) & CON_ENABLED)) {
|
console_is_usable(con, console_srcu_read_flags(con), true)) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -275,8 +275,7 @@ void debug_timestamp(struct smi_info *smi_info, char *msg)
|
||||||
struct timespec64 t;
|
struct timespec64 t;
|
||||||
|
|
||||||
ktime_get_ts64(&t);
|
ktime_get_ts64(&t);
|
||||||
dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
|
dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t);
|
||||||
msg, t.tv_sec, t.tv_nsec);
|
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
#define debug_timestamp(smi_info, x)
|
#define debug_timestamp(smi_info, x)
|
||||||
|
|
|
||||||
|
|
@ -1083,10 +1083,8 @@ static int sender(void *send_info, struct ipmi_smi_msg *msg)
|
||||||
struct timespec64 t;
|
struct timespec64 t;
|
||||||
|
|
||||||
ktime_get_real_ts64(&t);
|
ktime_get_real_ts64(&t);
|
||||||
dev_dbg(&ssif_info->client->dev,
|
dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n",
|
||||||
"**Enqueue %02x %02x: %lld.%6.6ld\n",
|
msg->data[0], msg->data[1], &t);
|
||||||
msg->data[0], msg->data[1],
|
|
||||||
(long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
|
|
||||||
}
|
}
|
||||||
return IPMI_CC_NO_ERROR;
|
return IPMI_CC_NO_ERROR;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -59,7 +59,7 @@ static void sync_print_fence(struct seq_file *s,
|
||||||
struct timespec64 ts64 =
|
struct timespec64 ts64 =
|
||||||
ktime_to_timespec64(fence->timestamp);
|
ktime_to_timespec64(fence->timestamp);
|
||||||
|
|
||||||
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
|
seq_printf(s, "@%ptSp", &ts64);
|
||||||
}
|
}
|
||||||
|
|
||||||
seq_printf(s, ": %lld", fence->seqno);
|
seq_printf(s, ": %lld", fence->seqno);
|
||||||
|
|
|
||||||
|
|
@ -217,8 +217,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
|
||||||
drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
|
drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
|
||||||
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
|
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
|
||||||
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
|
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
|
||||||
drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
|
drm_printf(&p, "time: %ptSp\n", &coredump->reset_time);
|
||||||
coredump->reset_time.tv_nsec);
|
|
||||||
|
|
||||||
if (coredump->reset_task_info.task.pid)
|
if (coredump->reset_task_info.task.pid)
|
||||||
drm_printf(&p, "process_name: %s PID: %d\n",
|
drm_printf(&p, "process_name: %s PID: %d\n",
|
||||||
|
|
|
||||||
|
|
@ -794,10 +794,8 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
|
||||||
ts_vblank_time = ktime_to_timespec64(*vblank_time);
|
ts_vblank_time = ktime_to_timespec64(*vblank_time);
|
||||||
|
|
||||||
drm_dbg_vbl(dev,
|
drm_dbg_vbl(dev,
|
||||||
"crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
|
"crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n",
|
||||||
pipe, hpos, vpos,
|
pipe, hpos, vpos, &ts_etime, &ts_vblank_time,
|
||||||
(u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
|
|
||||||
(u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
|
|
||||||
duration_ns / 1000, i);
|
duration_ns / 1000, i);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
|
|
||||||
|
|
@ -82,8 +82,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
|
||||||
drm_printf(p, "kernel: " UTS_RELEASE "\n");
|
drm_printf(p, "kernel: " UTS_RELEASE "\n");
|
||||||
drm_printf(p, "module: " KBUILD_MODNAME "\n");
|
drm_printf(p, "module: " KBUILD_MODNAME "\n");
|
||||||
drm_printf(p, "dpu devcoredump\n");
|
drm_printf(p, "dpu devcoredump\n");
|
||||||
drm_printf(p, "time: %lld.%09ld\n",
|
drm_printf(p, "time: %ptSp\n", &state->time);
|
||||||
state->time.tv_sec, state->time.tv_nsec);
|
|
||||||
|
|
||||||
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
|
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
|
||||||
drm_printf(p, "====================%s================\n", block->name);
|
drm_printf(p, "====================%s================\n", block->name);
|
||||||
|
|
|
||||||
|
|
@ -197,8 +197,7 @@ static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
|
||||||
drm_printf(&p, "---\n");
|
drm_printf(&p, "---\n");
|
||||||
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
|
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
|
||||||
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
|
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
|
||||||
drm_printf(&p, "time: %lld.%09ld\n",
|
drm_printf(&p, "time: %ptSp\n", &state->time);
|
||||||
state->time.tv_sec, state->time.tv_nsec);
|
|
||||||
if (state->comm)
|
if (state->comm)
|
||||||
drm_printf(&p, "comm: %s\n", state->comm);
|
drm_printf(&p, "comm: %s\n", state->comm);
|
||||||
if (state->cmd)
|
if (state->cmd)
|
||||||
|
|
|
||||||
|
|
@ -106,9 +106,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
|
||||||
drm_puts(&p, "module: " KBUILD_MODNAME "\n");
|
drm_puts(&p, "module: " KBUILD_MODNAME "\n");
|
||||||
|
|
||||||
ts = ktime_to_timespec64(ss->snapshot_time);
|
ts = ktime_to_timespec64(ss->snapshot_time);
|
||||||
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
|
drm_printf(&p, "Snapshot time: %ptSp\n", &ts);
|
||||||
ts = ktime_to_timespec64(ss->boot_time);
|
ts = ktime_to_timespec64(ss->boot_time);
|
||||||
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
|
drm_printf(&p, "Uptime: %ptSp\n", &ts);
|
||||||
drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
|
drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
|
||||||
xe_device_snapshot_print(xe, &p);
|
xe_device_snapshot_print(xe, &p);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -586,14 +586,11 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
|
||||||
rate = mmc_test_rate(tot, &ts);
|
rate = mmc_test_rate(tot, &ts);
|
||||||
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
|
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
|
||||||
|
|
||||||
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
|
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %ptSp seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n",
|
||||||
"%llu.%09u seconds (%u kB/s, %u KiB/s, "
|
mmc_hostname(test->card->host), count, sectors, count,
|
||||||
"%u.%02u IOPS, sg_len %d)\n",
|
sectors >> 1, (sectors & 1 ? ".5" : ""), &ts,
|
||||||
mmc_hostname(test->card->host), count, sectors, count,
|
rate / 1000, rate / 1024, iops / 100, iops % 100,
|
||||||
sectors >> 1, (sectors & 1 ? ".5" : ""),
|
test->area.sg_len);
|
||||||
(u64)ts.tv_sec, (u32)ts.tv_nsec,
|
|
||||||
rate / 1000, rate / 1024, iops / 100, iops % 100,
|
|
||||||
test->area.sg_len);
|
|
||||||
|
|
||||||
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
|
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
|
||||||
}
|
}
|
||||||
|
|
@ -3074,10 +3071,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
|
||||||
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
|
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
|
||||||
|
|
||||||
list_for_each_entry(tr, &gr->tr_lst, link) {
|
list_for_each_entry(tr, &gr->tr_lst, link) {
|
||||||
seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
|
seq_printf(sf, "%u %d %ptSp %u %u.%02u\n",
|
||||||
tr->count, tr->sectors,
|
tr->count, tr->sectors, &tr->ts, tr->rate,
|
||||||
(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
|
tr->iops / 100, tr->iops % 100);
|
||||||
tr->rate, tr->iops / 100, tr->iops % 100);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -775,9 +775,8 @@ static void sja1105_tas_state_machine(struct work_struct *work)
|
||||||
base_time_ts = ns_to_timespec64(base_time);
|
base_time_ts = ns_to_timespec64(base_time);
|
||||||
now_ts = ns_to_timespec64(now);
|
now_ts = ns_to_timespec64(now);
|
||||||
|
|
||||||
dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
|
dev_dbg(ds->dev, "OPER base time %ptSp (now %ptSp)\n",
|
||||||
base_time_ts.tv_sec, base_time_ts.tv_nsec,
|
&base_time_ts, &now_ts);
|
||||||
now_ts.tv_sec, now_ts.tv_nsec);
|
|
||||||
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
|
@ -798,8 +797,7 @@ static void sja1105_tas_state_machine(struct work_struct *work)
|
||||||
if (now < tas_data->oper_base_time) {
|
if (now < tas_data->oper_base_time) {
|
||||||
/* TAS has not started yet */
|
/* TAS has not started yet */
|
||||||
diff = ns_to_timespec64(tas_data->oper_base_time - now);
|
diff = ns_to_timespec64(tas_data->oper_base_time - now);
|
||||||
dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
|
dev_dbg(ds->dev, "time to start: [%ptSp]", &diff);
|
||||||
diff.tv_sec, diff.tv_nsec);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -229,14 +229,11 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
|
||||||
systim_overflow_work.work);
|
systim_overflow_work.work);
|
||||||
struct e1000_hw *hw = &adapter->hw;
|
struct e1000_hw *hw = &adapter->hw;
|
||||||
struct timespec64 ts;
|
struct timespec64 ts;
|
||||||
u64 ns;
|
|
||||||
|
|
||||||
/* Update the timecounter */
|
/* Update the timecounter */
|
||||||
ns = timecounter_read(&adapter->tc);
|
ts = ns_to_timespec64(timecounter_read(&adapter->tc));
|
||||||
|
|
||||||
ts = ns_to_timespec64(ns);
|
e_dbg("SYSTIM overflow check at %ptSp\n", &ts);
|
||||||
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
|
|
||||||
(long long) ts.tv_sec, ts.tv_nsec);
|
|
||||||
|
|
||||||
schedule_delayed_work(&adapter->systim_overflow_work,
|
schedule_delayed_work(&adapter->systim_overflow_work,
|
||||||
E1000_SYSTIM_OVERFLOW_PERIOD);
|
E1000_SYSTIM_OVERFLOW_PERIOD);
|
||||||
|
|
|
||||||
|
|
@ -840,14 +840,11 @@ static void igb_ptp_overflow_check(struct work_struct *work)
|
||||||
struct igb_adapter *igb =
|
struct igb_adapter *igb =
|
||||||
container_of(work, struct igb_adapter, ptp_overflow_work.work);
|
container_of(work, struct igb_adapter, ptp_overflow_work.work);
|
||||||
struct timespec64 ts;
|
struct timespec64 ts;
|
||||||
u64 ns;
|
|
||||||
|
|
||||||
/* Update the timecounter */
|
/* Update the timecounter */
|
||||||
ns = timecounter_read(&igb->tc);
|
ts = ns_to_timespec64(timecounter_read(&igb->tc));
|
||||||
|
|
||||||
ts = ns_to_timespec64(ns);
|
pr_debug("igb overflow check at %ptSp\n", &ts);
|
||||||
pr_debug("igb overflow check at %lld.%09lu\n",
|
|
||||||
(long long) ts.tv_sec, ts.tv_nsec);
|
|
||||||
|
|
||||||
schedule_delayed_work(&igb->ptp_overflow_work,
|
schedule_delayed_work(&igb->ptp_overflow_work,
|
||||||
IGB_SYSTIM_OVERFLOW_PERIOD);
|
IGB_SYSTIM_OVERFLOW_PERIOD);
|
||||||
|
|
|
||||||
|
|
@ -331,9 +331,8 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
|
||||||
rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
|
rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
|
||||||
|
|
||||||
dev_info(&epf_test->epf->dev,
|
dev_info(&epf_test->epf->dev,
|
||||||
"%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
|
"%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n",
|
||||||
op, size, dma ? "YES" : "NO",
|
op, size, dma ? "YES" : "NO", &ts, rate);
|
||||||
(u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
|
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
|
||||||
|
|
|
||||||
|
|
@ -80,8 +80,7 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
|
||||||
/* check if we are late */
|
/* check if we are late */
|
||||||
if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
|
if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
pr_err("we are late this time %lld.%09ld\n",
|
pr_err("we are late this time %ptSp\n", &ts1);
|
||||||
(s64)ts1.tv_sec, ts1.tv_nsec);
|
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -163,8 +163,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
|
||||||
/* check event type */
|
/* check event type */
|
||||||
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
|
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
|
||||||
|
|
||||||
dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n",
|
dev_dbg(&pps->dev, "PPS event at %ptSp\n", &ts->ts_real);
|
||||||
(s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
|
|
||||||
|
|
||||||
timespec_to_pps_ktime(&ts_real, ts->ts_real);
|
timespec_to_pps_ktime(&ts_real, ts->ts_real);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4287,11 +4287,9 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
|
||||||
ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
|
ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
|
||||||
sys_ts = ns_to_timespec64(ns);
|
sys_ts = ns_to_timespec64(ns);
|
||||||
|
|
||||||
seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC",
|
seq_printf(s, "%7s: %ptSp == %ptS TAI\n", "PHC", &ts, &ts);
|
||||||
ts.tv_sec, ts.tv_nsec, &ts);
|
seq_printf(s, "%7s: %ptSp == %ptS UTC offset %d\n", "SYS",
|
||||||
seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS",
|
&sys_ts, &sys_ts, bp->utc_tai_offset);
|
||||||
sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts,
|
|
||||||
bp->utc_tai_offset);
|
|
||||||
seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "",
|
seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "",
|
||||||
timespec64_to_ns(&ts) - ns,
|
timespec64_to_ns(&ts) - ns,
|
||||||
post_ns - pre_ns);
|
post_ns - pre_ns);
|
||||||
|
|
@ -4499,9 +4497,8 @@ ptp_ocp_phc_info(struct ptp_ocp *bp)
|
||||||
ptp_clock_index(bp->ptp));
|
ptp_clock_index(bp->ptp));
|
||||||
|
|
||||||
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
|
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
|
||||||
dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
|
dev_info(&bp->pdev->dev, "Time: %ptSp, %s\n",
|
||||||
ts.tv_sec, ts.tv_nsec,
|
&ts, bp->sync ? "in-sync" : "UNSYNCED");
|
||||||
bp->sync ? "in-sync" : "UNSYNCED");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
|
|
||||||
|
|
@ -974,8 +974,7 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array)
|
||||||
static void dasd_stats_seq_print(struct seq_file *m,
|
static void dasd_stats_seq_print(struct seq_file *m,
|
||||||
struct dasd_profile_info *data)
|
struct dasd_profile_info *data)
|
||||||
{
|
{
|
||||||
seq_printf(m, "start_time %lld.%09ld\n",
|
seq_printf(m, "start_time %ptSp\n", &data->starttod);
|
||||||
(s64)data->starttod.tv_sec, data->starttod.tv_nsec);
|
|
||||||
seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
|
seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
|
||||||
seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
|
seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
|
||||||
seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
|
seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
|
||||||
|
|
|
||||||
|
|
@ -138,9 +138,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
|
||||||
*/
|
*/
|
||||||
len += scnprintf(fnic_dbgfs_prt->buffer + len,
|
len += scnprintf(fnic_dbgfs_prt->buffer + len,
|
||||||
(trace_max_pages * PAGE_SIZE * 3) - len,
|
(trace_max_pages * PAGE_SIZE * 3) - len,
|
||||||
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
|
"%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n",
|
||||||
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
|
&val, str, tbp->host_no, tbp->tag,
|
||||||
val.tv_nsec, str, tbp->host_no, tbp->tag,
|
|
||||||
tbp->data[0], tbp->data[1], tbp->data[2],
|
tbp->data[0], tbp->data[1], tbp->data[2],
|
||||||
tbp->data[3], tbp->data[4]);
|
tbp->data[3], tbp->data[4]);
|
||||||
rd_idx++;
|
rd_idx++;
|
||||||
|
|
@ -180,9 +179,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
|
||||||
*/
|
*/
|
||||||
len += scnprintf(fnic_dbgfs_prt->buffer + len,
|
len += scnprintf(fnic_dbgfs_prt->buffer + len,
|
||||||
(trace_max_pages * PAGE_SIZE * 3) - len,
|
(trace_max_pages * PAGE_SIZE * 3) - len,
|
||||||
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
|
"%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n",
|
||||||
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
|
&val, str, tbp->host_no, tbp->tag,
|
||||||
val.tv_nsec, str, tbp->host_no, tbp->tag,
|
|
||||||
tbp->data[0], tbp->data[1], tbp->data[2],
|
tbp->data[0], tbp->data[1], tbp->data[2],
|
||||||
tbp->data[3], tbp->data[4]);
|
tbp->data[3], tbp->data[4]);
|
||||||
rd_idx++;
|
rd_idx++;
|
||||||
|
|
@ -215,32 +213,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||||
{
|
{
|
||||||
int len = 0;
|
int len = 0;
|
||||||
int buf_size = debug->buf_size;
|
int buf_size = debug->buf_size;
|
||||||
struct timespec64 val1, val2;
|
struct timespec64 val, val1, val2;
|
||||||
int i = 0;
|
int i = 0;
|
||||||
|
|
||||||
ktime_get_real_ts64(&val1);
|
ktime_get_real_ts64(&val);
|
||||||
len = scnprintf(debug->debug_buffer + len, buf_size - len,
|
len = scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||||
"------------------------------------------\n"
|
"------------------------------------------\n"
|
||||||
"\t\tTime\n"
|
"\t\tTime\n"
|
||||||
"------------------------------------------\n");
|
"------------------------------------------\n");
|
||||||
|
|
||||||
|
val1 = timespec64_sub(val, stats->stats_timestamps.last_reset_time);
|
||||||
|
val2 = timespec64_sub(val, stats->stats_timestamps.last_read_time);
|
||||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||||
"Current time : [%lld:%ld]\n"
|
"Current time : [%ptSp]\n"
|
||||||
"Last stats reset time: [%lld:%09ld]\n"
|
"Last stats reset time: [%ptSp]\n"
|
||||||
"Last stats read time: [%lld:%ld]\n"
|
"Last stats read time: [%ptSp]\n"
|
||||||
"delta since last reset: [%lld:%ld]\n"
|
"delta since last reset: [%ptSp]\n"
|
||||||
"delta since last read: [%lld:%ld]\n",
|
"delta since last read: [%ptSp]\n",
|
||||||
(s64)val1.tv_sec, val1.tv_nsec,
|
&val,
|
||||||
(s64)stats->stats_timestamps.last_reset_time.tv_sec,
|
&stats->stats_timestamps.last_reset_time,
|
||||||
stats->stats_timestamps.last_reset_time.tv_nsec,
|
&stats->stats_timestamps.last_read_time,
|
||||||
(s64)stats->stats_timestamps.last_read_time.tv_sec,
|
&val1, &val2);
|
||||||
stats->stats_timestamps.last_read_time.tv_nsec,
|
|
||||||
(s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
|
|
||||||
timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
|
|
||||||
(s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
|
|
||||||
timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
|
|
||||||
|
|
||||||
stats->stats_timestamps.last_read_time = val1;
|
stats->stats_timestamps.last_read_time = val;
|
||||||
|
|
||||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||||
"------------------------------------------\n"
|
"------------------------------------------\n"
|
||||||
|
|
@ -416,8 +411,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||||
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
|
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
|
||||||
|
|
||||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||||
"Last ISR time: %llu (%8llu.%09lu)\n"
|
"Last ISR time: %llu (%ptSp)\n"
|
||||||
"Last ACK time: %llu (%8llu.%09lu)\n"
|
"Last ACK time: %llu (%ptSp)\n"
|
||||||
"Max ISR jiffies: %llu\n"
|
"Max ISR jiffies: %llu\n"
|
||||||
"Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
|
"Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
|
||||||
"Corr. work done: %llu\n"
|
"Corr. work done: %llu\n"
|
||||||
|
|
@ -437,10 +432,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||||
"Number of rport not ready: %lld\n"
|
"Number of rport not ready: %lld\n"
|
||||||
"Number of receive frame errors: %lld\n"
|
"Number of receive frame errors: %lld\n"
|
||||||
"Port speed (in Mbps): %lld\n",
|
"Port speed (in Mbps): %lld\n",
|
||||||
(u64)stats->misc_stats.last_isr_time,
|
(u64)stats->misc_stats.last_isr_time, &val1,
|
||||||
(s64)val1.tv_sec, val1.tv_nsec,
|
(u64)stats->misc_stats.last_ack_time, &val2,
|
||||||
(u64)stats->misc_stats.last_ack_time,
|
|
||||||
(s64)val2.tv_sec, val2.tv_nsec,
|
|
||||||
(u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
|
(u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
|
||||||
(u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
|
(u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
|
||||||
(u64)atomic64_read(&stats->misc_stats.corr_work_done),
|
(u64)atomic64_read(&stats->misc_stats.corr_work_done),
|
||||||
|
|
@ -857,8 +850,8 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
|
||||||
len = *orig_len;
|
len = *orig_len;
|
||||||
|
|
||||||
len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len,
|
len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len,
|
||||||
"%ptTs.%09lu ns%8x %c%8x\t",
|
"%ptSs ns%8x %c%8x\t",
|
||||||
&tdata->time_stamp.tv_sec, tdata->time_stamp.tv_nsec,
|
&tdata->time_stamp,
|
||||||
tdata->host_no, tdata->frame_type, tdata->frame_len);
|
tdata->host_no, tdata->frame_type, tdata->frame_len);
|
||||||
|
|
||||||
fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
|
fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
|
||||||
|
|
|
||||||
|
|
@ -282,8 +282,8 @@ snic_stats_show(struct seq_file *sfp, void *data)
|
||||||
jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms);
|
jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms);
|
||||||
|
|
||||||
seq_printf(sfp,
|
seq_printf(sfp,
|
||||||
"Last ISR Time : %llu (%8llu.%09lu)\n"
|
"Last ISR Time : %llu (%ptSp)\n"
|
||||||
"Last Ack Time : %llu (%8llu.%09lu)\n"
|
"Last Ack Time : %llu (%ptSp)\n"
|
||||||
"Ack ISRs : %llu\n"
|
"Ack ISRs : %llu\n"
|
||||||
"IO Cmpl ISRs : %llu\n"
|
"IO Cmpl ISRs : %llu\n"
|
||||||
"Err Notify ISRs : %llu\n"
|
"Err Notify ISRs : %llu\n"
|
||||||
|
|
@ -298,10 +298,8 @@ snic_stats_show(struct seq_file *sfp, void *data)
|
||||||
"Queue Ramp Down : %lld\n"
|
"Queue Ramp Down : %lld\n"
|
||||||
"Queue Last Queue Depth : %lld\n"
|
"Queue Last Queue Depth : %lld\n"
|
||||||
"Target Not Ready : %lld\n",
|
"Target Not Ready : %lld\n",
|
||||||
(u64) stats->misc.last_isr_time,
|
(u64) stats->misc.last_isr_time, &last_isr_tms,
|
||||||
last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
|
(u64) stats->misc.last_ack_time, &last_ack_tms,
|
||||||
(u64)stats->misc.last_ack_time,
|
|
||||||
last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
|
|
||||||
(u64) atomic64_read(&stats->misc.ack_isr_cnt),
|
(u64) atomic64_read(&stats->misc.ack_isr_cnt),
|
||||||
(u64) atomic64_read(&stats->misc.cmpl_isr_cnt),
|
(u64) atomic64_read(&stats->misc.cmpl_isr_cnt),
|
||||||
(u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
|
(u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
|
||||||
|
|
|
||||||
|
|
@ -56,9 +56,8 @@ snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
|
||||||
jiffies_to_timespec64(td->ts, &tmspec);
|
jiffies_to_timespec64(td->ts, &tmspec);
|
||||||
|
|
||||||
len += snprintf(buf, buf_sz,
|
len += snprintf(buf, buf_sz,
|
||||||
"%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
|
"%ptSp %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
|
||||||
tmspec.tv_sec,
|
&tmspec,
|
||||||
tmspec.tv_nsec,
|
|
||||||
td->fn,
|
td->fn,
|
||||||
td->hno,
|
td->hno,
|
||||||
td->tag,
|
td->tag,
|
||||||
|
|
|
||||||
|
|
@ -321,7 +321,7 @@ static inline void print_time(char *s)
|
||||||
struct timespec64 ts;
|
struct timespec64 ts;
|
||||||
|
|
||||||
ktime_get_real_ts64(&ts);
|
ktime_get_real_ts64(&ts);
|
||||||
pr_info("%s(): %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec);
|
pr_info("%s(): %ptSp\n", s, &ts);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -577,7 +577,6 @@ static int __init kgdboc_earlycon_init(char *opt)
|
||||||
console_list_lock();
|
console_list_lock();
|
||||||
for_each_console(con) {
|
for_each_console(con) {
|
||||||
if (con->write && con->read &&
|
if (con->write && con->read &&
|
||||||
(con->flags & (CON_BOOT | CON_ENABLED)) &&
|
|
||||||
(!opt || !opt[0] || strcmp(con->name, opt) == 0))
|
(!opt || !opt[0] || strcmp(con->name, opt) == 0))
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -2155,7 +2155,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
|
||||||
" rfiles: %20lld\n"
|
" rfiles: %20lld\n"
|
||||||
" rsubdirs: %20lld\n"
|
" rsubdirs: %20lld\n"
|
||||||
"rbytes: %20lld\n"
|
"rbytes: %20lld\n"
|
||||||
"rctime: %10lld.%09ld\n",
|
"rctime: %ptSp\n",
|
||||||
ci->i_files + ci->i_subdirs,
|
ci->i_files + ci->i_subdirs,
|
||||||
ci->i_files,
|
ci->i_files,
|
||||||
ci->i_subdirs,
|
ci->i_subdirs,
|
||||||
|
|
@ -2163,8 +2163,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
|
||||||
ci->i_rfiles,
|
ci->i_rfiles,
|
||||||
ci->i_rsubdirs,
|
ci->i_rsubdirs,
|
||||||
ci->i_rbytes,
|
ci->i_rbytes,
|
||||||
ci->i_rctime.tv_sec,
|
&ci->i_rctime);
|
||||||
ci->i_rctime.tv_nsec);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (*ppos >= dfi->dir_info_len)
|
if (*ppos >= dfi->dir_info_len)
|
||||||
|
|
|
||||||
|
|
@ -879,7 +879,9 @@ void ceph_fill_file_time(struct inode *inode, int issued,
|
||||||
{
|
{
|
||||||
struct ceph_client *cl = ceph_inode_to_client(inode);
|
struct ceph_client *cl = ceph_inode_to_client(inode);
|
||||||
struct ceph_inode_info *ci = ceph_inode(inode);
|
struct ceph_inode_info *ci = ceph_inode(inode);
|
||||||
|
struct timespec64 iatime = inode_get_atime(inode);
|
||||||
struct timespec64 ictime = inode_get_ctime(inode);
|
struct timespec64 ictime = inode_get_ctime(inode);
|
||||||
|
struct timespec64 imtime = inode_get_mtime(inode);
|
||||||
int warn = 0;
|
int warn = 0;
|
||||||
|
|
||||||
if (issued & (CEPH_CAP_FILE_EXCL|
|
if (issued & (CEPH_CAP_FILE_EXCL|
|
||||||
|
|
@ -889,39 +891,26 @@ void ceph_fill_file_time(struct inode *inode, int issued,
|
||||||
CEPH_CAP_XATTR_EXCL)) {
|
CEPH_CAP_XATTR_EXCL)) {
|
||||||
if (ci->i_version == 0 ||
|
if (ci->i_version == 0 ||
|
||||||
timespec64_compare(ctime, &ictime) > 0) {
|
timespec64_compare(ctime, &ictime) > 0) {
|
||||||
doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
|
doutc(cl, "ctime %ptSp -> %ptSp inc w/ cap\n", &ictime, ctime);
|
||||||
ictime.tv_sec, ictime.tv_nsec,
|
|
||||||
ctime->tv_sec, ctime->tv_nsec);
|
|
||||||
inode_set_ctime_to_ts(inode, *ctime);
|
inode_set_ctime_to_ts(inode, *ctime);
|
||||||
}
|
}
|
||||||
if (ci->i_version == 0 ||
|
if (ci->i_version == 0 ||
|
||||||
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
|
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
|
||||||
/* the MDS did a utimes() */
|
/* the MDS did a utimes() */
|
||||||
doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
|
doutc(cl, "mtime %ptSp -> %ptSp tw %d -> %d\n", &imtime, mtime,
|
||||||
inode_get_mtime_sec(inode),
|
ci->i_time_warp_seq, (int)time_warp_seq);
|
||||||
inode_get_mtime_nsec(inode),
|
|
||||||
mtime->tv_sec, mtime->tv_nsec,
|
|
||||||
ci->i_time_warp_seq, (int)time_warp_seq);
|
|
||||||
|
|
||||||
inode_set_mtime_to_ts(inode, *mtime);
|
inode_set_mtime_to_ts(inode, *mtime);
|
||||||
inode_set_atime_to_ts(inode, *atime);
|
inode_set_atime_to_ts(inode, *atime);
|
||||||
ci->i_time_warp_seq = time_warp_seq;
|
ci->i_time_warp_seq = time_warp_seq;
|
||||||
} else if (time_warp_seq == ci->i_time_warp_seq) {
|
} else if (time_warp_seq == ci->i_time_warp_seq) {
|
||||||
struct timespec64 ts;
|
|
||||||
|
|
||||||
/* nobody did utimes(); take the max */
|
/* nobody did utimes(); take the max */
|
||||||
ts = inode_get_mtime(inode);
|
if (timespec64_compare(mtime, &imtime) > 0) {
|
||||||
if (timespec64_compare(mtime, &ts) > 0) {
|
doutc(cl, "mtime %ptSp -> %ptSp inc\n", &imtime, mtime);
|
||||||
doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
|
|
||||||
ts.tv_sec, ts.tv_nsec,
|
|
||||||
mtime->tv_sec, mtime->tv_nsec);
|
|
||||||
inode_set_mtime_to_ts(inode, *mtime);
|
inode_set_mtime_to_ts(inode, *mtime);
|
||||||
}
|
}
|
||||||
ts = inode_get_atime(inode);
|
if (timespec64_compare(atime, &iatime) > 0) {
|
||||||
if (timespec64_compare(atime, &ts) > 0) {
|
doutc(cl, "atime %ptSp -> %ptSp inc\n", &iatime, atime);
|
||||||
doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
|
|
||||||
ts.tv_sec, ts.tv_nsec,
|
|
||||||
atime->tv_sec, atime->tv_nsec);
|
|
||||||
inode_set_atime_to_ts(inode, *atime);
|
inode_set_atime_to_ts(inode, *atime);
|
||||||
}
|
}
|
||||||
} else if (issued & CEPH_CAP_FILE_EXCL) {
|
} else if (issued & CEPH_CAP_FILE_EXCL) {
|
||||||
|
|
@ -2703,10 +2692,8 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
|
||||||
if (ia_valid & ATTR_ATIME) {
|
if (ia_valid & ATTR_ATIME) {
|
||||||
struct timespec64 atime = inode_get_atime(inode);
|
struct timespec64 atime = inode_get_atime(inode);
|
||||||
|
|
||||||
doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
|
doutc(cl, "%p %llx.%llx atime %ptSp -> %ptSp\n",
|
||||||
inode, ceph_vinop(inode),
|
inode, ceph_vinop(inode), &atime, &attr->ia_atime);
|
||||||
atime.tv_sec, atime.tv_nsec,
|
|
||||||
attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
|
|
||||||
if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
|
if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
|
||||||
ci->i_time_warp_seq++;
|
ci->i_time_warp_seq++;
|
||||||
inode_set_atime_to_ts(inode, attr->ia_atime);
|
inode_set_atime_to_ts(inode, attr->ia_atime);
|
||||||
|
|
@ -2780,10 +2767,8 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
|
||||||
if (ia_valid & ATTR_MTIME) {
|
if (ia_valid & ATTR_MTIME) {
|
||||||
struct timespec64 mtime = inode_get_mtime(inode);
|
struct timespec64 mtime = inode_get_mtime(inode);
|
||||||
|
|
||||||
doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
|
doutc(cl, "%p %llx.%llx mtime %ptSp -> %ptSp\n",
|
||||||
inode, ceph_vinop(inode),
|
inode, ceph_vinop(inode), &mtime, &attr->ia_mtime);
|
||||||
mtime.tv_sec, mtime.tv_nsec,
|
|
||||||
attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
|
|
||||||
if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
|
if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
|
||||||
ci->i_time_warp_seq++;
|
ci->i_time_warp_seq++;
|
||||||
inode_set_mtime_to_ts(inode, attr->ia_mtime);
|
inode_set_mtime_to_ts(inode, attr->ia_mtime);
|
||||||
|
|
@ -2804,13 +2789,11 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
|
||||||
|
|
||||||
/* these do nothing */
|
/* these do nothing */
|
||||||
if (ia_valid & ATTR_CTIME) {
|
if (ia_valid & ATTR_CTIME) {
|
||||||
|
struct timespec64 ictime = inode_get_ctime(inode);
|
||||||
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
|
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
|
||||||
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
|
||||||
doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
|
doutc(cl, "%p %llx.%llx ctime %ptSp -> %ptSp (%s)\n",
|
||||||
inode, ceph_vinop(inode),
|
inode, ceph_vinop(inode), &ictime, &attr->ia_ctime,
|
||||||
inode_get_ctime_sec(inode),
|
|
||||||
inode_get_ctime_nsec(inode),
|
|
||||||
attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
|
|
||||||
only ? "ctime only" : "ignored");
|
only ? "ctime only" : "ignored");
|
||||||
if (only) {
|
if (only) {
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -249,8 +249,7 @@ static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
|
||||||
static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
|
static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
|
||||||
size_t size)
|
size_t size)
|
||||||
{
|
{
|
||||||
return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
|
return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_rctime);
|
||||||
ci->i_rctime.tv_nsec);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* dir pin */
|
/* dir pin */
|
||||||
|
|
@ -307,8 +306,7 @@ static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
|
||||||
static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
|
static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
|
||||||
size_t size)
|
size_t size)
|
||||||
{
|
{
|
||||||
return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
|
return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_snap_btime);
|
||||||
ci->i_snap_btime.tv_nsec);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
|
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
|
||||||
|
|
|
||||||
|
|
@ -19,6 +19,7 @@
|
||||||
#include <linux/irq_work.h>
|
#include <linux/irq_work.h>
|
||||||
#include <linux/rculist.h>
|
#include <linux/rculist.h>
|
||||||
#include <linux/rcuwait.h>
|
#include <linux/rcuwait.h>
|
||||||
|
#include <linux/smp.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/vesa.h>
|
#include <linux/vesa.h>
|
||||||
|
|
||||||
|
|
@ -185,6 +186,8 @@ static inline void con_debug_leave(void) { }
|
||||||
* printing callbacks must not be called.
|
* printing callbacks must not be called.
|
||||||
* @CON_NBCON: Console can operate outside of the legacy style console_lock
|
* @CON_NBCON: Console can operate outside of the legacy style console_lock
|
||||||
* constraints.
|
* constraints.
|
||||||
|
* @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
|
||||||
|
* therefore only used by nbcon_atomic_flush_unsafe().
|
||||||
*/
|
*/
|
||||||
enum cons_flags {
|
enum cons_flags {
|
||||||
CON_PRINTBUFFER = BIT(0),
|
CON_PRINTBUFFER = BIT(0),
|
||||||
|
|
@ -196,6 +199,7 @@ enum cons_flags {
|
||||||
CON_EXTENDED = BIT(6),
|
CON_EXTENDED = BIT(6),
|
||||||
CON_SUSPENDED = BIT(7),
|
CON_SUSPENDED = BIT(7),
|
||||||
CON_NBCON = BIT(8),
|
CON_NBCON = BIT(8),
|
||||||
|
CON_NBCON_ATOMIC_UNSAFE = BIT(9),
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -602,16 +606,80 @@ static inline bool console_is_registered(const struct console *con)
|
||||||
extern void nbcon_cpu_emergency_enter(void);
|
extern void nbcon_cpu_emergency_enter(void);
|
||||||
extern void nbcon_cpu_emergency_exit(void);
|
extern void nbcon_cpu_emergency_exit(void);
|
||||||
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
|
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
|
||||||
|
extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
|
||||||
|
char *buf, unsigned int len);
|
||||||
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
|
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
|
||||||
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
|
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
|
||||||
extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
|
extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
|
||||||
|
extern bool nbcon_allow_unsafe_takeover(void);
|
||||||
|
extern bool nbcon_kdb_try_acquire(struct console *con,
|
||||||
|
struct nbcon_write_context *wctxt);
|
||||||
|
extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Check if the given console is currently capable and allowed to print
|
||||||
|
* records. Note that this function does not consider the current context,
|
||||||
|
* which can also play a role in deciding if @con can be used to print
|
||||||
|
* records.
|
||||||
|
*/
|
||||||
|
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
|
||||||
|
{
|
||||||
|
if (!(flags & CON_ENABLED))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if ((flags & CON_SUSPENDED))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (flags & CON_NBCON) {
|
||||||
|
if (use_atomic) {
|
||||||
|
/* The write_atomic() callback is optional. */
|
||||||
|
if (!con->write_atomic)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* An unsafe write_atomic() callback is only usable
|
||||||
|
* when unsafe takeovers are allowed.
|
||||||
|
*/
|
||||||
|
if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* For the !use_atomic case, @printk_kthreads_running is not
|
||||||
|
* checked because the write_thread() callback is also used
|
||||||
|
* via the legacy loop when the printer threads are not
|
||||||
|
* available.
|
||||||
|
*/
|
||||||
|
} else {
|
||||||
|
if (!con->write)
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Console drivers may assume that per-cpu resources have been
|
||||||
|
* allocated. So unless they're explicitly marked as being able to
|
||||||
|
* cope (CON_ANYTIME) don't call them until this CPU is officially up.
|
||||||
|
*/
|
||||||
|
if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
static inline void nbcon_cpu_emergency_enter(void) { }
|
static inline void nbcon_cpu_emergency_enter(void) { }
|
||||||
static inline void nbcon_cpu_emergency_exit(void) { }
|
static inline void nbcon_cpu_emergency_exit(void) { }
|
||||||
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
|
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
|
||||||
|
static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
|
||||||
|
char *buf, unsigned int len) { }
|
||||||
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
|
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
|
||||||
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
|
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
|
||||||
static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
|
static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
|
||||||
|
static inline bool nbcon_kdb_try_acquire(struct console *con,
|
||||||
|
struct nbcon_write_context *wctxt) { return false; }
|
||||||
|
static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { }
|
||||||
|
static inline bool console_is_usable(struct console *con, short flags,
|
||||||
|
bool use_atomic) { return false; }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
extern int console_set_on_cmdline;
|
extern int console_set_on_cmdline;
|
||||||
|
|
|
||||||
|
|
@ -14,6 +14,7 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/list.h>
|
#include <linux/list.h>
|
||||||
|
#include <linux/smp.h>
|
||||||
|
|
||||||
/* Shifted versions of the command enable bits are be used if the command
|
/* Shifted versions of the command enable bits are be used if the command
|
||||||
* has no arguments (see kdb_check_flags). This allows commands, such as
|
* has no arguments (see kdb_check_flags). This allows commands, such as
|
||||||
|
|
@ -207,11 +208,26 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
|
||||||
/* Dynamic kdb shell command registration */
|
/* Dynamic kdb shell command registration */
|
||||||
extern int kdb_register(kdbtab_t *cmd);
|
extern int kdb_register(kdbtab_t *cmd);
|
||||||
extern void kdb_unregister(kdbtab_t *cmd);
|
extern void kdb_unregister(kdbtab_t *cmd);
|
||||||
|
|
||||||
|
/* Return true when KDB as locked for printing a message on this CPU. */
|
||||||
|
static inline
|
||||||
|
bool kdb_printf_on_this_cpu(void)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* We can use raw_smp_processor_id() here because the task could
|
||||||
|
* not get migrated when KDB has locked for printing on this CPU.
|
||||||
|
*/
|
||||||
|
return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id());
|
||||||
|
}
|
||||||
|
|
||||||
#else /* ! CONFIG_KGDB_KDB */
|
#else /* ! CONFIG_KGDB_KDB */
|
||||||
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
|
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
|
||||||
static inline void kdb_init(int level) {}
|
static inline void kdb_init(int level) {}
|
||||||
static inline int kdb_register(kdbtab_t *cmd) { return 0; }
|
static inline int kdb_register(kdbtab_t *cmd) { return 0; }
|
||||||
static inline void kdb_unregister(kdbtab_t *cmd) {}
|
static inline void kdb_unregister(kdbtab_t *cmd) {}
|
||||||
|
|
||||||
|
static inline bool kdb_printf_on_this_cpu(void) { return false; }
|
||||||
|
|
||||||
#endif /* CONFIG_KGDB_KDB */
|
#endif /* CONFIG_KGDB_KDB */
|
||||||
enum {
|
enum {
|
||||||
KDB_NOT_INITIALIZED,
|
KDB_NOT_INITIALIZED,
|
||||||
|
|
|
||||||
|
|
@ -589,24 +589,41 @@ static void kdb_msg_write(const char *msg, int msg_len)
|
||||||
*/
|
*/
|
||||||
cookie = console_srcu_read_lock();
|
cookie = console_srcu_read_lock();
|
||||||
for_each_console_srcu(c) {
|
for_each_console_srcu(c) {
|
||||||
if (!(console_srcu_read_flags(c) & CON_ENABLED))
|
short flags = console_srcu_read_flags(c);
|
||||||
|
|
||||||
|
if (!console_is_usable(c, flags, true))
|
||||||
continue;
|
continue;
|
||||||
if (c == dbg_io_ops->cons)
|
if (c == dbg_io_ops->cons)
|
||||||
continue;
|
continue;
|
||||||
if (!c->write)
|
|
||||||
continue;
|
if (flags & CON_NBCON) {
|
||||||
/*
|
struct nbcon_write_context wctxt = { };
|
||||||
* Set oops_in_progress to encourage the console drivers to
|
|
||||||
* disregard their internal spin locks: in the current calling
|
/*
|
||||||
* context the risk of deadlock is a bigger problem than risks
|
* Do not continue if the console is NBCON and the context
|
||||||
* due to re-entering the console driver. We operate directly on
|
* can't be acquired.
|
||||||
* oops_in_progress rather than using bust_spinlocks() because
|
*/
|
||||||
* the calls bust_spinlocks() makes on exit are not appropriate
|
if (!nbcon_kdb_try_acquire(c, &wctxt))
|
||||||
* for this calling context.
|
continue;
|
||||||
*/
|
|
||||||
++oops_in_progress;
|
nbcon_write_context_set_buf(&wctxt, (char *)msg, msg_len);
|
||||||
c->write(c, msg, msg_len);
|
|
||||||
--oops_in_progress;
|
c->write_atomic(c, &wctxt);
|
||||||
|
nbcon_kdb_release(&wctxt);
|
||||||
|
} else {
|
||||||
|
/*
|
||||||
|
* Set oops_in_progress to encourage the console drivers to
|
||||||
|
* disregard their internal spin locks: in the current calling
|
||||||
|
* context the risk of deadlock is a bigger problem than risks
|
||||||
|
* due to re-entering the console driver. We operate directly on
|
||||||
|
* oops_in_progress rather than using bust_spinlocks() because
|
||||||
|
* the calls bust_spinlocks() makes on exit are not appropriate
|
||||||
|
* for this calling context.
|
||||||
|
*/
|
||||||
|
++oops_in_progress;
|
||||||
|
c->write(c, msg, msg_len);
|
||||||
|
--oops_in_progress;
|
||||||
|
}
|
||||||
touch_nmi_watchdog();
|
touch_nmi_watchdog();
|
||||||
}
|
}
|
||||||
console_srcu_read_unlock(cookie);
|
console_srcu_read_unlock(cookie);
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@
|
||||||
* internal.h - printk internal definitions
|
* internal.h - printk internal definitions
|
||||||
*/
|
*/
|
||||||
#include <linux/console.h>
|
#include <linux/console.h>
|
||||||
#include <linux/percpu.h>
|
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
|
|
||||||
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
|
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
|
||||||
|
|
@ -112,47 +111,6 @@ bool nbcon_kthread_create(struct console *con);
|
||||||
void nbcon_kthread_stop(struct console *con);
|
void nbcon_kthread_stop(struct console *con);
|
||||||
void nbcon_kthreads_wake(void);
|
void nbcon_kthreads_wake(void);
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if the given console is currently capable and allowed to print
|
|
||||||
* records. Note that this function does not consider the current context,
|
|
||||||
* which can also play a role in deciding if @con can be used to print
|
|
||||||
* records.
|
|
||||||
*/
|
|
||||||
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
|
|
||||||
{
|
|
||||||
if (!(flags & CON_ENABLED))
|
|
||||||
return false;
|
|
||||||
|
|
||||||
if ((flags & CON_SUSPENDED))
|
|
||||||
return false;
|
|
||||||
|
|
||||||
if (flags & CON_NBCON) {
|
|
||||||
/* The write_atomic() callback is optional. */
|
|
||||||
if (use_atomic && !con->write_atomic)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* For the !use_atomic case, @printk_kthreads_running is not
|
|
||||||
* checked because the write_thread() callback is also used
|
|
||||||
* via the legacy loop when the printer threads are not
|
|
||||||
* available.
|
|
||||||
*/
|
|
||||||
} else {
|
|
||||||
if (!con->write)
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Console drivers may assume that per-cpu resources have been
|
|
||||||
* allocated. So unless they're explicitly marked as being able to
|
|
||||||
* cope (CON_ANYTIME) don't call them until this CPU is officially up.
|
|
||||||
*/
|
|
||||||
if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
|
|
||||||
return false;
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* nbcon_kthread_wake - Wake up a console printing thread
|
* nbcon_kthread_wake - Wake up a console printing thread
|
||||||
* @con: Console to operate on
|
* @con: Console to operate on
|
||||||
|
|
@ -204,9 +162,6 @@ static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *hand
|
||||||
static inline void nbcon_kthread_wake(struct console *con) { }
|
static inline void nbcon_kthread_wake(struct console *con) { }
|
||||||
static inline void nbcon_kthreads_wake(void) { }
|
static inline void nbcon_kthreads_wake(void) { }
|
||||||
|
|
||||||
static inline bool console_is_usable(struct console *con, short flags,
|
|
||||||
bool use_atomic) { return false; }
|
|
||||||
|
|
||||||
#endif /* CONFIG_PRINTK */
|
#endif /* CONFIG_PRINTK */
|
||||||
|
|
||||||
extern bool have_boot_console;
|
extern bool have_boot_console;
|
||||||
|
|
@ -230,6 +185,8 @@ struct console_flush_type {
|
||||||
bool legacy_offload;
|
bool legacy_offload;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
extern bool console_irqwork_blocked;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Identify which console flushing methods should be used in the context of
|
* Identify which console flushing methods should be used in the context of
|
||||||
* the caller.
|
* the caller.
|
||||||
|
|
@ -241,7 +198,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
|
||||||
switch (nbcon_get_default_prio()) {
|
switch (nbcon_get_default_prio()) {
|
||||||
case NBCON_PRIO_NORMAL:
|
case NBCON_PRIO_NORMAL:
|
||||||
if (have_nbcon_console && !have_boot_console) {
|
if (have_nbcon_console && !have_boot_console) {
|
||||||
if (printk_kthreads_running)
|
if (printk_kthreads_running && !console_irqwork_blocked)
|
||||||
ft->nbcon_offload = true;
|
ft->nbcon_offload = true;
|
||||||
else
|
else
|
||||||
ft->nbcon_atomic = true;
|
ft->nbcon_atomic = true;
|
||||||
|
|
@ -251,7 +208,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
|
||||||
if (have_legacy_console || have_boot_console) {
|
if (have_legacy_console || have_boot_console) {
|
||||||
if (!is_printk_legacy_deferred())
|
if (!is_printk_legacy_deferred())
|
||||||
ft->legacy_direct = true;
|
ft->legacy_direct = true;
|
||||||
else
|
else if (!console_irqwork_blocked)
|
||||||
ft->legacy_offload = true;
|
ft->legacy_offload = true;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
|
@ -264,7 +221,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
|
||||||
if (have_legacy_console || have_boot_console) {
|
if (have_legacy_console || have_boot_console) {
|
||||||
if (!is_printk_legacy_deferred())
|
if (!is_printk_legacy_deferred())
|
||||||
ft->legacy_direct = true;
|
ft->legacy_direct = true;
|
||||||
else
|
else if (!console_irqwork_blocked)
|
||||||
ft->legacy_offload = true;
|
ft->legacy_offload = true;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
|
|
||||||
|
|
@ -10,6 +10,7 @@
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/irqflags.h>
|
#include <linux/irqflags.h>
|
||||||
|
#include <linux/kdb.h>
|
||||||
#include <linux/kthread.h>
|
#include <linux/kthread.h>
|
||||||
#include <linux/minmax.h>
|
#include <linux/minmax.h>
|
||||||
#include <linux/panic.h>
|
#include <linux/panic.h>
|
||||||
|
|
@ -118,6 +119,9 @@
|
||||||
* from scratch.
|
* from scratch.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/* Counter of active nbcon emergency contexts. */
|
||||||
|
static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* nbcon_state_set - Helper function to set the console state
|
* nbcon_state_set - Helper function to set the console state
|
||||||
* @con: Console to update
|
* @con: Console to update
|
||||||
|
|
@ -249,13 +253,16 @@ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
|
||||||
* since all non-panic CPUs are stopped during panic(), it
|
* since all non-panic CPUs are stopped during panic(), it
|
||||||
* is safer to have them avoid gaining console ownership.
|
* is safer to have them avoid gaining console ownership.
|
||||||
*
|
*
|
||||||
* If this acquire is a reacquire (and an unsafe takeover
|
* One exception is when kdb has locked for printing on this CPU.
|
||||||
|
*
|
||||||
|
* Second exception is a reacquire (and an unsafe takeover
|
||||||
* has not previously occurred) then it is allowed to attempt
|
* has not previously occurred) then it is allowed to attempt
|
||||||
* a direct acquire in panic. This gives console drivers an
|
* a direct acquire in panic. This gives console drivers an
|
||||||
* opportunity to perform any necessary cleanup if they were
|
* opportunity to perform any necessary cleanup if they were
|
||||||
* interrupted by the panic CPU while printing.
|
* interrupted by the panic CPU while printing.
|
||||||
*/
|
*/
|
||||||
if (panic_on_other_cpu() &&
|
if (panic_on_other_cpu() &&
|
||||||
|
!kdb_printf_on_this_cpu() &&
|
||||||
(!is_reacquire || cur->unsafe_takeover)) {
|
(!is_reacquire || cur->unsafe_takeover)) {
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
}
|
}
|
||||||
|
|
@ -850,8 +857,8 @@ static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsaf
|
||||||
return nbcon_context_can_proceed(ctxt, &cur);
|
return nbcon_context_can_proceed(ctxt, &cur);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
|
void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
|
||||||
char *buf, unsigned int len)
|
char *buf, unsigned int len)
|
||||||
{
|
{
|
||||||
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
|
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
|
||||||
struct console *con = ctxt->console;
|
struct console *con = ctxt->console;
|
||||||
|
|
@ -1163,6 +1170,17 @@ static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_contex
|
||||||
if (kthread_should_stop())
|
if (kthread_should_stop())
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Block the kthread when the system is in an emergency or panic mode.
|
||||||
|
* It increases the chance that these contexts would be able to show
|
||||||
|
* the messages directly. And it reduces the risk of interrupted writes
|
||||||
|
* where the context with a higher priority takes over the nbcon console
|
||||||
|
* ownership in the middle of a message.
|
||||||
|
*/
|
||||||
|
if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
|
||||||
|
unlikely(panic_in_progress()))
|
||||||
|
return false;
|
||||||
|
|
||||||
cookie = console_srcu_read_lock();
|
cookie = console_srcu_read_lock();
|
||||||
|
|
||||||
flags = console_srcu_read_flags(con);
|
flags = console_srcu_read_flags(con);
|
||||||
|
|
@ -1214,6 +1232,14 @@ static int nbcon_kthread_func(void *__console)
|
||||||
if (kthread_should_stop())
|
if (kthread_should_stop())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Block the kthread when the system is in an emergency or panic
|
||||||
|
* mode. See nbcon_kthread_should_wakeup() for more details.
|
||||||
|
*/
|
||||||
|
if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
|
||||||
|
unlikely(panic_in_progress()))
|
||||||
|
goto wait_for_event;
|
||||||
|
|
||||||
backlog = false;
|
backlog = false;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -1276,6 +1302,13 @@ void nbcon_kthreads_wake(void)
|
||||||
if (!printk_kthreads_running)
|
if (!printk_kthreads_running)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* It is not allowed to call this function when console irq_work
|
||||||
|
* is blocked.
|
||||||
|
*/
|
||||||
|
if (WARN_ON_ONCE(console_irqwork_blocked))
|
||||||
|
return;
|
||||||
|
|
||||||
cookie = console_srcu_read_lock();
|
cookie = console_srcu_read_lock();
|
||||||
for_each_console_srcu(con) {
|
for_each_console_srcu(con) {
|
||||||
if (!(console_srcu_read_flags(con) & CON_NBCON))
|
if (!(console_srcu_read_flags(con) & CON_NBCON))
|
||||||
|
|
@ -1404,6 +1437,26 @@ enum nbcon_prio nbcon_get_default_prio(void)
|
||||||
return NBCON_PRIO_NORMAL;
|
return NBCON_PRIO_NORMAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Track if it is allowed to perform unsafe hostile takeovers of console
|
||||||
|
* ownership. When true, console drivers might perform unsafe actions while
|
||||||
|
* printing. It is externally available via nbcon_allow_unsafe_takeover().
|
||||||
|
*/
|
||||||
|
static bool panic_nbcon_allow_unsafe_takeover;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
|
||||||
|
*
|
||||||
|
* Return: True, when it is permitted to perform unsafe console printing
|
||||||
|
*
|
||||||
|
* This is also used by console_is_usable() to determine if it is allowed to
|
||||||
|
* call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
|
||||||
|
*/
|
||||||
|
bool nbcon_allow_unsafe_takeover(void)
|
||||||
|
{
|
||||||
|
return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* nbcon_legacy_emit_next_record - Print one record for an nbcon console
|
* nbcon_legacy_emit_next_record - Print one record for an nbcon console
|
||||||
* in legacy contexts
|
* in legacy contexts
|
||||||
|
|
@ -1474,7 +1527,6 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
|
||||||
* write_atomic() callback
|
* write_atomic() callback
|
||||||
* @con: The nbcon console to flush
|
* @con: The nbcon console to flush
|
||||||
* @stop_seq: Flush up until this record
|
* @stop_seq: Flush up until this record
|
||||||
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
|
|
||||||
*
|
*
|
||||||
* Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on
|
* Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on
|
||||||
* failure.
|
* failure.
|
||||||
|
|
@ -1493,8 +1545,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
|
||||||
* returned, it cannot be expected that the unfinalized record will become
|
* returned, it cannot be expected that the unfinalized record will become
|
||||||
* available.
|
* available.
|
||||||
*/
|
*/
|
||||||
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
|
||||||
bool allow_unsafe_takeover)
|
|
||||||
{
|
{
|
||||||
struct nbcon_write_context wctxt = { };
|
struct nbcon_write_context wctxt = { };
|
||||||
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
|
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
|
||||||
|
|
@ -1503,12 +1554,12 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
||||||
ctxt->console = con;
|
ctxt->console = con;
|
||||||
ctxt->spinwait_max_us = 2000;
|
ctxt->spinwait_max_us = 2000;
|
||||||
ctxt->prio = nbcon_get_default_prio();
|
ctxt->prio = nbcon_get_default_prio();
|
||||||
ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
|
ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover();
|
||||||
|
|
||||||
if (!nbcon_context_try_acquire(ctxt, false))
|
|
||||||
return -EPERM;
|
|
||||||
|
|
||||||
while (nbcon_seq_read(con) < stop_seq) {
|
while (nbcon_seq_read(con) < stop_seq) {
|
||||||
|
if (!nbcon_context_try_acquire(ctxt, false))
|
||||||
|
return -EPERM;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* nbcon_emit_next_record() returns false when the console was
|
* nbcon_emit_next_record() returns false when the console was
|
||||||
* handed over or taken over. In both cases the context is no
|
* handed over or taken over. In both cases the context is no
|
||||||
|
|
@ -1517,6 +1568,8 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
||||||
if (!nbcon_emit_next_record(&wctxt, true))
|
if (!nbcon_emit_next_record(&wctxt, true))
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
|
|
||||||
|
nbcon_context_release(ctxt);
|
||||||
|
|
||||||
if (!ctxt->backlog) {
|
if (!ctxt->backlog) {
|
||||||
/* Are there reserved but not yet finalized records? */
|
/* Are there reserved but not yet finalized records? */
|
||||||
if (nbcon_seq_read(con) < stop_seq)
|
if (nbcon_seq_read(con) < stop_seq)
|
||||||
|
|
@ -1525,7 +1578,6 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
nbcon_context_release(ctxt);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1534,15 +1586,13 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
||||||
* write_atomic() callback
|
* write_atomic() callback
|
||||||
* @con: The nbcon console to flush
|
* @con: The nbcon console to flush
|
||||||
* @stop_seq: Flush up until this record
|
* @stop_seq: Flush up until this record
|
||||||
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
|
|
||||||
*
|
*
|
||||||
* This will stop flushing before @stop_seq if another context has ownership.
|
* This will stop flushing before @stop_seq if another context has ownership.
|
||||||
* That context is then responsible for the flushing. Likewise, if new records
|
* That context is then responsible for the flushing. Likewise, if new records
|
||||||
* are added while this context was flushing and there is no other context
|
* are added while this context was flushing and there is no other context
|
||||||
* to handle the printing, this context must also flush those records.
|
* to handle the printing, this context must also flush those records.
|
||||||
*/
|
*/
|
||||||
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
|
||||||
bool allow_unsafe_takeover)
|
|
||||||
{
|
{
|
||||||
struct console_flush_type ft;
|
struct console_flush_type ft;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
@ -1557,7 +1607,7 @@ static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
||||||
*/
|
*/
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
|
||||||
err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
|
err = __nbcon_atomic_flush_pending_con(con, stop_seq);
|
||||||
|
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
|
||||||
|
|
@ -1589,9 +1639,8 @@ static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
|
||||||
* __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
|
* __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
|
||||||
* write_atomic() callback
|
* write_atomic() callback
|
||||||
* @stop_seq: Flush up until this record
|
* @stop_seq: Flush up until this record
|
||||||
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
|
|
||||||
*/
|
*/
|
||||||
static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
|
static void __nbcon_atomic_flush_pending(u64 stop_seq)
|
||||||
{
|
{
|
||||||
struct console *con;
|
struct console *con;
|
||||||
int cookie;
|
int cookie;
|
||||||
|
|
@ -1609,7 +1658,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
|
||||||
if (nbcon_seq_read(con) >= stop_seq)
|
if (nbcon_seq_read(con) >= stop_seq)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
|
nbcon_atomic_flush_pending_con(con, stop_seq);
|
||||||
}
|
}
|
||||||
console_srcu_read_unlock(cookie);
|
console_srcu_read_unlock(cookie);
|
||||||
}
|
}
|
||||||
|
|
@ -1625,7 +1674,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
|
||||||
*/
|
*/
|
||||||
void nbcon_atomic_flush_pending(void)
|
void nbcon_atomic_flush_pending(void)
|
||||||
{
|
{
|
||||||
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
|
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -1637,7 +1686,9 @@ void nbcon_atomic_flush_pending(void)
|
||||||
*/
|
*/
|
||||||
void nbcon_atomic_flush_unsafe(void)
|
void nbcon_atomic_flush_unsafe(void)
|
||||||
{
|
{
|
||||||
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
|
panic_nbcon_allow_unsafe_takeover = true;
|
||||||
|
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
|
||||||
|
panic_nbcon_allow_unsafe_takeover = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -1655,6 +1706,8 @@ void nbcon_cpu_emergency_enter(void)
|
||||||
|
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
|
|
||||||
|
atomic_inc(&nbcon_cpu_emergency_cnt);
|
||||||
|
|
||||||
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
|
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
|
||||||
(*cpu_emergency_nesting)++;
|
(*cpu_emergency_nesting)++;
|
||||||
}
|
}
|
||||||
|
|
@ -1669,10 +1722,24 @@ void nbcon_cpu_emergency_exit(void)
|
||||||
unsigned int *cpu_emergency_nesting;
|
unsigned int *cpu_emergency_nesting;
|
||||||
|
|
||||||
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
|
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
|
||||||
|
|
||||||
if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
|
if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
|
||||||
(*cpu_emergency_nesting)--;
|
(*cpu_emergency_nesting)--;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Wake up kthreads because there might be some pending messages
|
||||||
|
* added by other CPUs with normal priority since the last flush
|
||||||
|
* in the emergency context.
|
||||||
|
*/
|
||||||
|
if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) {
|
||||||
|
if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
|
||||||
|
struct console_flush_type ft;
|
||||||
|
|
||||||
|
printk_get_console_flush_type(&ft);
|
||||||
|
if (ft.nbcon_offload)
|
||||||
|
nbcon_kthreads_wake();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
preempt_enable();
|
preempt_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1844,14 +1911,75 @@ void nbcon_device_release(struct console *con)
|
||||||
* using the legacy loop.
|
* using the legacy loop.
|
||||||
*/
|
*/
|
||||||
if (ft.nbcon_atomic) {
|
if (ft.nbcon_atomic) {
|
||||||
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
|
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
|
||||||
} else if (ft.legacy_direct) {
|
} else if (ft.legacy_direct) {
|
||||||
if (console_trylock())
|
if (console_trylock())
|
||||||
console_unlock();
|
console_unlock();
|
||||||
} else if (ft.legacy_offload) {
|
} else if (ft.legacy_offload) {
|
||||||
printk_trigger_flush();
|
defer_console_output();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
console_srcu_read_unlock(cookie);
|
console_srcu_read_unlock(cookie);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(nbcon_device_release);
|
EXPORT_SYMBOL_GPL(nbcon_device_release);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* nbcon_kdb_try_acquire - Try to acquire nbcon console and enter unsafe
|
||||||
|
* section
|
||||||
|
* @con: The nbcon console to acquire
|
||||||
|
* @wctxt: The nbcon write context to be used on success
|
||||||
|
*
|
||||||
|
* Context: Under console_srcu_read_lock() for emitting a single kdb message
|
||||||
|
* using the given con->write_atomic() callback. Can be called
|
||||||
|
* only when the console is usable at the moment.
|
||||||
|
*
|
||||||
|
* Return: True if the console was acquired. False otherwise.
|
||||||
|
*
|
||||||
|
* kdb emits messages on consoles registered for printk() without
|
||||||
|
* storing them into the ring buffer. It has to acquire the console
|
||||||
|
* ownerhip so that it could call con->write_atomic() callback a safe way.
|
||||||
|
*
|
||||||
|
* This function acquires the nbcon console using priority NBCON_PRIO_EMERGENCY
|
||||||
|
* and marks it unsafe for handover/takeover.
|
||||||
|
*/
|
||||||
|
bool nbcon_kdb_try_acquire(struct console *con,
|
||||||
|
struct nbcon_write_context *wctxt)
|
||||||
|
{
|
||||||
|
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
|
||||||
|
|
||||||
|
memset(ctxt, 0, sizeof(*ctxt));
|
||||||
|
ctxt->console = con;
|
||||||
|
ctxt->prio = NBCON_PRIO_EMERGENCY;
|
||||||
|
|
||||||
|
if (!nbcon_context_try_acquire(ctxt, false))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (!nbcon_context_enter_unsafe(ctxt))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* nbcon_kdb_release - Exit unsafe section and release the nbcon console
|
||||||
|
*
|
||||||
|
* @wctxt: The nbcon write context initialized by a successful
|
||||||
|
* nbcon_kdb_try_acquire()
|
||||||
|
*/
|
||||||
|
void nbcon_kdb_release(struct nbcon_write_context *wctxt)
|
||||||
|
{
|
||||||
|
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
|
||||||
|
|
||||||
|
if (!nbcon_context_exit_unsafe(ctxt))
|
||||||
|
return;
|
||||||
|
|
||||||
|
nbcon_context_release(ctxt);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Flush any new printk() messages added when the console was blocked.
|
||||||
|
* Only the console used by the given write context was blocked.
|
||||||
|
* The console was locked only when the write_atomic() callback
|
||||||
|
* was usable.
|
||||||
|
*/
|
||||||
|
__nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -462,6 +462,9 @@ bool have_boot_console;
|
||||||
/* See printk_legacy_allow_panic_sync() for details. */
|
/* See printk_legacy_allow_panic_sync() for details. */
|
||||||
bool legacy_allow_panic_sync;
|
bool legacy_allow_panic_sync;
|
||||||
|
|
||||||
|
/* Avoid using irq_work when suspending. */
|
||||||
|
bool console_irqwork_blocked;
|
||||||
|
|
||||||
#ifdef CONFIG_PRINTK
|
#ifdef CONFIG_PRINTK
|
||||||
DECLARE_WAIT_QUEUE_HEAD(log_wait);
|
DECLARE_WAIT_QUEUE_HEAD(log_wait);
|
||||||
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
|
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
|
||||||
|
|
@ -2390,7 +2393,7 @@ asmlinkage int vprintk_emit(int facility, int level,
|
||||||
/* If called from the scheduler, we can not call up(). */
|
/* If called from the scheduler, we can not call up(). */
|
||||||
if (level == LOGLEVEL_SCHED) {
|
if (level == LOGLEVEL_SCHED) {
|
||||||
level = LOGLEVEL_DEFAULT;
|
level = LOGLEVEL_DEFAULT;
|
||||||
ft.legacy_offload |= ft.legacy_direct;
|
ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
|
||||||
ft.legacy_direct = false;
|
ft.legacy_direct = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2426,7 +2429,7 @@ asmlinkage int vprintk_emit(int facility, int level,
|
||||||
|
|
||||||
if (ft.legacy_offload)
|
if (ft.legacy_offload)
|
||||||
defer_console_output();
|
defer_console_output();
|
||||||
else
|
else if (!console_irqwork_blocked)
|
||||||
wake_up_klogd();
|
wake_up_klogd();
|
||||||
|
|
||||||
return printed_len;
|
return printed_len;
|
||||||
|
|
@ -2730,10 +2733,20 @@ void console_suspend_all(void)
|
||||||
{
|
{
|
||||||
struct console *con;
|
struct console *con;
|
||||||
|
|
||||||
|
if (console_suspend_enabled)
|
||||||
|
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Flush any console backlog and then avoid queueing irq_work until
|
||||||
|
* console_resume_all(). Until then deferred printing is no longer
|
||||||
|
* triggered, NBCON consoles transition to atomic flushing, and
|
||||||
|
* any klogd waiters are not triggered.
|
||||||
|
*/
|
||||||
|
pr_flush(1000, true);
|
||||||
|
console_irqwork_blocked = true;
|
||||||
|
|
||||||
if (!console_suspend_enabled)
|
if (!console_suspend_enabled)
|
||||||
return;
|
return;
|
||||||
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
|
|
||||||
pr_flush(1000, true);
|
|
||||||
|
|
||||||
console_list_lock();
|
console_list_lock();
|
||||||
for_each_console(con)
|
for_each_console(con)
|
||||||
|
|
@ -2754,26 +2767,34 @@ void console_resume_all(void)
|
||||||
struct console_flush_type ft;
|
struct console_flush_type ft;
|
||||||
struct console *con;
|
struct console *con;
|
||||||
|
|
||||||
if (!console_suspend_enabled)
|
|
||||||
return;
|
|
||||||
|
|
||||||
console_list_lock();
|
|
||||||
for_each_console(con)
|
|
||||||
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
|
|
||||||
console_list_unlock();
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Ensure that all SRCU list walks have completed. All printing
|
* Allow queueing irq_work. After restoring console state, deferred
|
||||||
* contexts must be able to see they are no longer suspended so
|
* printing and any klogd waiters need to be triggered in case there
|
||||||
* that they are guaranteed to wake up and resume printing.
|
* is now a console backlog.
|
||||||
*/
|
*/
|
||||||
synchronize_srcu(&console_srcu);
|
console_irqwork_blocked = false;
|
||||||
|
|
||||||
|
if (console_suspend_enabled) {
|
||||||
|
console_list_lock();
|
||||||
|
for_each_console(con)
|
||||||
|
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
|
||||||
|
console_list_unlock();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Ensure that all SRCU list walks have completed. All printing
|
||||||
|
* contexts must be able to see they are no longer suspended so
|
||||||
|
* that they are guaranteed to wake up and resume printing.
|
||||||
|
*/
|
||||||
|
synchronize_srcu(&console_srcu);
|
||||||
|
}
|
||||||
|
|
||||||
printk_get_console_flush_type(&ft);
|
printk_get_console_flush_type(&ft);
|
||||||
if (ft.nbcon_offload)
|
if (ft.nbcon_offload)
|
||||||
nbcon_kthreads_wake();
|
nbcon_kthreads_wake();
|
||||||
if (ft.legacy_offload)
|
if (ft.legacy_offload)
|
||||||
defer_console_output();
|
defer_console_output();
|
||||||
|
else
|
||||||
|
wake_up_klogd();
|
||||||
|
|
||||||
pr_flush(1000, true);
|
pr_flush(1000, true);
|
||||||
}
|
}
|
||||||
|
|
@ -3002,21 +3023,18 @@ bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Legacy console printing from printk() caller context does not respect
|
* The legacy console always acquires a spinlock_t from its printing
|
||||||
* raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
|
* callback. This violates lock nesting if the caller acquired an always
|
||||||
* false positive. For PREEMPT_RT the false positive condition does not
|
* spinning lock (raw_spinlock_t) while invoking printk(). This is not a
|
||||||
* occur.
|
* problem on PREEMPT_RT because legacy consoles print always from a
|
||||||
*
|
* dedicated thread and never from within printk(). Therefore we tell
|
||||||
* This map is used to temporarily establish LD_WAIT_SLEEP context for the
|
* lockdep that a sleeping spin lock (spinlock_t) is valid here.
|
||||||
* console write() callback when legacy printing to avoid false positive
|
|
||||||
* lockdep complaints, thus allowing lockdep to continue to function for
|
|
||||||
* real issues.
|
|
||||||
*/
|
*/
|
||||||
#ifdef CONFIG_PREEMPT_RT
|
#ifdef CONFIG_PREEMPT_RT
|
||||||
static inline void printk_legacy_allow_spinlock_enter(void) { }
|
static inline void printk_legacy_allow_spinlock_enter(void) { }
|
||||||
static inline void printk_legacy_allow_spinlock_exit(void) { }
|
static inline void printk_legacy_allow_spinlock_exit(void) { }
|
||||||
#else
|
#else
|
||||||
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
|
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG);
|
||||||
|
|
||||||
static inline void printk_legacy_allow_spinlock_enter(void)
|
static inline void printk_legacy_allow_spinlock_enter(void)
|
||||||
{
|
{
|
||||||
|
|
@ -3134,6 +3152,108 @@ static inline void printk_kthreads_check_locked(void) { }
|
||||||
|
|
||||||
#endif /* CONFIG_PRINTK */
|
#endif /* CONFIG_PRINTK */
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Print out one record for each console.
|
||||||
|
*
|
||||||
|
* @do_cond_resched is set by the caller. It can be true only in schedulable
|
||||||
|
* context.
|
||||||
|
*
|
||||||
|
* @next_seq is set to the sequence number after the last available record.
|
||||||
|
* The value is valid only when all usable consoles were flushed. It is
|
||||||
|
* when the function returns true (can do the job) and @try_again parameter
|
||||||
|
* is set to false, see below.
|
||||||
|
*
|
||||||
|
* @handover will be set to true if a printk waiter has taken over the
|
||||||
|
* console_lock, in which case the caller is no longer holding the
|
||||||
|
* console_lock. Otherwise it is set to false.
|
||||||
|
*
|
||||||
|
* @try_again will be set to true when it still makes sense to call this
|
||||||
|
* function again. The function could do the job, see the return value.
|
||||||
|
* And some consoles still make progress.
|
||||||
|
*
|
||||||
|
* Returns true when the function could do the job. Some consoles are usable,
|
||||||
|
* and there was no takeover and no panic_on_other_cpu().
|
||||||
|
*
|
||||||
|
* Requires the console_lock.
|
||||||
|
*/
|
||||||
|
static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
|
||||||
|
bool *try_again)
|
||||||
|
{
|
||||||
|
struct console_flush_type ft;
|
||||||
|
bool any_usable = false;
|
||||||
|
struct console *con;
|
||||||
|
int cookie;
|
||||||
|
|
||||||
|
*try_again = false;
|
||||||
|
|
||||||
|
printk_get_console_flush_type(&ft);
|
||||||
|
|
||||||
|
cookie = console_srcu_read_lock();
|
||||||
|
for_each_console_srcu(con) {
|
||||||
|
short flags = console_srcu_read_flags(con);
|
||||||
|
u64 printk_seq;
|
||||||
|
bool progress;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* console_flush_one_record() is only responsible for
|
||||||
|
* nbcon consoles when the nbcon consoles cannot print via
|
||||||
|
* their atomic or threaded flushing.
|
||||||
|
*/
|
||||||
|
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (!console_is_usable(con, flags, !do_cond_resched))
|
||||||
|
continue;
|
||||||
|
any_usable = true;
|
||||||
|
|
||||||
|
if (flags & CON_NBCON) {
|
||||||
|
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
|
||||||
|
!do_cond_resched);
|
||||||
|
printk_seq = nbcon_seq_read(con);
|
||||||
|
} else {
|
||||||
|
progress = console_emit_next_record(con, handover, cookie);
|
||||||
|
printk_seq = con->seq;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If a handover has occurred, the SRCU read lock
|
||||||
|
* is already released.
|
||||||
|
*/
|
||||||
|
if (*handover)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
|
/* Track the next of the highest seq flushed. */
|
||||||
|
if (printk_seq > *next_seq)
|
||||||
|
*next_seq = printk_seq;
|
||||||
|
|
||||||
|
if (!progress)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* An usable console made a progress. There might still be
|
||||||
|
* pending messages.
|
||||||
|
*/
|
||||||
|
*try_again = true;
|
||||||
|
|
||||||
|
/* Allow panic_cpu to take over the consoles safely. */
|
||||||
|
if (panic_on_other_cpu())
|
||||||
|
goto fail_srcu;
|
||||||
|
|
||||||
|
if (do_cond_resched)
|
||||||
|
cond_resched();
|
||||||
|
}
|
||||||
|
console_srcu_read_unlock(cookie);
|
||||||
|
|
||||||
|
return any_usable;
|
||||||
|
|
||||||
|
fail_srcu:
|
||||||
|
console_srcu_read_unlock(cookie);
|
||||||
|
fail:
|
||||||
|
*try_again = false;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Print out all remaining records to all consoles.
|
* Print out all remaining records to all consoles.
|
||||||
*
|
*
|
||||||
|
|
@ -3159,77 +3279,18 @@ static inline void printk_kthreads_check_locked(void) { }
|
||||||
*/
|
*/
|
||||||
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
|
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
|
||||||
{
|
{
|
||||||
struct console_flush_type ft;
|
bool try_again;
|
||||||
bool any_usable = false;
|
bool ret;
|
||||||
struct console *con;
|
|
||||||
bool any_progress;
|
|
||||||
int cookie;
|
|
||||||
|
|
||||||
*next_seq = 0;
|
*next_seq = 0;
|
||||||
*handover = false;
|
*handover = false;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
any_progress = false;
|
ret = console_flush_one_record(do_cond_resched, next_seq,
|
||||||
|
handover, &try_again);
|
||||||
|
} while (try_again);
|
||||||
|
|
||||||
printk_get_console_flush_type(&ft);
|
return ret;
|
||||||
|
|
||||||
cookie = console_srcu_read_lock();
|
|
||||||
for_each_console_srcu(con) {
|
|
||||||
short flags = console_srcu_read_flags(con);
|
|
||||||
u64 printk_seq;
|
|
||||||
bool progress;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* console_flush_all() is only responsible for nbcon
|
|
||||||
* consoles when the nbcon consoles cannot print via
|
|
||||||
* their atomic or threaded flushing.
|
|
||||||
*/
|
|
||||||
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!console_is_usable(con, flags, !do_cond_resched))
|
|
||||||
continue;
|
|
||||||
any_usable = true;
|
|
||||||
|
|
||||||
if (flags & CON_NBCON) {
|
|
||||||
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
|
|
||||||
!do_cond_resched);
|
|
||||||
printk_seq = nbcon_seq_read(con);
|
|
||||||
} else {
|
|
||||||
progress = console_emit_next_record(con, handover, cookie);
|
|
||||||
printk_seq = con->seq;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If a handover has occurred, the SRCU read lock
|
|
||||||
* is already released.
|
|
||||||
*/
|
|
||||||
if (*handover)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/* Track the next of the highest seq flushed. */
|
|
||||||
if (printk_seq > *next_seq)
|
|
||||||
*next_seq = printk_seq;
|
|
||||||
|
|
||||||
if (!progress)
|
|
||||||
continue;
|
|
||||||
any_progress = true;
|
|
||||||
|
|
||||||
/* Allow panic_cpu to take over the consoles safely. */
|
|
||||||
if (panic_on_other_cpu())
|
|
||||||
goto abandon;
|
|
||||||
|
|
||||||
if (do_cond_resched)
|
|
||||||
cond_resched();
|
|
||||||
}
|
|
||||||
console_srcu_read_unlock(cookie);
|
|
||||||
} while (any_progress);
|
|
||||||
|
|
||||||
return any_usable;
|
|
||||||
|
|
||||||
abandon:
|
|
||||||
console_srcu_read_unlock(cookie);
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __console_flush_and_unlock(void)
|
static void __console_flush_and_unlock(void)
|
||||||
|
|
@ -3331,12 +3392,10 @@ void console_unblank(void)
|
||||||
*/
|
*/
|
||||||
cookie = console_srcu_read_lock();
|
cookie = console_srcu_read_lock();
|
||||||
for_each_console_srcu(c) {
|
for_each_console_srcu(c) {
|
||||||
short flags = console_srcu_read_flags(c);
|
if (!console_is_usable(c, console_srcu_read_flags(c), true))
|
||||||
|
|
||||||
if (flags & CON_SUSPENDED)
|
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if ((flags & CON_ENABLED) && c->unblank) {
|
if (c->unblank) {
|
||||||
found_unblank = true;
|
found_unblank = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
@ -3373,12 +3432,10 @@ void console_unblank(void)
|
||||||
|
|
||||||
cookie = console_srcu_read_lock();
|
cookie = console_srcu_read_lock();
|
||||||
for_each_console_srcu(c) {
|
for_each_console_srcu(c) {
|
||||||
short flags = console_srcu_read_flags(c);
|
if (!console_is_usable(c, console_srcu_read_flags(c), true))
|
||||||
|
|
||||||
if (flags & CON_SUSPENDED)
|
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if ((flags & CON_ENABLED) && c->unblank)
|
if (c->unblank)
|
||||||
c->unblank();
|
c->unblank();
|
||||||
}
|
}
|
||||||
console_srcu_read_unlock(cookie);
|
console_srcu_read_unlock(cookie);
|
||||||
|
|
@ -3601,17 +3658,26 @@ static bool legacy_kthread_should_wakeup(void)
|
||||||
|
|
||||||
static int legacy_kthread_func(void *unused)
|
static int legacy_kthread_func(void *unused)
|
||||||
{
|
{
|
||||||
for (;;) {
|
bool try_again;
|
||||||
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
|
|
||||||
|
wait_for_event:
|
||||||
|
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
|
||||||
|
|
||||||
|
do {
|
||||||
|
bool handover = false;
|
||||||
|
u64 next_seq = 0;
|
||||||
|
|
||||||
if (kthread_should_stop())
|
if (kthread_should_stop())
|
||||||
break;
|
return 0;
|
||||||
|
|
||||||
console_lock();
|
console_lock();
|
||||||
__console_flush_and_unlock();
|
console_flush_one_record(true, &next_seq, &handover, &try_again);
|
||||||
}
|
if (!handover)
|
||||||
|
__console_unlock();
|
||||||
|
|
||||||
return 0;
|
} while (try_again);
|
||||||
|
|
||||||
|
goto wait_for_event;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool legacy_kthread_create(void)
|
static bool legacy_kthread_create(void)
|
||||||
|
|
@ -4511,6 +4577,13 @@ static void __wake_up_klogd(int val)
|
||||||
if (!printk_percpu_data_ready())
|
if (!printk_percpu_data_ready())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* It is not allowed to call this function when console irq_work
|
||||||
|
* is blocked.
|
||||||
|
*/
|
||||||
|
if (WARN_ON_ONCE(console_irqwork_blocked))
|
||||||
|
return;
|
||||||
|
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
/*
|
/*
|
||||||
* Guarantee any new records can be seen by tasks preparing to wait
|
* Guarantee any new records can be seen by tasks preparing to wait
|
||||||
|
|
@ -4567,9 +4640,30 @@ void defer_console_output(void)
|
||||||
__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
|
__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* printk_trigger_flush - Attempt to flush printk buffer to consoles.
|
||||||
|
*
|
||||||
|
* If possible, flush the printk buffer to all consoles in the caller's
|
||||||
|
* context. If offloading is available, trigger deferred printing.
|
||||||
|
*
|
||||||
|
* This is best effort. Depending on the system state, console states,
|
||||||
|
* and caller context, no actual flushing may result from this call.
|
||||||
|
*/
|
||||||
void printk_trigger_flush(void)
|
void printk_trigger_flush(void)
|
||||||
{
|
{
|
||||||
defer_console_output();
|
struct console_flush_type ft;
|
||||||
|
|
||||||
|
printk_get_console_flush_type(&ft);
|
||||||
|
if (ft.nbcon_atomic)
|
||||||
|
nbcon_atomic_flush_pending();
|
||||||
|
if (ft.nbcon_offload)
|
||||||
|
nbcon_kthreads_wake();
|
||||||
|
if (ft.legacy_direct) {
|
||||||
|
if (console_trylock())
|
||||||
|
console_unlock();
|
||||||
|
}
|
||||||
|
if (ft.legacy_offload)
|
||||||
|
defer_console_output();
|
||||||
}
|
}
|
||||||
|
|
||||||
int vprintk_deferred(const char *fmt, va_list args)
|
int vprintk_deferred(const char *fmt, va_list args)
|
||||||
|
|
|
||||||
|
|
@ -411,6 +411,23 @@ static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
|
||||||
return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
|
return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Compare the current and requested logical position and decide
|
||||||
|
* whether more space is needed.
|
||||||
|
*
|
||||||
|
* Return false when @lpos_current is already at or beyond @lpos_target.
|
||||||
|
*
|
||||||
|
* Also return false when the difference between the positions is bigger
|
||||||
|
* than the size of the data buffer. It might happen only when the caller
|
||||||
|
* raced with another CPU(s) which already made and used the space.
|
||||||
|
*/
|
||||||
|
static bool need_more_space(struct prb_data_ring *data_ring,
|
||||||
|
unsigned long lpos_current,
|
||||||
|
unsigned long lpos_target)
|
||||||
|
{
|
||||||
|
return lpos_target - lpos_current - 1 < DATA_SIZE(data_ring);
|
||||||
|
}
|
||||||
|
|
||||||
/* Query the state of a descriptor. */
|
/* Query the state of a descriptor. */
|
||||||
static enum desc_state get_desc_state(unsigned long id,
|
static enum desc_state get_desc_state(unsigned long id,
|
||||||
unsigned long state_val)
|
unsigned long state_val)
|
||||||
|
|
@ -577,7 +594,7 @@ static bool data_make_reusable(struct printk_ringbuffer *rb,
|
||||||
unsigned long id;
|
unsigned long id;
|
||||||
|
|
||||||
/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
|
/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
|
||||||
while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
|
while (need_more_space(data_ring, lpos_begin, lpos_end)) {
|
||||||
blk = to_block(data_ring, lpos_begin);
|
blk = to_block(data_ring, lpos_begin);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -668,7 +685,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
|
||||||
* sees the new tail lpos, any descriptor states that transitioned to
|
* sees the new tail lpos, any descriptor states that transitioned to
|
||||||
* the reusable state must already be visible.
|
* the reusable state must already be visible.
|
||||||
*/
|
*/
|
||||||
while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
|
while (need_more_space(data_ring, tail_lpos, lpos)) {
|
||||||
/*
|
/*
|
||||||
* Make all descriptors reusable that are associated with
|
* Make all descriptors reusable that are associated with
|
||||||
* data blocks before @lpos.
|
* data blocks before @lpos.
|
||||||
|
|
@ -999,6 +1016,17 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool is_blk_wrapped(struct prb_data_ring *data_ring,
|
||||||
|
unsigned long begin_lpos, unsigned long next_lpos)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Subtract one from next_lpos since it's not actually part of this data
|
||||||
|
* block. This allows perfectly fitting records to not wrap.
|
||||||
|
*/
|
||||||
|
return DATA_WRAPS(data_ring, begin_lpos) !=
|
||||||
|
DATA_WRAPS(data_ring, next_lpos - 1);
|
||||||
|
}
|
||||||
|
|
||||||
/* Determine the end of a data block. */
|
/* Determine the end of a data block. */
|
||||||
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
|
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
|
||||||
unsigned long lpos, unsigned int size)
|
unsigned long lpos, unsigned int size)
|
||||||
|
|
@ -1010,7 +1038,7 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
|
||||||
next_lpos = lpos + size;
|
next_lpos = lpos + size;
|
||||||
|
|
||||||
/* First check if the data block does not wrap. */
|
/* First check if the data block does not wrap. */
|
||||||
if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
|
if (!is_blk_wrapped(data_ring, begin_lpos, next_lpos))
|
||||||
return next_lpos;
|
return next_lpos;
|
||||||
|
|
||||||
/* Wrapping data blocks store their data at the beginning. */
|
/* Wrapping data blocks store their data at the beginning. */
|
||||||
|
|
@ -1087,7 +1115,7 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
|
||||||
blk = to_block(data_ring, begin_lpos);
|
blk = to_block(data_ring, begin_lpos);
|
||||||
blk->id = id; /* LMM(data_alloc:B) */
|
blk->id = id; /* LMM(data_alloc:B) */
|
||||||
|
|
||||||
if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
|
if (is_blk_wrapped(data_ring, begin_lpos, next_lpos)) {
|
||||||
/* Wrapping data blocks store their data at the beginning. */
|
/* Wrapping data blocks store their data at the beginning. */
|
||||||
blk = to_block(data_ring, 0);
|
blk = to_block(data_ring, 0);
|
||||||
|
|
||||||
|
|
@ -1131,14 +1159,21 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
/* Keep track if @blk_lpos was a wrapping data block. */
|
/* Keep track if @blk_lpos was a wrapping data block. */
|
||||||
wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
|
wrapped = is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next);
|
||||||
|
|
||||||
size = to_blk_size(size);
|
size = to_blk_size(size);
|
||||||
|
|
||||||
next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
|
next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
|
||||||
|
|
||||||
/* If the data block does not increase, there is nothing to do. */
|
/*
|
||||||
if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
|
* Use the current data block when the size does not increase, i.e.
|
||||||
|
* when @head_lpos is already able to accommodate the new @next_lpos.
|
||||||
|
*
|
||||||
|
* Note that need_more_space() could never return false here because
|
||||||
|
* the difference between the positions was bigger than the data
|
||||||
|
* buffer size. The data block is reopened and can't get reused.
|
||||||
|
*/
|
||||||
|
if (!need_more_space(data_ring, head_lpos, next_lpos)) {
|
||||||
if (wrapped)
|
if (wrapped)
|
||||||
blk = to_block(data_ring, 0);
|
blk = to_block(data_ring, 0);
|
||||||
else
|
else
|
||||||
|
|
@ -1167,7 +1202,7 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
|
||||||
|
|
||||||
blk = to_block(data_ring, blk_lpos->begin);
|
blk = to_block(data_ring, blk_lpos->begin);
|
||||||
|
|
||||||
if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
|
if (is_blk_wrapped(data_ring, blk_lpos->begin, next_lpos)) {
|
||||||
struct prb_data_block *old_blk = blk;
|
struct prb_data_block *old_blk = blk;
|
||||||
|
|
||||||
/* Wrapping data blocks store their data at the beginning. */
|
/* Wrapping data blocks store their data at the beginning. */
|
||||||
|
|
@ -1203,7 +1238,7 @@ static unsigned int space_used(struct prb_data_ring *data_ring,
|
||||||
if (BLK_DATALESS(blk_lpos))
|
if (BLK_DATALESS(blk_lpos))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
|
if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
|
||||||
/* Data block does not wrap. */
|
/* Data block does not wrap. */
|
||||||
return (DATA_INDEX(data_ring, blk_lpos->next) -
|
return (DATA_INDEX(data_ring, blk_lpos->next) -
|
||||||
DATA_INDEX(data_ring, blk_lpos->begin));
|
DATA_INDEX(data_ring, blk_lpos->begin));
|
||||||
|
|
@ -1249,15 +1284,15 @@ static const char *get_data(struct prb_data_ring *data_ring,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Regular data block: @begin less than @next and in same wrap. */
|
/* Regular data block: @begin and @next in the same wrap. */
|
||||||
if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
|
if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
|
||||||
blk_lpos->begin < blk_lpos->next) {
|
|
||||||
db = to_block(data_ring, blk_lpos->begin);
|
db = to_block(data_ring, blk_lpos->begin);
|
||||||
*data_size = blk_lpos->next - blk_lpos->begin;
|
*data_size = blk_lpos->next - blk_lpos->begin;
|
||||||
|
|
||||||
/* Wrapping data block: @begin is one wrap behind @next. */
|
/* Wrapping data block: @begin is one wrap behind @next. */
|
||||||
} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
|
} else if (!is_blk_wrapped(data_ring,
|
||||||
DATA_WRAPS(data_ring, blk_lpos->next)) {
|
blk_lpos->begin + DATA_SIZE(data_ring),
|
||||||
|
blk_lpos->next)) {
|
||||||
db = to_block(data_ring, 0);
|
db = to_block(data_ring, 0);
|
||||||
*data_size = DATA_INDEX(data_ring, blk_lpos->next);
|
*data_size = DATA_INDEX(data_ring, blk_lpos->next);
|
||||||
|
|
||||||
|
|
@ -1267,6 +1302,10 @@ static const char *get_data(struct prb_data_ring *data_ring,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Sanity check. Data-less blocks were handled earlier. */
|
||||||
|
if (WARN_ON_ONCE(!data_check_size(data_ring, *data_size) || !*data_size))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
/* A valid data block will always be aligned to the ID size. */
|
/* A valid data block will always be aligned to the ID size. */
|
||||||
if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
|
if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
|
||||||
WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
|
WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
|
||||||
|
|
|
||||||
|
|
@ -1467,12 +1467,12 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
|
||||||
|
|
||||||
trace_assign_type(field, entry);
|
trace_assign_type(field, entry);
|
||||||
|
|
||||||
trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
|
trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d",
|
||||||
field->seqnum,
|
field->seqnum,
|
||||||
field->duration,
|
field->duration,
|
||||||
field->outer_duration,
|
field->outer_duration,
|
||||||
(long long)field->timestamp.tv_sec,
|
&field->timestamp,
|
||||||
field->timestamp.tv_nsec, field->count);
|
field->count);
|
||||||
|
|
||||||
if (field->nmi_count) {
|
if (field->nmi_count) {
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -504,6 +504,7 @@ time_and_date(struct kunit *kunittest)
|
||||||
};
|
};
|
||||||
/* 2019-01-04T15:32:23 */
|
/* 2019-01-04T15:32:23 */
|
||||||
time64_t t = 1546615943;
|
time64_t t = 1546615943;
|
||||||
|
struct timespec64 ts = { .tv_sec = t, .tv_nsec = 11235813 };
|
||||||
|
|
||||||
test("(%pt?)", "%pt", &tm);
|
test("(%pt?)", "%pt", &tm);
|
||||||
test("2018-11-26T05:35:43", "%ptR", &tm);
|
test("2018-11-26T05:35:43", "%ptR", &tm);
|
||||||
|
|
@ -522,6 +523,9 @@ time_and_date(struct kunit *kunittest)
|
||||||
test("0119-00-04 15:32:23", "%ptTsr", &t);
|
test("0119-00-04 15:32:23", "%ptTsr", &t);
|
||||||
test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
|
test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
|
||||||
test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
|
test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
|
||||||
|
|
||||||
|
test("2019-01-04T15:32:23.011235813", "%ptS", &ts);
|
||||||
|
test("1546615943.011235813", "%ptSp", &ts);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
|
|
||||||
|
|
@ -582,17 +582,18 @@ char *number(char *buf, char *end, unsigned long long num,
|
||||||
return buf;
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define special_hex_spec(size) \
|
||||||
|
(struct printf_spec) { \
|
||||||
|
.field_width = 2 + 2 * (size), /* 0x + hex */ \
|
||||||
|
.flags = SPECIAL | SMALL | ZEROPAD, \
|
||||||
|
.base = 16, \
|
||||||
|
.precision = -1, \
|
||||||
|
}
|
||||||
|
|
||||||
static noinline_for_stack
|
static noinline_for_stack
|
||||||
char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
|
char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
|
||||||
{
|
{
|
||||||
struct printf_spec spec;
|
return number(buf, end, num, special_hex_spec(size));
|
||||||
|
|
||||||
spec.field_width = 2 + 2 * size; /* 0x + hex */
|
|
||||||
spec.flags = SPECIAL | SMALL | ZEROPAD;
|
|
||||||
spec.base = 16;
|
|
||||||
spec.precision = -1;
|
|
||||||
|
|
||||||
return number(buf, end, num, spec);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
|
static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
|
||||||
|
|
@ -1164,18 +1165,11 @@ char *range_string(char *buf, char *end, const struct range *range,
|
||||||
char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")];
|
char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")];
|
||||||
char *p = sym, *pend = sym + sizeof(sym);
|
char *p = sym, *pend = sym + sizeof(sym);
|
||||||
|
|
||||||
struct printf_spec range_spec = {
|
|
||||||
.field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */
|
|
||||||
.flags = SPECIAL | SMALL | ZEROPAD,
|
|
||||||
.base = 16,
|
|
||||||
.precision = -1,
|
|
||||||
};
|
|
||||||
|
|
||||||
if (check_pointer(&buf, end, range, spec))
|
if (check_pointer(&buf, end, range, spec))
|
||||||
return buf;
|
return buf;
|
||||||
|
|
||||||
p = string_nocheck(p, pend, "[range ", default_str_spec);
|
p = string_nocheck(p, pend, "[range ", default_str_spec);
|
||||||
p = hex_range(p, pend, range->start, range->end, range_spec);
|
p = hex_range(p, pend, range->start, range->end, special_hex_spec(sizeof(range->start)));
|
||||||
*p++ = ']';
|
*p++ = ']';
|
||||||
*p = '\0';
|
*p = '\0';
|
||||||
|
|
||||||
|
|
@ -1928,9 +1922,6 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
|
||||||
bool found = true;
|
bool found = true;
|
||||||
int count = 2;
|
int count = 2;
|
||||||
|
|
||||||
if (check_pointer(&buf, end, tm, spec))
|
|
||||||
return buf;
|
|
||||||
|
|
||||||
switch (fmt[count]) {
|
switch (fmt[count]) {
|
||||||
case 'd':
|
case 'd':
|
||||||
have_t = false;
|
have_t = false;
|
||||||
|
|
@ -1992,13 +1983,40 @@ char *time64_str(char *buf, char *end, const time64_t time,
|
||||||
return rtc_str(buf, end, &rtc_time, spec, fmt);
|
return rtc_str(buf, end, &rtc_time, spec, fmt);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static noinline_for_stack
|
||||||
|
char *timespec64_str(char *buf, char *end, const struct timespec64 *ts,
|
||||||
|
struct printf_spec spec, const char *fmt)
|
||||||
|
{
|
||||||
|
static const struct printf_spec default_dec09_spec = {
|
||||||
|
.base = 10,
|
||||||
|
.field_width = 9,
|
||||||
|
.precision = -1,
|
||||||
|
.flags = ZEROPAD,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (fmt[2] == 'p')
|
||||||
|
buf = number(buf, end, ts->tv_sec, default_dec_spec);
|
||||||
|
else
|
||||||
|
buf = time64_str(buf, end, ts->tv_sec, spec, fmt);
|
||||||
|
if (buf < end)
|
||||||
|
*buf = '.';
|
||||||
|
buf++;
|
||||||
|
|
||||||
|
return number(buf, end, ts->tv_nsec, default_dec09_spec);
|
||||||
|
}
|
||||||
|
|
||||||
static noinline_for_stack
|
static noinline_for_stack
|
||||||
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
|
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
|
||||||
const char *fmt)
|
const char *fmt)
|
||||||
{
|
{
|
||||||
|
if (check_pointer(&buf, end, ptr, spec))
|
||||||
|
return buf;
|
||||||
|
|
||||||
switch (fmt[1]) {
|
switch (fmt[1]) {
|
||||||
case 'R':
|
case 'R':
|
||||||
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
|
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
|
||||||
|
case 'S':
|
||||||
|
return timespec64_str(buf, end, (const struct timespec64 *)ptr, spec, fmt);
|
||||||
case 'T':
|
case 'T':
|
||||||
return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
|
return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
|
||||||
default:
|
default:
|
||||||
|
|
@ -2462,9 +2480,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
|
||||||
* - 'd[234]' For a dentry name (optionally 2-4 last components)
|
* - 'd[234]' For a dentry name (optionally 2-4 last components)
|
||||||
* - 'D[234]' Same as 'd' but for a struct file
|
* - 'D[234]' Same as 'd' but for a struct file
|
||||||
* - 'g' For block_device name (gendisk + partition number)
|
* - 'g' For block_device name (gendisk + partition number)
|
||||||
* - 't[RT][dt][r][s]' For time and date as represented by:
|
* - 't[RST][dt][r][s]' For time and date as represented by:
|
||||||
* R struct rtc_time
|
* R struct rtc_time
|
||||||
|
* S struct timespec64
|
||||||
* T time64_t
|
* T time64_t
|
||||||
|
* - 'tSp' For time represented by struct timespec64 printed as <seconds>.<nanoseconds>
|
||||||
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
|
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
|
||||||
* (legacy clock framework) of the clock
|
* (legacy clock framework) of the clock
|
||||||
* - 'G' For flags to be printed as a collection of symbolic strings that would
|
* - 'G' For flags to be printed as a collection of symbolic strings that would
|
||||||
|
|
@ -2883,10 +2903,11 @@ int vsnprintf(char *buf, size_t size, const char *fmt_str, va_list args)
|
||||||
|
|
||||||
case FORMAT_STATE_NUM: {
|
case FORMAT_STATE_NUM: {
|
||||||
unsigned long long num;
|
unsigned long long num;
|
||||||
if (fmt.size <= sizeof(int))
|
|
||||||
num = convert_num_spec(va_arg(args, int), fmt.size, spec);
|
if (fmt.size > sizeof(int))
|
||||||
else
|
|
||||||
num = va_arg(args, long long);
|
num = va_arg(args, long long);
|
||||||
|
else
|
||||||
|
num = convert_num_spec(va_arg(args, int), fmt.size, spec);
|
||||||
str = number(str, end, num, spec);
|
str = number(str, end, num, spec);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
@ -3054,8 +3075,8 @@ EXPORT_SYMBOL(scnprintf);
|
||||||
* @fmt: The format string to use
|
* @fmt: The format string to use
|
||||||
* @args: Arguments for the format string
|
* @args: Arguments for the format string
|
||||||
*
|
*
|
||||||
* The function returns the number of characters written
|
* The return value is the number of characters written into @buf not including
|
||||||
* into @buf. Use vsnprintf() or vscnprintf() in order to avoid
|
* the trailing '\0'. Use vsnprintf() or vscnprintf() in order to avoid
|
||||||
* buffer overflows.
|
* buffer overflows.
|
||||||
*
|
*
|
||||||
* If you're not already dealing with a va_list consider using sprintf().
|
* If you're not already dealing with a va_list consider using sprintf().
|
||||||
|
|
@ -3074,8 +3095,8 @@ EXPORT_SYMBOL(vsprintf);
|
||||||
* @fmt: The format string to use
|
* @fmt: The format string to use
|
||||||
* @...: Arguments for the format string
|
* @...: Arguments for the format string
|
||||||
*
|
*
|
||||||
* The function returns the number of characters written
|
* The return value is the number of characters written into @buf not including
|
||||||
* into @buf. Use snprintf() or scnprintf() in order to avoid
|
* the trailing '\0'. Use snprintf() or scnprintf() in order to avoid
|
||||||
* buffer overflows.
|
* buffer overflows.
|
||||||
*
|
*
|
||||||
* See the vsnprintf() documentation for format string extensions over C99.
|
* See the vsnprintf() documentation for format string extensions over C99.
|
||||||
|
|
@ -3394,11 +3415,10 @@ int bstr_printf(char *buf, size_t size, const char *fmt_str, const u32 *bin_buf)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
case FORMAT_STATE_NUM:
|
case FORMAT_STATE_NUM:
|
||||||
if (fmt.size > sizeof(int)) {
|
if (fmt.size > sizeof(int))
|
||||||
num = get_arg(long long);
|
num = get_arg(long long);
|
||||||
} else {
|
else
|
||||||
num = convert_num_spec(get_arg(int), fmt.size, spec);
|
num = convert_num_spec(get_arg(int), fmt.size, spec);
|
||||||
}
|
|
||||||
str = number(str, end, num, spec);
|
str = number(str, end, num, spec);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1538,8 +1538,7 @@ static int prepare_keepalive2(struct ceph_connection *con)
|
||||||
struct timespec64 now;
|
struct timespec64 now;
|
||||||
|
|
||||||
ktime_get_real_ts64(&now);
|
ktime_get_real_ts64(&now);
|
||||||
dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
|
dout("%s con %p timestamp %ptSp\n", __func__, con, &now);
|
||||||
now.tv_nsec);
|
|
||||||
|
|
||||||
ceph_encode_timespec64(ts, &now);
|
ceph_encode_timespec64(ts, &now);
|
||||||
|
|
||||||
|
|
@ -2732,8 +2731,7 @@ static int process_keepalive2_ack(struct ceph_connection *con,
|
||||||
ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
|
ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
|
||||||
ceph_decode_timespec64(&con->last_keepalive_ack, p);
|
ceph_decode_timespec64(&con->last_keepalive_ack, p);
|
||||||
|
|
||||||
dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
|
dout("%s con %p timestamp %ptSp\n", __func__, con, &con->last_keepalive_ack);
|
||||||
con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue