author     Linus Torvalds <torvalds@linux-foundation.org>   2020-12-16 10:45:11 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-12-16 10:45:11 -0800
commit     d3eb52113d162cc88975fbd03c9e6f9cf2f8a771 (patch)
tree       7ef2b73e6174d355d625c2a05377b14c0939cca7 /kernel/printk/printk_ringbuffer.c
parent     5e60366d56c630e32befce7ef05c569e04391ca3 (diff)
parent     5ed37174e6c7e1ed4abfd0d8e932a3044441fb5f (diff)
Merge tag 'printk-for-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux
Pull printk updates from Petr Mladek:

 - Finally allow parallel writes and reads into/from the lockless
   ringbuffer. But it is not a complete solution. Readers are still
   serialized against each other. And nested writes are still prevented
   by printk_safe per-CPU buffers.

 - Use ttynull as the ultimate fallback for /dev/console.

 - Officially allow disabling console output by using console="" or
   console=null

 - A few code cleanups

* tag 'printk-for-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux:
  printk: remove logbuf_lock writer-protection of ringbuffer
  printk: inline log_output(),log_store() in vprintk_store()
  printk: remove obsolete dead assignment
  printk/console: Allow to disable console output by using console="" or console=null
  init/console: Use ttynull as a fallback when there is no console
  printk: ringbuffer: Reference text_data_ring directly in callees.
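The last entry in that list, "printk: ringbuffer: Reference text_data_ring directly in callees", is the only change visible in this file. As a quick illustration of the pattern it applies, here is a minimal userspace sketch, with simplified stand-in structs rather than the real printk_ringbuffer definitions: a helper that used to take both the ringbuffer and its text data ring now takes only the ringbuffer and looks up the data ring itself, because every caller passed &rb->text_data_ring anyway.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's printk_ringbuffer/prb_data_ring. */
struct toy_data_ring {
	unsigned long tail_lpos;
};

struct toy_ringbuffer {
	struct toy_data_ring text_data_ring;
};

/*
 * Old shape: the data ring is passed explicitly, even though it is always
 * &rb->text_data_ring at every call site.
 */
static bool toy_push_tail_old(struct toy_ringbuffer *rb,
			      struct toy_data_ring *data_ring,
			      unsigned long lpos)
{
	(void)rb;	/* kept only to mirror the original signature */
	if (lpos > data_ring->tail_lpos)
		data_ring->tail_lpos = lpos;
	return true;
}

/* New shape: reference text_data_ring directly in the callee. */
static bool toy_push_tail_new(struct toy_ringbuffer *rb, unsigned long lpos)
{
	struct toy_data_ring *data_ring = &rb->text_data_ring;

	if (lpos > data_ring->tail_lpos)
		data_ring->tail_lpos = lpos;
	return true;
}

int main(void)
{
	struct toy_ringbuffer rb = { .text_data_ring = { 0 } };

	toy_push_tail_old(&rb, &rb.text_data_ring, 8);	/* old call site */
	toy_push_tail_new(&rb, 16);			/* new, shorter call site */
	printf("tail_lpos = %lu\n", rb.text_data_ring.tail_lpos);
	return 0;
}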
Diffstat (limited to 'kernel/printk/printk_ringbuffer.c')
-rw-r--r--   kernel/printk/printk_ringbuffer.c   32
1 file changed, 15 insertions, 17 deletions
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 74e25a1704f2..6704f06e0417 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -559,11 +559,12 @@ static void desc_make_reusable(struct prb_desc_ring *desc_ring,
* on error the caller can re-load the tail lpos to determine the situation.
*/
static bool data_make_reusable(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring,
unsigned long lpos_begin,
unsigned long lpos_end,
unsigned long *lpos_out)
{
+
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_desc_ring *desc_ring = &rb->desc_ring;
struct prb_data_block *blk;
enum desc_state d_state;
@@ -625,10 +626,9 @@ static bool data_make_reusable(struct printk_ringbuffer *rb,
* descriptors into the reusable state if the tail is pushed beyond
* their associated data block.
*/
-static bool data_push_tail(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring,
- unsigned long lpos)
+static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
{
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
unsigned long tail_lpos_new;
unsigned long tail_lpos;
unsigned long next_lpos;
@@ -669,8 +669,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb,
* Make all descriptors reusable that are associated with
* data blocks before @lpos.
*/
- if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
- &next_lpos)) {
+ if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) {
/*
* 1. Guarantee the block ID loaded in
* data_make_reusable() is performed before
@@ -807,7 +806,7 @@ static bool desc_push_tail(struct printk_ringbuffer *rb,
* data blocks once their associated descriptor is gone.
*/
- if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
+ if (!data_push_tail(rb, desc.text_blk_lpos.next))
return false;
/*
@@ -1019,10 +1018,10 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
* if necessary. This function also associates the data block with
* a specified descriptor.
*/
-static char *data_alloc(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring, unsigned int size,
+static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_data_block *blk;
unsigned long begin_lpos;
unsigned long next_lpos;
@@ -1041,7 +1040,7 @@ static char *data_alloc(struct printk_ringbuffer *rb,
do {
next_lpos = get_next_lpos(data_ring, begin_lpos, size);
- if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
+ if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
/* Failed to allocate, specify a data-less block. */
blk_lpos->begin = FAILED_LPOS;
blk_lpos->next = FAILED_LPOS;
@@ -1100,10 +1099,10 @@ static char *data_alloc(struct printk_ringbuffer *rb,
* Return a pointer to the beginning of the entire data buffer or NULL on
* failure.
*/
-static char *data_realloc(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring, unsigned int size,
+static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_data_block *blk;
unsigned long head_lpos;
unsigned long next_lpos;
@@ -1130,7 +1129,7 @@ static char *data_realloc(struct printk_ringbuffer *rb,
return &blk->data[0];
}
- if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring)))
+ if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring)))
return NULL;
/* The memory barrier involvement is the same as data_alloc:A. */
@@ -1395,7 +1394,7 @@ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer
if (r->text_buf_size > max_size)
goto fail;
- r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
+ r->text_buf = data_alloc(rb, r->text_buf_size,
&d->text_blk_lpos, id);
} else {
if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
@@ -1419,7 +1418,7 @@ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer
if (r->text_buf_size > max_size)
goto fail;
- r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
+ r->text_buf = data_realloc(rb, r->text_buf_size,
&d->text_blk_lpos, id);
}
if (r->text_buf_size && !r->text_buf)
@@ -1547,8 +1546,7 @@ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
if (info->seq > 0)
desc_make_final(desc_ring, DESC_ID(id - 1));
- r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
- &d->text_blk_lpos, id);
+ r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
/* If text data allocation fails, a data-less record is committed. */
if (r->text_buf_size && !r->text_buf) {
prb_commit(e);
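For context on the prb_reserve()/prb_commit() call sites touched in the hunks above, here is a rough writer-side usage sketch modeled on the sample writer code documented at the top of printk_ringbuffer.c. The ringbuffer instance name (rb) and message text are placeholders, and the exact field and helper names should be treated as assumptions rather than something verified against this exact tree.

	const char *textstr = "message text";
	struct prb_reserved_entry e;
	struct printk_record r;

	/* Specify how much text buffer space the record needs. */
	prb_rec_init_wr(&r, strlen(textstr) + 1);

	/* rb: a struct printk_ringbuffer instance (placeholder name). */
	if (prb_reserve(&e, &rb, &r)) {
		snprintf(&r.text_buf[0], r.text_buf_size, "%s", textstr);
		r.info->text_len = strlen(textstr);

		/* Commit and finalize: the record becomes available to readers. */
		prb_final_commit(&e);
	}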