// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the
 * backing device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>
/* Rate limiting */

static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);
	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}
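
/*
 * Worked example of the controller above (illustrative numbers only, not
 * taken from any particular configuration): with the defaults set in
 * bch_cached_dev_writeback_init() (p_term_inverse = 40, i_term_inverse =
 * 10000, update interval 5 s), being 40960 sectors (20 MiB) over target
 * contributes 40960 / 40 = 1024 sectors/s from the proportional term,
 * while each 5 s update adds 40960 * 5 = 204800 to the integral
 * accumulator, i.e. roughly 20 sectors/s after scaling by 1/10000.
 */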
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}
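
/*
 * State for one dirty key being written back: the closure tracking the
 * read-from-cache / write-to-backing sequence, the owning cached_dev, and
 * the bio (with inline bvecs) reused for both I/Os.
 */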
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};
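
/* Initialize the bio in a dirty_io to cover KEY_SIZE(&w->key) sectors */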
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}
static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}
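
/*
 * Writeback of a key runs as a closure chain: read_dirty() reads the dirty
 * data from the cache device, write_dirty() writes it to the backing
 * device, and write_dirty_finish() below clears the dirty bit in the btree
 * and releases the keybuf slot.
 */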
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		closure_bio_submit(&io->bio, cl);
	}

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, "reading dirty data from cache");

	dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
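
/*
 * Main writeback pump: pull dirty keys from the keybuf, read each one from
 * the cache device and hand it to the write path above.  The in_flight
 * semaphore bounds the number of outstanding writeback I/Os, and
 * writeback_delay() paces submissions according to the computed rate.
 */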
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */
	while (!kthread_should_stop()) {
		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
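
/* Keybuf predicate used when refilling writeback_keys: select only dirty keys */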
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}
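
/*
 * When partial stripe writes to the backing device are expensive (for
 * example a RAID-style backing device), refill the keybuf from stripes
 * that are completely dirty first, so writeback can issue full-stripe
 * writes.
 */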
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);
		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}
/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * enumerated yet
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
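
/*
 * Background writeback thread: refill the keybuf with dirty keys, write
 * them out via read_dirty(), and sleep when there is nothing to do.  Once
 * a full index scan finds no dirty data left, clear the dirty flag and
 * mark the backing device clean in its superblock.
 */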
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	return 0;
}
/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}
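
/*
 * Walk the btree once at registration time to count how many sectors of
 * this device are already dirty, so the writeback rate controller starts
 * from an accurate figure.
 */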
void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}
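
/*
 * Set up writeback state and default tunables for a cached device; the
 * rate controller starts from writeback_percent = 10 and is re-evaluated
 * every writeback_rate_update_seconds.
 */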
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}